/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <stdio.h>
#include "globals.h"

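/*
 * Per-device state (struct d_info) lives on a small hash table: the
 * device's major and minor numbers are XOR-ed together and masked
 * down to one of N_DEV_HASH list heads.
 */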
#define N_DEV_HASH	128
#define DEV_HASH(dev)	((MAJOR(dev) ^ MINOR(dev)) & (N_DEV_HASH - 1))
struct list_head	dev_heads[N_DEV_HASH];

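/* Allocate and zero one RB tree root per IO operation type. */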
static inline void *dip_rb_mkhds(void)
{
	size_t len = N_IOP_TYPES * sizeof(struct rb_root);
	return memset(malloc(len), 0, len);
}

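/* Recursively free every struct io hanging off an RB tree. */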
static void __destroy(struct rb_node *n)
{
	if (n) {
		struct io *iop = rb_entry(n, struct io, rb_node);

		__destroy(n->rb_left);
		__destroy(n->rb_right);
		io_release(iop);
	}
}

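/* Tear down each per-type RB tree, then free the array of roots. */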
static void __destroy_heads(struct rb_root *roots)
{
	int i;

	for (i = 0; i < N_IOP_TYPES; i++)
		__destroy(roots[i].rb_node);

	free(roots);
}

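/* Initialize every bucket of the device hash table. */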
void init_dev_heads(void)
{
	int i;
	for (i = 0; i < N_DEV_HASH; i++)
		INIT_LIST_HEAD(&dev_heads[i]);
}

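/* Look up the d_info for a device number; NULL if it has not been seen. */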
struct d_info *__dip_find(__u32 device)
{
	struct d_info *dip;
	struct list_head *p;

	__list_for_each(p, &dev_heads[DEV_HASH(device)]) {
		dip = list_entry(p, struct d_info, hash_head);
		if (device == dip->device)
			return dip;
	}

	return NULL;
}

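/*
 * Take a device off the global list and release everything that was
 * allocated for it in dip_alloc().
 */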
void __dip_exit(struct d_info *dip)
{
	list_del(&dip->all_head);
	__destroy_heads(dip->heads);
	region_exit(&dip->regions);
	seeki_free(dip->seek_handle);
	seeki_free(dip->q2q_handle);
	aqd_free(dip->aqd_handle);
	plat_free(dip->q2d_plat_handle);
	plat_free(dip->q2c_plat_handle);
	plat_free(dip->d2c_plat_handle);
	bno_dump_free(dip->bno_dump_handle);
	unplug_hist_free(dip->up_hist_handle);
	if (output_all_data)
		q2d_free(dip->q2d_priv);
	if (dip->pit_fp)
		fclose(dip->pit_fp);
	free(dip);
}

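/* Release every device remaining on the all_devs list. */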
void dip_exit(void)
{
	struct list_head *p, *q;

	list_for_each_safe(p, q, &all_devs) {
		struct d_info *dip = list_entry(p, struct d_info, all_head);
		__dip_exit(dip);
	}
}

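/*
 * Build a "major,minor<post>" name (e.g. "008,032_q2q") used as the
 * base for per-device output files; str must be large enough to hold
 * the result.
 */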
static inline char *mkhandle(char *str, __u32 device, char *post)
{
	int mjr = device >> MINORBITS;
	int mnr = device & ((1 << MINORBITS) - 1);

	sprintf(str, "%03d,%03d%s", mjr, mnr, post);
	return str;
}

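/* Open a per-IO tree output file, reporting any failure via perror(). */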
static inline FILE *open_pit(char *str)
{
	FILE *fp = my_fopen(str, "w");

	if (fp == NULL)
		perror(str);

	return fp;
}

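/*
 * Find the d_info for a device, creating and initializing it on first
 * sight (RB trees, region/seek/latency/histogram handles, output
 * files).  Traces arriving before the device's first Q or A action are
 * culled by returning NULL; otherwise the io is inserted into the
 * per-type RB tree and the device's end time is advanced.
 */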
struct d_info *dip_alloc(__u32 device, struct io *iop)
{
	struct d_info *dip = __dip_find(device);

	if (dip == NULL) {
		char str[256];

		dip = malloc(sizeof(struct d_info));
		memset(dip, 0, sizeof(*dip));
		dip->heads = dip_rb_mkhds();
		region_init(&dip->regions);
		dip->device = device;
		dip->last_q = (__u64)-1;
		dip->devmap = dev_map_find(device);
		dip->bno_dump_handle = bno_dump_alloc(device);
		dip->up_hist_handle = unplug_hist_alloc(device);
		dip->seek_handle = seeki_alloc(mkhandle(str, device, "_d2d"));
		dip->q2q_handle = seeki_alloc(mkhandle(str, device, "_q2q"));
		dip->aqd_handle = aqd_alloc(mkhandle(str, device, "_aqd"));
		dip->q2d_plat_handle =
				plat_alloc(mkhandle(str, device, "_q2d_plat"));
		dip->q2c_plat_handle =
				plat_alloc(mkhandle(str, device, "_q2c_plat"));
		dip->d2c_plat_handle =
				plat_alloc(mkhandle(str, device, "_d2c_plat"));
		latency_alloc(dip);
		list_add_tail(&dip->hash_head, &dev_heads[DEV_HASH(device)]);
		list_add_tail(&dip->all_head, &all_devs);
		dip->start_time = BIT_TIME(iop->t.time);
		dip->pre_culling = 1;
		if (output_all_data)
			dip->q2d_priv = q2d_alloc();
		n_devs++;
		if (per_io_trees)
			dip->pit_fp = open_pit(mkhandle(per_io_trees,
							  device, "_pit.dat"));
	}

	if (dip->pre_culling) {
		if (iop->type == IOP_Q || iop->type == IOP_A)
			dip->pre_culling = 0;
		else
			return NULL;
	}

	iop->linked = dip_rb_ins(dip, iop);
	dip->end_time = BIT_TIME(iop->t.time);

	return dip;
}

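/* Unlink an io from its device's RB tree, if it was ever inserted. */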
void iop_rem_dip(struct io *iop)
{
	if (iop->linked) {
		dip_rb_rem(iop);
		iop->linked = 0;
	}
}

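/*
 * Walk the RB tree of the given type on iop's device via dip_rb_fe(),
 * applying fnc to each io it matches against iop; with rm_after set,
 * the matched ios are gathered on a local list and released once the
 * walk completes.
 */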
void dip_foreach(struct io *iop, enum iop_type type,
		 void (*fnc)(struct io *iop, struct io *this), int rm_after)
{
	if (rm_after) {
		LIST_HEAD(head);
		struct io *this;
		struct list_head *p, *q;

		dip_rb_fe(iop->dip, type, iop, fnc, &head);
		list_for_each_safe(p, q, &head) {
			this = list_entry(p, struct io, f_head);
			list_del(&this->f_head);
			io_release(this);
		}
	} else
		dip_rb_fe(iop->dip, type, iop, fnc, NULL);
}

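/* Collect the matching ios of the given type onto the caller's list. */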
void dip_foreach_list(struct io *iop, enum iop_type type, struct list_head *hd)
{
	dip_rb_fe(iop->dip, type, iop, NULL, hd);
}

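/* Look up an io of the given type by sector in the device's RB tree. */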
struct io *dip_find_sec(struct d_info *dip, enum iop_type type, __u64 sec)
{
	return dip_rb_find_sec(dip, type, sec);
}

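/*
 * Invoke func for each device being reported on: every device seen in
 * the trace, or, when the "devices" option string is set, only the
 * "major,minor" pairs (';'-separated) listed there.
 */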
void dip_foreach_out(void (*func)(struct d_info *, void *), void *arg)
{
	if (devices == NULL) {
		struct list_head *p;
		__list_for_each(p, &all_devs)
			func(list_entry(p, struct d_info, all_head), arg);
	} else {
		int i;
		struct d_info *dip;
		unsigned int mjr, mnr;
		char *p = devices;

		while (p && ((i = sscanf(p, "%u,%u", &mjr, &mnr)) == 2)) {
			dip = __dip_find((__u32)((mjr << MINORBITS) | mnr));
			func(dip, arg);
			p = strchr(p, ';');
			if (p) p++;
		}
	}
}

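/* Mark a device as plugged and remember when the plug happened. */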
void dip_plug(__u32 dev, double cur_time)
{
	struct d_info *dip = __dip_find(dev);

	if (dip && !dip->is_plugged) {
		dip->is_plugged = 1;
		dip->last_plug = cur_time;
	}
}

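/* Clear the plugged state and accumulate the time spent plugged. */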
static inline void unplug(struct d_info *dip, double cur_time)
{
	dip->is_plugged = 0;
	dip->plugged_time += (cur_time - dip->last_plug);
}

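/* Account for an unplug: bump the plug count and the IOs released. */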
void dip_unplug(__u32 dev, double cur_time, __u64 nios_up)
{
	struct d_info *dip = __dip_find(dev);

	if (dip && dip->is_plugged) {
		dip->nplugs++;
		dip->nios_up += nios_up;
		unplug(dip, cur_time);
	}
}

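/*
 * Unplug accounted separately via the _t counters (presumably the
 * timer-driven unplug path, going by the _tm naming).
 */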
void dip_unplug_tm(__u32 dev, double cur_time, __u64 nios_up)
{
	struct d_info *dip = __dip_find(dev);

	if (dip && dip->is_plugged) {
		dip->nios_upt += nios_up;
		dip->nplugs_t++;
		unplug(dip, cur_time);
	}
}

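/* Drop any device that never saw a queue or device-issue action. */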
void dip_cleanup(void)
{
	struct list_head *p, *q;

	list_for_each_safe(p, q, &all_devs) {
		struct d_info *dip = list_entry(p, struct d_info, all_head);

		if (dip->n_qs == 0 && dip->n_ds == 0)
			__dip_exit(dip);
	}
}