/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle (at) hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* True for devices handled by a remapper: MD (major 9) or device-mapper
 * (majors 253/254). */
static inline int remapper_dev(__u32 dev)
{
	int mjr = MAJOR(dev);
	return mjr == 9 || mjr == 253 || mjr == 254;
}

static inline void region_init(struct region_info *reg)
{
	INIT_LIST_HEAD(&reg->qranges);
	INIT_LIST_HEAD(&reg->cranges);
}

static inline void __region_exit(struct list_head *range_head)
{
	struct list_head *p, *q;
	struct range_info *rip;

	list_for_each_safe(p, q, range_head) {
		rip = list_entry(p, struct range_info, head);
		free(rip);
	}
}

static inline void region_exit(struct region_info *reg)
{
	__region_exit(&reg->qranges);
	__region_exit(&reg->cranges);
}

/*
 * Extend the most recent range when the new time falls within range_delta
 * of its end; otherwise start a new range.
 */
static inline void update_range(struct list_head *head_p, __u64 time)
{
	struct range_info *rip;

	if (!list_empty(head_p)) {
		rip = list_entry(head_p->prev, struct range_info, head);

		if (time < rip->end)
			return;

		if (BIT_TIME(time - rip->end) < range_delta) {
			rip->end = time;
			return;
		}
	}

	rip = malloc(sizeof(*rip));
	rip->start = rip->end = time;
	list_add_tail(&rip->head, head_p);
}

static inline void update_qregion(struct region_info *reg, __u64 time)
{
	update_range(&reg->qranges, time);
}

static inline void update_cregion(struct region_info *reg, __u64 time)
{
	update_range(&reg->cranges, time);
}

/* Accumulate one sample: track min, max, total and count. */
static inline void avg_update(struct avg_info *ap, __u64 t)
{
	if (ap->n++ == 0)
		ap->min = ap->total = ap->max = t;
	else {
		if (t < ap->min)
			ap->min = t;
		else if (t > ap->max)
			ap->max = t;
		ap->total += t;
	}
}

/* Accumulate the same sample value n times in one call. */
static inline void avg_update_n(struct avg_info *ap, __u64 t, int n)
{
	if (ap->n == 0) {
		ap->min = ap->max = t;
		ap->total = (n * t);
	} else {
		if (t < ap->min)
			ap->min = t;
		else if (t > ap->max)
			ap->max = t;
		ap->total += (n * t);
	}

	ap->n += n;
}

static inline void avg_unupdate(struct avg_info *ap, __u64 t)
{
	ap->n--;
	ap->total -= t;
}

/* Record the queue-to-queue (Q2Q) gap since the previous queue trace. */
static inline void update_lq(__u64 *last_q, struct avg_info *avg, __u64 time)
{
	if (*last_q != ((__u64)-1))
		avg_update(avg, (time > *last_q) ?
					time - *last_q : 1);
	*last_q = time;
}
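
/*
 * Illustrative sketch (not part of the original header): how the avg_update()
 * helpers above are typically consumed.  The field names (n, min, max, total)
 * are taken from the code above; computing the mean as total / n is an
 * assumption about how callers report the average.
 *
 *	struct avg_info d2c = { 0 };
 *	avg_update(&d2c, 150);		// n=1, min=max=total=150
 *	avg_update(&d2c, 50);		// n=2, min=50, max=150, total=200
 *	avg_update_n(&d2c, 100, 2);	// n=4, total=400
 *	// mean = d2c.total / d2c.n == 100, in the caller's time units
 */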

static inline void dip_update_q(struct d_info *dip, struct io *iop)
{
	if (remapper_dev(dip->device))
		update_lq(&dip->last_q, &dip->avgs.q2q_dm, iop->t.time);
	else
		update_lq(&dip->last_q, &dip->avgs.q2q, iop->t.time);
	update_qregion(&dip->regions, iop->t.time);
}

static inline struct io *io_alloc(void)
{
	struct io *iop = malloc(sizeof(*iop));

	memset(iop, 0, sizeof(struct io));
	list_add_tail(&iop->a_head, &all_ios);

	return iop;
}

static inline void io_free(struct io *iop)
{
	list_del(&iop->a_head);
	free(iop);
}

static inline void io_free_all(void)
{
	struct io *iop;
	struct list_head *p, *q;

	list_for_each_safe(p, q, &all_ios) {
		iop = list_entry(p, struct io, a_head);
		free(iop);
	}
}

static inline int io_setup(struct io *iop, enum iop_type type)
{
	iop->type = type;
	iop->dip = dip_alloc(iop->t.device, iop);
	if (iop->linked) {
		iop->pip = find_process(iop->t.pid, NULL);
		iop->bytes_left = iop->t.bytes;
	}

	return iop->linked;
}

static inline void io_release(struct io *iop)
{
	if (iop->linked)
		iop_rem_dip(iop);
	if (iop->pdu)
		free(iop->pdu);

	io_free(iop);
}

/*
 * Update a per-stage average in all three scopes at once: global,
 * per-device, and (when known) per-process.
 */
#define UPDATE_AVGS(_avg, _iop, _pip, _time) do {			\
		avg_update(&all_avgs. _avg , _time);			\
		avg_update(&_iop->dip->avgs. _avg , _time);		\
		if (_pip) avg_update(&_pip->avgs. _avg , _time);	\
	} while (0)

#define UPDATE_AVGS_N(_avg, _iop, _pip, _time, _n) do {			\
		avg_update_n(&all_avgs. _avg , _time, _n);		\
		avg_update_n(&_iop->dip->avgs. _avg , _time, _n);	\
		if (_pip) avg_update_n(&_pip->avgs. _avg , _time, _n);	\
	} while (0)

#define UNUPDATE_AVGS(_avg, _iop, _pip, _time) do {			\
		avg_unupdate(&all_avgs. _avg , _time);			\
		avg_unupdate(&_iop->dip->avgs. _avg , _time);		\
		if (_pip) avg_unupdate(&_pip->avgs. _avg , _time);	\
	} while (0)
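
/*
 * Illustrative note (not from the original source): a call such as
 *
 *	UPDATE_AVGS(d2c, iop, iop->pip, c_time);
 *
 * folds c_time into three avg_info accumulators -- the global all_avgs.d2c,
 * the per-device iop->dip->avgs.d2c and, if a process is attached, the
 * per-process iop->pip->avgs.d2c.  UNUPDATE_AVGS is the inverse: it backs a
 * previously recorded sample out of the same accumulators via avg_unupdate().
 */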

static inline void update_q2c(struct io *iop, __u64 c_time)
{
	if (remapper_dev(iop->dip->device))
		UPDATE_AVGS(q2c_dm, iop, iop->pip, c_time);
	else
		UPDATE_AVGS(q2c, iop, iop->pip, c_time);
}

static inline void update_q2a(struct io *iop, __u64 a_time)
{
	if (remapper_dev(iop->dip->device))
		UPDATE_AVGS(q2a_dm, iop, iop->pip, a_time);
	else
		UPDATE_AVGS(q2a, iop, iop->pip, a_time);
}

static inline void update_q2g(struct io *iop, __u64 g_time)
{
	UPDATE_AVGS(q2g, iop, iop->pip, g_time);
}

static inline void update_s2g(struct io *iop, __u64 g_time)
{
	UPDATE_AVGS(s2g, iop, iop->pip, g_time);
}

static inline void unupdate_q2g(struct io *iop, __u64 g_time)
{
	UNUPDATE_AVGS(q2g, iop, iop->pip, g_time);
}

static inline void update_g2i(struct io *iop, __u64 i_time)
{
	UPDATE_AVGS(g2i, iop, iop->pip, i_time);
}

static inline void unupdate_g2i(struct io *iop, __u64 i_time)
{
	UNUPDATE_AVGS(g2i, iop, iop->pip, i_time);
}

static inline void update_q2m(struct io *iop, __u64 m_time)
{
	UPDATE_AVGS(q2m, iop, iop->pip, m_time);
}

static inline void unupdate_q2m(struct io *iop, __u64 m_time)
{
	UNUPDATE_AVGS(q2m, iop, iop->pip, m_time);
}

static inline void update_i2d(struct io *iop, __u64 d_time)
{
	UPDATE_AVGS(i2d, iop, iop->pip, d_time);
}

static inline void unupdate_i2d(struct io *iop, __u64 d_time)
{
	UNUPDATE_AVGS(i2d, iop, iop->pip, d_time);
}

static inline void update_m2d(struct io *iop, __u64 d_time)
{
	UPDATE_AVGS(m2d, iop, iop->pip, d_time);
}

static inline void unupdate_m2d(struct io *iop, __u64 d_time)
{
	UNUPDATE_AVGS(m2d, iop, iop->pip, d_time);
}

static inline void update_d2c(struct io *iop, __u64 c_time)
{
	UPDATE_AVGS(d2c, iop, iop->pip, c_time);
}

static inline void update_blks(struct io *iop)
{
	__u64 nblks = iop->t.bytes >> 9;
	avg_update(&all_avgs.blks, nblks);
	avg_update(&iop->dip->avgs.blks, nblks);
	if (iop->pip)
		avg_update(&iop->pip->avgs.blks, nblks);
}

static inline struct rb_root *__get_root(struct d_info *dip, enum iop_type type)
{
	struct rb_root *roots = dip->heads;
	return &roots[type];
}

static inline int dip_rb_ins(struct d_info *dip, struct io *iop)
{
	return rb_insert(__get_root(dip, iop->type), iop);
}

static inline void dip_rb_rem(struct io *iop)
{
	rb_erase(&iop->rb_node, __get_root(iop->dip, iop->type));
}

static inline void dip_rb_fe(struct d_info *dip, enum iop_type type,
			     struct io *iop,
			     void (*fnc)(struct io *iop, struct io *this),
			     struct list_head *head)
{
	rb_foreach(__get_root(dip, type)->rb_node, iop, fnc, head);
}

static inline struct io *dip_rb_find_sec(struct d_info *dip,
					 enum iop_type type, __u64 sec)
{
	return rb_find_sec(__get_root(dip, type), sec);
}

static inline __u64 tdelta(__u64 from, __u64 to)
{
	return (from < to) ? (to - from) : 1;
}
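
/*
 * Explanatory note (not in the original source): the two-letter stage names
 * above pair blktrace action codes -- Q (queued), G (get request),
 * I (inserted), M (merged), A (remapped), D (issued) and C (completed) --
 * so, for example, q2g measures queueing to request allocation, i2d
 * insertion to issue, and d2c issue to completion.  The unupdate_* variants
 * remove a previously recorded sample from the accumulators, presumably when
 * later events (such as merges) invalidate it.
 */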

/* Map an internal iop_type to the single-character blktrace action code. */
static inline int type2c(enum iop_type type)
{
	int c;

	switch (type) {
	case IOP_Q: c = 'Q'; break;
	case IOP_X: c = 'X'; break;
	case IOP_A: c = 'A'; break;
	case IOP_I: c = 'I'; break;
	case IOP_M: c = 'M'; break;
	case IOP_D: c = 'D'; break;
	case IOP_C: c = 'C'; break;
	case IOP_R: c = 'R'; break;
	case IOP_G: c = 'G'; break;
	default   : c = '?'; break;
	}

	return c;
}

/* Convert a byte count to a 512-byte-block histogram bucket, capping
 * oversized I/Os in the last bucket. */
static inline int histo_idx(__u64 nbytes)
{
	int idx = (nbytes >> 9) - 1;
	return min(idx, N_HIST_BKTS-1);
}

static inline void update_q_histo(__u64 nbytes)
{
	q_histo[histo_idx(nbytes)]++;
}

static inline void update_d_histo(__u64 nbytes)
{
	d_histo[histo_idx(nbytes)]++;
}

static inline struct io *io_first_list(struct list_head *head)
{
	if (list_empty(head))
		return NULL;

	return list_entry(head->next, struct io, f_head);
}

static inline void __dump_iop(FILE *ofp, struct io *iop, int extra_nl)
{
	fprintf(ofp, "%5d.%09lu %3d,%-3d %c %10llu+%-4u\n",
		(int)SECONDS(iop->t.time),
		(unsigned long)NANO_SECONDS(iop->t.time),
		MAJOR(iop->t.device), MINOR(iop->t.device), type2c(iop->type),
		(unsigned long long)iop->t.sector, t_sec(&iop->t));
	if (extra_nl) fprintf(ofp, "\n");
}

static inline void __dump_iop2(FILE *ofp, struct io *a_iop, struct io *l_iop)
{
	fprintf(ofp, "%5d.%09lu %3d,%-3d %c %10llu+%-4u <- (%3d,%-3d) %10llu\n",
		(int)SECONDS(a_iop->t.time),
		(unsigned long)NANO_SECONDS(a_iop->t.time),
		MAJOR(a_iop->t.device), MINOR(a_iop->t.device),
		type2c(a_iop->type), (unsigned long long)a_iop->t.sector,
		t_sec(&a_iop->t), MAJOR(l_iop->t.device),
		MINOR(l_iop->t.device), (unsigned long long)l_iop->t.sector);
}
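
/*
 * Illustrative output (constructed, not captured from a real trace): with the
 * format string above, __dump_iop() emits roughly one line per trace such as
 *
 *	   12.345678901   8,0   Q     123456+8
 *
 * i.e. seconds.nanoseconds, major,minor, action character, then sector+size,
 * with the size in 512-byte sectors as returned by t_sec().  __dump_iop2()
 * prints the same fields for one I/O followed by " <- (maj,min) sector" for
 * a second, linked I/O.
 */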