/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>

#include "flist.h"
#include "fio.h"
#include "blktrace_api.h"
#include "lib/linux-dev-lookup.h"

#define TRACE_FIFO_SIZE	8192

/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
	char buf[TRACE_FIFO_SIZE];
	unsigned int total;
	int ret;

	total = sizeof(buf);
	if (total > fifo_room(fifo))
		total = fifo_room(fifo);

	ret = read(fd, buf, total);
	if (ret < 0) {
		td_verror(td, errno, "read blktrace file");
		return -1;
	}

	if (ret > 0)
		ret = fifo_put(fifo, buf, ret);

	dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
	return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
			  void *buf, unsigned int len)
{
	if (fifo_len(fifo) < len) {
		int ret = refill_fifo(td, fifo, fd);

		if (ret < 0)
			return ret;
	}

	return fifo_get(fifo, buf, len);
}

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
		       struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return 0;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return 0;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return 0;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return 1;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return 1;
	}

	return 0;
}

#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

static int trace_add_file(struct thread_data *td, __u32 device)
{
	static unsigned int last_maj, last_min, last_fileno;
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (last_maj == maj && last_min == min)
		return last_fileno;

	last_maj = maj;
	last_min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			last_fileno = f->fileno;
			return last_fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add device %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		last_fileno = fileno;
	}

	return last_fileno;
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	init_ipo(ipo);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = offset * 512;
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
							ipo->ddir, ipo->offset,
							ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}

static void handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
}

static void handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned int *bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));
	int fileno;

	init_ipo(ipo);
	fileno = trace_add_file(td, t->device);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = t->sector * 512;
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
							ipo->offset, ipo->len,
							ipo->delay);
	queue_io_piece(td, ipo);
}

static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned int *bs)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
}

/*
 * We only care about queue traces; most of the others are side effects
 * of the internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long *ios, unsigned int *bs)
{
	static unsigned long long last_ttime;
	unsigned long long delay;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		if (!last_ttime || td->o.no_stall) {
			last_ttime = t->time;
			delay = 0;
		} else {
			delay = t->time - last_ttime;
			last_ttime = t->time;
		}
	}

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		handle_trace_discard(td, t, delay, ios, bs);
	else
		handle_trace_fs(td, t, delay, ios, bs);
}

static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static int t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth, depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	/*
	 * Initialize all directions; discard traces account against
	 * the TRIM slots as well.
	 */
	ios[DDIR_READ] = ios[DDIR_WRITE] = ios[DDIR_TRIM] = 0;
	rw_bs[DDIR_READ] = rw_bs[DDIR_WRITE] = rw_bs[DDIR_TRIM] = 0;
	skipped_writes = 0;
	this_depth = depth = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				this_depth++;
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
				depth = max(depth, this_depth);
				this_depth = 0;
			}

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	if (!depth || depth > 1024)
		depth = 1024;

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
			td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	/*
	 * we don't know if this option was set or not. it defaults to 1,
	 * so we'll just guess that we should override it if it's still 1
	 */
	if (td->o.iodepth == 1)
		td->o.iodepth = td->o.iodepth_low = depth;

	return 0;
err:
	/*
	 * Undo the runstate bump before bailing out on error.
	 */
	td_restore_runstate(td, old_state);
	close(fd);
	fifo_free(fifo);
	return 1;
}