Lines matching references to td (struct thread_data) in fio's verify code

30 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
33 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
36 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
40 void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
42 (void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
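
The fragments above show fill_buffer_pattern() tiling td->o.buffer_pattern across the I/O buffer via cpy_pattern(). A minimal sketch of that kind of tiling, with illustrative names (pattern_fill is not fio's API; assumes pattern_len > 0):

#include <string.h>

static void pattern_fill(const void *pattern, unsigned int pattern_len,
                         void *buf, unsigned int len)
{
        char *p = buf;

        while (len) {
                /* copy at most one full pattern repetition per pass */
                unsigned int n = len < pattern_len ? len : pattern_len;

                memcpy(p, pattern, n);
                p += n;
                len -= n;
        }
}
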
51 static unsigned long fill_buffer(struct thread_data *td, void *p,
54 struct frand_state *fs = &td->verify_state;
55 struct thread_options *o = &td->o;
60 void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
63 struct thread_options *o = &td->o;
71 io_u->rand_seed = fill_buffer(td, p, len);
77 if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len) {
83 (void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
84 td->o.verify_fmt, td->o.verify_fmt_sz,
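
fill_buffer() draws pseudo-random contents from td->verify_state and returns the seed so it can be recorded in io_u->rand_seed; fill_verify_pattern() falls back to it when no fixed pattern is configured. A sketch of a seeded fill where the recorded seed lets a verifier regenerate the data; POSIX rand_r() stands in for fio's frand_state, and all names here are hypothetical:

#include <stdlib.h>

/* Fill buf with len bytes derived from seed; return the seed so the
 * caller can store it alongside the I/O unit for later verification. */
static unsigned int fill_random(void *buf, unsigned int len, unsigned int seed)
{
        unsigned char *p = buf;
        unsigned int s = seed;

        for (unsigned int i = 0; i < len; i++)
                p[i] = (unsigned char) rand_r(&s);

        return seed;
}
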
89 static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
94 if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen)
95 hdr_inc = td->o.verify_interval;
100 static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
107 fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
109 hdr_inc = get_hdr_inc(td, io_u);
113 populate_hdr(td, io_u, hdr, header_num, hdr_inc);
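
get_hdr_inc() picks the header spacing: verify_interval when it fits inside the buffer, otherwise the whole buflen; fill_pattern_headers() then stamps one header per interval. A sketch of that layout, assuming hdr_inc is at least sizeof(struct vhdr) and divides buflen evenly (struct and names are illustrative, not fio's):

#include <stdint.h>
#include <string.h>

struct vhdr {
        uint32_t magic;
        uint32_t seq;
};

/* One header at the start of every hdr_inc-sized chunk of the buffer. */
static void stamp_headers(void *buf, unsigned int buflen, unsigned int hdr_inc)
{
        uint32_t seq = 0;

        for (unsigned int off = 0; off < buflen; off += hdr_inc) {
                struct vhdr h = { .magic = 0xacca, .seq = seq++ };

                memcpy((char *)buf + off, &h, sizeof(h));
        }
}
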
204 static inline unsigned int hdr_size(struct thread_data *td,
207 if (td->o.verify == VERIFY_PATTERN_NO_HDR)
230 struct thread_data *td;
300 struct thread_data *td = vc->td;
306 if (!td->o.verify_dump)
327 fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
336 struct thread_data *td = vc->td;
339 if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
340 __fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
371 return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
376 struct thread_data *td = vc->td;
379 unsigned int header_size = __hdr_size(td->o.verify);
383 pattern = td->o.verify_pattern;
384 pattern_size = td->o.verify_pattern_bytes;
388 td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
391 len = get_hdr_inc(td, io_u) - header_size;
392 mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
414 if (mod == td->o.verify_pattern_bytes)
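
Lines 391-392 above compute where the repeating pattern resumes inside each header's payload: mod is the pattern phase at the first payload byte of this header. A sketch of phase-aware comparison against a repeating pattern (names illustrative):

#include <stddef.h>

/* Compare buf against a repeating pattern, starting phase bytes into
 * the pattern; returns 0 on match, -1 on the first mismatch. */
static int cmp_pattern(const unsigned char *buf, size_t len,
                       const unsigned char *pat, size_t pat_len, size_t phase)
{
        for (size_t i = 0; i < len; i++) {
                if (buf[i] != pat[(phase + i) % pat_len])
                        return -1;
        }
        return 0;
}
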
433 XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
455 fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
537 fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
562 fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
588 fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
610 c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));
631 c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));
652 c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));
673 c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));
694 c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));
719 fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
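
The long run of fio_*_update() calls above all share one shape: hash only the payload, i.e. hdr->len minus hdr_size(), so the header itself is excluded from its own checksum. A sketch with a trivial stand-in checksum in place of md5/sha/crc/xxhash:

#include <stddef.h>
#include <stdint.h>

/* Checksum the bytes following the header: hdr_len covers the whole
 * block, hdr_size is how much of it the header occupies. */
static uint32_t payload_sum(const void *blk, size_t hdr_len, size_t hdr_size)
{
        const uint8_t *p = (const uint8_t *)blk + hdr_size;
        uint32_t sum = 0;

        for (size_t i = 0; i < hdr_len - hdr_size; i++)
                sum = sum * 31 + p[i];

        return sum;
}
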
736 int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
740 pthread_mutex_lock(&td->io_u_lock);
743 put_file_log(td, io_u->file);
746 td->cur_depth--;
747 io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
749 flist_add_tail(&io_u->verify_list, &td->verify_list);
751 pthread_mutex_unlock(&td->io_u_lock);
753 pthread_cond_signal(&td->verify_cond);
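
verify_io_u_async() hands an io_u to the async verifiers: take td->io_u_lock, append the unit to td->verify_list, unlock, then signal td->verify_cond. A self-contained sketch of that handoff with generic types (node, queue_work, and the globals are hypothetical):

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        /* ... payload to verify ... */
};

static struct node *work_head;
static struct node **work_tail = &work_head;
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cond = PTHREAD_COND_INITIALIZER;

static void queue_work(struct node *n)
{
        n->next = NULL;
        pthread_mutex_lock(&work_lock);
        *work_tail = n;                 /* append at the tail, in the
                                         * spirit of flist_add_tail() */
        work_tail = &n->next;
        pthread_mutex_unlock(&work_lock);
        pthread_cond_signal(&work_cond);   /* wake one verifier */
}
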
797 static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
801 if (!td->o.trim_zero)
816 static int verify_header(struct io_u *io_u, struct thread_data *td,
839 if (hdr->offset != io_u->offset + hdr_num * td->o.verify_interval) {
854 if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
855 !td->o.time_based)
856 if (!td->o.verify_only || td->o.loops == 0)
877 if (td->o.verify_dump)
884 int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
892 if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
898 if (td_ioengine_flagged(td, FIO_FAKEIO))
902 ret = verify_trimmed_io_u(td, io_u);
906 hdr_inc = get_hdr_inc(td, io_u);
914 .td = td,
918 if (ret && td->o.verify_fatal)
921 header_size = __hdr_size(td->o.verify);
922 if (td->o.verify_offset)
923 memswp(p, p + td->o.verify_offset, header_size);
930 if (td->o.verifysort || (td->flags & TD_F_VER_BACKLOG))
933 if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
934 ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
939 if (td->o.verify != VERIFY_NONE)
940 verify_type = td->o.verify;
948 if (td->o.verify_pattern_bytes)
1009 if (ret && td->o.verify_fatal)
1010 fio_mark_td_terminate(td);
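
The memswp() at line 923 undoes the verify_offset relocation: the header was written verify_offset bytes into the block and is swapped back to the front before checking. A sketch of such a byte-range swap, assuming the two ranges do not overlap:

#include <stddef.h>

static void swap_ranges(unsigned char *a, unsigned char *b, size_t len)
{
        for (size_t i = 0; i < len; i++) {
                unsigned char t = a[i];

                a[i] = b[i];
                b[i] = t;
        }
}
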
1157 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
1164 hdr->verify_type = td->o.verify;
1167 hdr->offset = io_u->offset + header_num * td->o.verify_interval;
1170 hdr->thread = td->thread_number;
1176 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
1181 if (td->o.verify != VERIFY_PATTERN_NO_HDR)
1182 __fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
1185 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
1194 fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
1196 data_len = header_len - hdr_size(td, hdr);
1198 data = p + hdr_size(td, hdr);
1199 switch (td->o.verify) {
1277 log_err("fio: bad verify type: %d\n", td->o.verify);
1281 if (td->o.verify_offset && hdr_size(td, hdr))
1282 memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
1289 void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1291 if (td->o.verify == VERIFY_NULL)
1294 io_u->numberio = td->io_issues[io_u->ddir];
1296 fill_pattern_headers(td, io_u, 0, 0);
1299 int get_next_verify(struct thread_data *td, struct io_u *io_u)
1309 if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1310 struct rb_node *n = rb_first(&td->io_hist_tree);
1321 rb_erase(n, &td->io_hist_tree);
1324 } else if (!flist_empty(&td->io_hist_list)) {
1325 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1340 td->io_hist_len--;
1346 io_u_set(td, io_u, IO_U_F_VER_LIST);
1349 io_u_set(td, io_u, IO_U_F_TRIMMED);
1352 int r = td_io_open_file(td, io_u->file);
1367 remove_trim_entry(td, ipo);
1371 if (!td->o.verify_pattern_bytes) {
1372 io_u->rand_seed = __rand(&td->verify_state);
1374 io_u->rand_seed *= __rand(&td->verify_state);
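
When no fixed pattern is set, get_next_verify() re-draws rand_seed from td->verify_state so the read-back side can regenerate the expected bytes from that seed. The counterpart to the earlier fill sketch, again with rand_r() as a stand-in PRNG and hypothetical names:

#include <stdlib.h>

/* Regenerate the expected contents from the recorded seed and compare;
 * returns 0 on match, -1 on the first mismatching byte. */
static int verify_random(const void *buf, unsigned int len, unsigned int seed)
{
        const unsigned char *p = buf;
        unsigned int s = seed;

        for (unsigned int i = 0; i < len; i++)
                if (p[i] != (unsigned char) rand_r(&s))
                        return -1;
        return 0;
}
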
1384 void fio_verify_init(struct thread_data *td)
1386 if (td->o.verify == VERIFY_CRC32C_INTEL ||
1387 td->o.verify == VERIFY_CRC32C) {
1395 struct thread_data *td = data;
1399 if (fio_option_is_set(&td->o, verify_cpumask) &&
1400 fio_setaffinity(td->pid, td->o.verify_cpumask)) {
1409 if (td->verify_thread_exit)
1412 pthread_mutex_lock(&td->io_u_lock);
1414 while (flist_empty(&td->verify_list) &&
1415 !td->verify_thread_exit) {
1416 ret = pthread_cond_wait(&td->verify_cond,
1417 &td->io_u_lock);
1419 pthread_mutex_unlock(&td->io_u_lock);
1424 flist_splice_init(&td->verify_list, &list);
1425 pthread_mutex_unlock(&td->io_u_lock);
1434 io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
1435 ret = verify_io_u(td, &io_u);
1437 put_io_u(td, io_u);
1440 if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
1441 update_error_count(td, ret);
1442 td_clear_error(td);
1449 td_verror(td, ret, "async_verify");
1450 if (td->o.verify_fatal)
1451 fio_mark_td_terminate(td);
1455 pthread_mutex_lock(&td->io_u_lock);
1456 td->nr_verify_threads--;
1457 pthread_mutex_unlock(&td->io_u_lock);
1459 pthread_cond_signal(&td->free_cond);
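
verify_async_thread() is the consumer side of the handoff: sleep on td->verify_cond while td->verify_list is empty, then splice the entire list off under td->io_u_lock (flist_splice_init above) and do the verification outside the lock. A self-contained sketch of that loop, all names hypothetical:

#include <pthread.h>
#include <stddef.h>

struct vnode { struct vnode *next; };

static struct vnode *vlist;
static int vexit;
static pthread_mutex_t vlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vcond = PTHREAD_COND_INITIALIZER;

static void *verifier(void *arg)
{
        (void) arg;
        for (;;) {
                struct vnode *batch;

                pthread_mutex_lock(&vlock);
                while (!vlist && !vexit)
                        pthread_cond_wait(&vcond, &vlock);
                batch = vlist;          /* splice the whole list off */
                vlist = NULL;
                pthread_mutex_unlock(&vlock);

                if (!batch && vexit)
                        break;
                while (batch) {         /* verify outside the lock */
                        struct vnode *n = batch;

                        batch = n->next;
                        /* ... run the actual check on n ... */
                }
        }
        return NULL;
}
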
1463 int verify_async_init(struct thread_data *td)
1471 td->verify_thread_exit = 0;
1473 td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
1474 for (i = 0; i < td->o.verify_async; i++) {
1475 ret = pthread_create(&td->verify_threads[i], &attr,
1476 verify_async_thread, td);
1482 ret = pthread_detach(td->verify_threads[i]);
1488 td->nr_verify_threads++;
1493 if (i != td->o.verify_async) {
1495 td->verify_thread_exit = 1;
1497 pthread_cond_broadcast(&td->verify_cond);
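
verify_async_init() spawns o.verify_async worker threads and, when pthread_create() fails partway (the i != td->o.verify_async check above), flags the already-started workers to exit and broadcasts the condvar. A generic sketch of that rollback; unlike fio, which detaches its workers, this version joins them so the unwind is visible, and fn is assumed to poll pool_exit_flag:

#include <pthread.h>
#include <stdlib.h>

static volatile int pool_exit_flag;     /* workers are assumed to honor this */

static int spawn_pool(pthread_t **out, unsigned int nr,
                      void *(*fn)(void *), void *arg)
{
        pthread_t *tids = malloc(sizeof(*tids) * nr);
        unsigned int i;

        if (!tids)
                return -1;
        pool_exit_flag = 0;
        for (i = 0; i < nr; i++)
                if (pthread_create(&tids[i], NULL, fn, arg))
                        break;
        if (i != nr) {
                pool_exit_flag = 1;     /* unwind the ones that started */
                for (unsigned int j = 0; j < i; j++)
                        pthread_join(tids[j], NULL);
                free(tids);
                return -1;
        }
        *out = tids;
        return 0;
}
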
1504 void verify_async_exit(struct thread_data *td)
1506 td->verify_thread_exit = 1;
1508 pthread_cond_broadcast(&td->verify_cond);
1510 pthread_mutex_lock(&td->io_u_lock);
1512 while (td->nr_verify_threads)
1513 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1515 pthread_mutex_unlock(&td->io_u_lock);
1516 free(td->verify_threads);
1517 td->verify_threads = NULL;
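
verify_async_exit() raises the exit flag, broadcasts td->verify_cond to wake sleeping verifiers, then waits on td->free_cond until nr_verify_threads drops to zero; each exiting thread decrements the count and signals, as at line 1459. A sketch of that counted shutdown (names illustrative; the flag is set under the lock here for simplicity):

#include <pthread.h>

static pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pwork = PTHREAD_COND_INITIALIZER;
static pthread_cond_t pfree = PTHREAD_COND_INITIALIZER;
static int pexit;
static int plive;       /* number of verifier threads still running */

static void pool_shutdown(void)
{
        pthread_mutex_lock(&plock);
        pexit = 1;
        pthread_cond_broadcast(&pwork); /* wake sleeping verifiers */
        while (plive)                   /* each exiting thread decrements
                                         * plive and signals pfree */
                pthread_cond_wait(&pfree, &plock);
        pthread_mutex_unlock(&plock);
}
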
1532 static int __fill_file_completions(struct thread_data *td,
1542 if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
1543 comps = td->io_blocks[DDIR_WRITE];
1545 comps = td->o.iodepth;
1550 j = td->o.iodepth - 1;
1560 static int fill_file_completions(struct thread_data *td,
1567 for_each_file(td, f, i)
1568 comps += __fill_file_completions(td, s, f, index);
1576 struct thread_data *td;
1589 for_each_td(td, i) {
1592 td->stop_io = 1;
1593 td->flags |= TD_F_VSTATE_SAVED;
1594 depth += (td->o.iodepth * td->o.nr_files);
1610 for_each_td(td, i) {
1617 comps = fill_file_completions(td, s, &index);
1620 s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
1621 s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
1622 s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
1624 if (td->random_state.use64) {
1625 s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
1626 s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
1627 s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
1628 s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
1629 s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
1633 s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
1634 s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
1635 s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
1640 strncpy((char *) s->name, td->o.name, sizeof(s->name) - 1);
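
Every field in the saved verify state above is byte-swapped to little-endian (cpu_to_le64/cpu_to_le32), so a state file written on one architecture can be replayed on another. A sketch of such a helper, assuming the GCC/Clang __builtin_bswap64() builtin:

#include <stdint.h>

/* Return v in little-endian byte order regardless of host endianness. */
static uint64_t to_le64(uint64_t v)
{
        const union { uint16_t u; uint8_t b; } probe = { .u = 1 };

        if (probe.b)            /* host is already little-endian */
                return v;
        return __builtin_bswap64(v);
}
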
1736 void verify_free_state(struct thread_data *td)
1738 if (td->vstate)
1739 free(td->vstate);
1742 void verify_assign_state(struct thread_data *td, void *p)
1766 td->vstate = p;
1787 int verify_load_state(struct thread_data *td, const char *prefix)
1795 if (!td->o.verify_state)
1798 fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
1805 td_verror(td, errno, "read verify state hdr");
1824 td_verror(td, errno, "read verify state");
1837 verify_assign_state(td, s);
1849 int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
1851 struct thread_io_list *s = td->vstate;
1862 if ((td->io_blocks[DDIR_READ] < s->depth ||
1863 s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&