/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"

static char run_str[REAL_MAX_JOBS + 1];

/*
 * Sets the status of the 'td' in the printed status map.
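 * The characters used are: P=not yet started, C=created,
 * I=initializing/setting up, p=pre-reading, /=ramping,
 * R/W/D=sequential read/write/trim, r/w/d=random read/write/trim,
 * M/m=sequential/random mixed rw, V=verifying, F=fsyncing, f=finishing,
 * E=exited, _=reaped, X=exited with an error, K=killed by a signal.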
     14  */
     15 static void check_str_update(struct thread_data *td)
     16 {
     17 	char c = run_str[td->thread_number - 1];
     18 
     19 	switch (td->runstate) {
     20 	case TD_REAPED:
     21 		if (td->error)
     22 			c = 'X';
     23 		else if (td->sig)
     24 			c = 'K';
     25 		else
     26 			c = '_';
     27 		break;
     28 	case TD_EXITED:
     29 		c = 'E';
     30 		break;
     31 	case TD_RAMP:
     32 		c = '/';
     33 		break;
     34 	case TD_RUNNING:
     35 		if (td_rw(td)) {
     36 			if (td_random(td)) {
     37 				if (td->o.rwmix[DDIR_READ] == 100)
     38 					c = 'r';
     39 				else if (td->o.rwmix[DDIR_WRITE] == 100)
     40 					c = 'w';
     41 				else
     42 					c = 'm';
     43 			} else {
     44 				if (td->o.rwmix[DDIR_READ] == 100)
     45 					c = 'R';
     46 				else if (td->o.rwmix[DDIR_WRITE] == 100)
     47 					c = 'W';
     48 				else
     49 					c = 'M';
     50 			}
     51 		} else if (td_read(td)) {
     52 			if (td_random(td))
     53 				c = 'r';
     54 			else
     55 				c = 'R';
     56 		} else if (td_write(td)) {
     57 			if (td_random(td))
     58 				c = 'w';
     59 			else
     60 				c = 'W';
     61 		} else {
     62 			if (td_random(td))
     63 				c = 'd';
     64 			else
     65 				c = 'D';
     66 		}
     67 		break;
     68 	case TD_PRE_READING:
     69 		c = 'p';
     70 		break;
     71 	case TD_VERIFYING:
     72 		c = 'V';
     73 		break;
     74 	case TD_FSYNCING:
     75 		c = 'F';
     76 		break;
     77 	case TD_FINISHING:
     78 		c = 'f';
     79 		break;
     80 	case TD_CREATED:
     81 		c = 'C';
     82 		break;
     83 	case TD_INITIALIZED:
     84 	case TD_SETTING_UP:
     85 		c = 'I';
     86 		break;
     87 	case TD_NOT_CREATED:
     88 		c = 'P';
     89 		break;
     90 	default:
     91 		log_err("state %d\n", td->runstate);
     92 	}
     93 
     94 	run_str[td->thread_number - 1] = c;
     95 }
     96 
     97 /*
     98  * Convert seconds to a printable string.
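 * For example, an eta_sec of 93784 is rendered as "01d:02h:03m:04s"; the
 * day field only appears when days are non-zero, and the hour field when
 * hours or days are.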
 */
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

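	/*
	 * With zoned I/O, zone_skip bytes are skipped (not transferred)
	 * after each zone_size bytes, so deduct the skipped bytes from the
	 * total that the estimate is based on.
	 */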
	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * If we are writing and verifying afterwards, bytes_total will be
	 * twice the size. In a mixed workload, the verify phase will be the
	 * size of the first-stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit, we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

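		/*
		 * Extrapolate: if 'perc' of the work took 'elapsed' seconds,
		 * the whole job should take elapsed / perc, leaving
		 * elapsed / perc - elapsed seconds to go.
		 */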
		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = timeout + start_delay + ramp_time;
			t_eta /= 1000000ULL;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
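		/*
		 * The rate cap is treated as KB/sec here, so scale
		 * bytes_total down to KB before computing a rate-based
		 * estimate.
		 */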
		rate_bytes = ddir_rw_sum(td->o.rate);
		if (rate_bytes) {
			r_eta = (bytes_total / 1024) / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

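/*
 * Turn the byte counts accumulated over the last 'mtime' milliseconds
 * into per-direction bandwidth figures in KB/sec. With unified_rw_rep,
 * all directions are summed into the read slot.
 */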
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, unsigned int *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += ((1000 * diff) / mtime) / 1024;
		} else
			rate[i] = ((1000 * diff) / mtime) / 1024;

		prev_io_bytes[i] = io_bytes[i];
	}
}

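/*
 * Same as calc_rate(), but for IOPS: scale the block counts completed
 * over the last 'mtime' milliseconds to a per-second figure.
 */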
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_iops[i] - prev_io_iops[i];
		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += (diff * 1000) / mtime;
		} else
			iops[i] = (diff * 1000) / mtime;

		prev_io_iops[i] = io_iops[i];
	}
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

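	/*
	 * Previous samples for the bandwidth log and for the displayed
	 * rate/iops, kept across calls so deltas can be computed.
	 */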
	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL &&
		    f_out == stdout)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP) {
			je->nr_running++;
			je->nr_setting_up++;
		} else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

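		/*
		 * Don't trust a per-job ETA until the run has been going
		 * for a few seconds; report it as unknown (INT_MAX) before
		 * that.
		 */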
		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

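	/*
	 * With exitall_on_terminate, everything stops when the first job
	 * finishes, so the group ETA is the smallest per-job ETA. Otherwise
	 * the run lasts until the slowest job is done, so use the largest.
	 */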
	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	memcpy(je->run_str, run_str, thread_number * sizeof(char));
	return 1;
}

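/*
 * Format and print the one-line status display built by
 * calc_thread_status(): job counts, rate/iops caps, percentage done,
 * current rates and the ETA. The line is padded to cover the previous
 * output and ends with '\r' so the next update overwrites it in place.
 */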
void display_thread_status(struct jobs_eta *je)
{
	static struct timeval disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[REAL_MAX_JOBS + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
	if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1], 4, 0, je->is_pow2, 8);
		tr = num2str(je->t_rate[0] + je->t_rate[1], 4, 0, je->is_pow2, 8);
		p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->t_iops[0] || je->t_iops[1]) {
		p += sprintf(p, ", CR=%d/%d IOPS",
					je->t_iops[0] + je->t_iops[1],
					je->m_iops[0] + je->m_iops[1]);
	}
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 5,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, 0);
		}

		left = sizeof(output) - (p - output) - 1;

		l = snprintf(p, left, ": [%s] [%s] [%s/%s/%s /s] [%s/%s/%s iops] [eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		p += l;
		if (l >= 0 && l < linelen_last)
			p += sprintf(p, "%*s", linelen_last - l, "");
		linelen_last = l;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

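	/*
	 * If a periodic newline was requested (eta_new_line, in seconds),
	 * schedule one for the next update once that interval has elapsed.
	 */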
	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line &&
		   mtime_since_now(&disp_eta_new_line) > eta_new_line * 1000) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

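/*
 * Entry point for a periodic status update: allocate a jobs_eta large
 * enough to hold the per-job run string, fill it in and display it if
 * there is anything worth showing.
 */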
void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	if (!thread_number)
		return;

	size = sizeof(*je) + thread_number * sizeof(char) + 1;
	je = malloc(size);
	memset(je, 0, size);

	if (calc_thread_status(je, 0))
		display_thread_status(je);

	free(je);
}

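/*
 * Mark a job as pending ('P') in the status map before it is started.
 * The index passed in is expected to already be zero-based, matching the
 * thread_number - 1 indexing used in check_str_update().
 */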
void print_status_init(int thr_number)
{
	run_str[thr_number] = 'P';
}
    569