#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

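/*
 * Column width bookkeeping: each hists instance remembers the widest
 * value seen so far for every output column, so the display code can
 * size its columns before printing.
 */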
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

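/*
 * Grow the column widths to fit a single entry: symbol, comm, thread,
 * dso and parent, plus the branch and memory columns when the entry
 * carries branch_info/mem_info.
 */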
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

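/*
 * Credit the period to the right sys/user bucket, for the host or the
 * guest, based on the cpumode bits of the sample's event header.
 */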
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

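/*
 * Exponential decay: each pass keeps 7/8 of the period, so e.g. a
 * period of 800 drops to 700 after one round.  An entry that stops
 * getting samples thus fades out across refreshes (presumably for the
 * live top-style display) and is freed once fully decayed.
 */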
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

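/*
 * Allocate an entry from a stack-allocated template.  When callchains
 * are in use the callchain_root lives right past the struct, hence the
 * variable-sized allocation.
 */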
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of) the array allocated
			 * by machine__resolve_bstack() and will be freed
			 * after adding new entries, so we need to save a
			 * copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

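/*
 * Find the rbtree slot for 'entry'; if an entry with the same sort
 * keys already exists, just accumulate period/weight into it,
 * otherwise allocate and link a new node.
 */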
static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period,
				      u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

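/*
 * The three entry points below differ only in how the template is
 * filled in: from memory sample info, from branch stack info, or from
 * a plain resolved address location.
 */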
struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

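/*
 * Compare two entries over the configured sort keys, stopping at the
 * first key that differs; zero means both fall in the same bucket.
 */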
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

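/*
 * Insert 'he' into the collapsed tree; when an equal entry already
 * exists, merge the stats (and callchains) into it, free 'he' and
 * return false.  Returns true only if a new node was linked in.
 */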
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

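/*
 * entries_in is double buffered: under hists->lock, hand the current
 * input tree to the caller and flip new insertions over to the other
 * element of entries_in_array.
 */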
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

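/*
 * Order entries by period.  With event groups, ties fall back to the
 * member events' periods, gathered from the pair lists into
 * group-index arrays and compared member by member.
 */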
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

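/*
 * Rebuild hists->entries sorted for output, taking entries from either
 * the collapsed tree or the current input tree, and recompute the
 * entry count, total period and column widths along the way.
 */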
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

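/*
 * Clear one filter bit; only when no filter bits remain does the entry
 * become visible again and get re-accounted into the totals.
 */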
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

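/*
 * Add a zero-stat placeholder mirroring 'pair'; used by hists__link()
 * below so that every entry in 'other' gets a counterpart in the
 * leader hists.
 */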
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, add a dummy entry on the leader hists, with
 * period=0 and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}