/* linux/tools/perf/util/hist.c */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

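/*
 * Each enumerator is a bit index into hist_entry->filtered; an entry is
 * visible only while none of its filter bits are set.
 */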
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

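/*
 * Grow the column widths this entry needs.  Unresolved addresses print as
 * raw hex, hence the BITS_PER_LONG / 4 digits, plus the "[x] " privilege
 * level and "0x" prefix padding used below.
 */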
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for '0x' prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for '0x' prefix on raw addresses
		 */
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

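/*
 * Exponential decay for live views such as 'perf top': each pass keeps 7/8
 * of the accumulated period, so idle entries fade out over successive
 * refreshes.
 */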
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

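/*
 * Clone a template entry.  When callchains are in use, the callchain root
 * lives in flexible storage right behind the struct, hence the padded
 * allocation size.
 */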
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ?
				sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

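/*
 * Insert into the current input tree under hists->lock; if an equal entry
 * already exists, only its stats are bumped and no node is allocated.
 */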
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

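/*
 * Returns true if 'he' was inserted as a new node, false if it compared
 * equal to an existing entry and was folded into it (stats and callchains
 * merged, 'he' freed).
 */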
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

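/*
 * entries_in_array[] double-buffers the input tree: hand the current tree
 * to the caller for collapsing and flip new insertions over to the other
 * one, so sample collection can continue while we resort.
 */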
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

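/*
 * Compare on period; for event groups, break ties by walking the pair
 * lists and comparing the periods of each group member in turn.
 */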
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

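/*
 * Rebuild hists->entries sorted by period from the collapsed (or input)
 * tree, re-accounting totals and pruning callchains that fall below
 * min_percent of the total period.
 */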
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

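/*
 * Clear one filter bit; only when no bits remain set does the entry
 * re-enter the entry count, period total and column width accounting.
 */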
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

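/*
 * The hists__filter_entry_by_*() predicates set the corresponding filter
 * bit and return true when the entry must be hidden by the active filter.
 */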
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

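/*
 * Slot 0 is unused by record types (PERF_RECORD_MMAP is 1), so
 * nr_events[0] doubles as the total count across all types.
 */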
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

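/*
 * Add a zero-stat clone of 'pair', giving consumers such as 'perf diff' a
 * leader-side node to pair with even where the leader saw no samples.
 */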
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

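/*
 * A minimal sketch of how the matching pass is meant to be driven; the
 * 'leader_evsel'/'evsel' names are illustrative, not taken from this file:
 *
 *	hists__match(&leader_evsel->hists, &evsel->hists);
 *	if (hists__link(&leader_evsel->hists, &evsel->hists) < 0)
 *		return -1;
 */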
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}