/* /linux/tools/perf/util/hist.c (revision a8fe58cec351c25e09c393bf46117c0c47b5a17c) */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
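
/*
 * Worked example for the padding above (illustrative): on a 64-bit
 * build, BITS_PER_LONG / 4 == 16 hex digits, so a resolved symbol
 * printed in verbose mode needs namelen + 4 ('[k] ') + 16 (address)
 * + 2 ('0x') + 3 (' y ') columns, while an unresolved address needs
 * 16 + 4 + 2 columns.
 */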

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
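
/*
 * Illustrative decay math: each he_stat__decay() call retains 7/8
 * (87.5%) of the period, so after n calls period ~= period * (7/8)^n;
 * roughly five decay rounds halve an entry's weight in the output.
 */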

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if ((zap_user && n->level == '.') ||
		    (zap_kernel && n->level != '.') ||
		    hists__decay_entry(hists, n)) {
			hists__delete_entry(hists, n);
		}
	}
}
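
/*
 * Illustrative sketch (not part of this file): a perf-top style UI
 * refresh could periodically decay old entries and resort for display.
 * The zap flags below are the example's assumption; real callers set
 * them from user requests to drop user/kernel entries.
 */
#if 0
static void sketch_refresh(struct hists *hists)
{
	hists__decay_entries(hists, false, false);	/* fade all entries by 7/8 */
	hists__output_resort(hists, NULL);		/* rebuild the display tree */
}
#endif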

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info was (partially) allocated by
			 * sample__resolve_bstack() and will be freed after
			 * the new entries are added, so we need to save a
			 * copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
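
/*
 * Note on the allocation above: struct hist_entry ends with a
 * zero-length callchain member, so the single zalloc() of
 * sizeof(*he) + callchain_size places the callchain_root right
 * behind the entry when callchains are in use, avoiding a second
 * allocation.
 */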

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse(), so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem()
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * Must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not of events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, to prevent entries exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
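
/*
 * Illustrative sketch of how a tool picks one of the ops tables above
 * and drives it, modeled loosely on builtin-report.c; the function
 * name and the fallback choices here are the example's assumptions,
 * not this file's API:
 */
#if 0
static int sketch_process_sample(struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct addr_location *al, void *arg)
{
	struct hist_entry_iter iter = {
		.evsel	= evsel,
		.sample	= sample,
		.ops	= &hist_iter_normal,	/* default: one entry per sample */
	};

	if (sample->branch_stack)
		iter.ops = &hist_iter_branch;		/* one entry per branch */
	else if (symbol_conf.cumulate_callchain)
		iter.ops = &hist_iter_cumulative;	/* 'children' accounting */

	return hist_entry_iter__add(&iter, al, PERF_MAX_STACK_DEPTH, arg);
}
#endif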

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
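
/*
 * Note: finish_entry() above runs even when an earlier step failed, so
 * resources taken in prepare_entry() (e.g. the branch_info or he_cache
 * arrays stored in iter->priv) are always released; err2 only replaces
 * err when the walk itself succeeded.
 */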

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}

/*
 * collapse the histogram
 */

bool hists__collapse_insert_entry(struct hists *hists,
				  struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
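
/*
 * Note on the rotation above: entries_in_array[] holds two rbtree
 * roots used as a double buffer.  Collapsing drains the root returned
 * here while new insertions land in the other root, which is why the
 * swap happens under hists->lock.
 */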

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
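
/*
 * Illustrative post-processing order (a sketch; "hists" is assumed to
 * have been populated by hist_entry_iter__add() calls):
 */
#if 0
	hists__collapse_resort(hists, NULL);	/* merge entries that compare equal */
	hists__output_resort(hists, NULL);	/* sort by output order for display */
#endif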

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force folding of the unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, add a dummy entry to the leader hists with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
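
/*
 * Illustrative sketch of the match/link pair, modeled loosely on perf
 * diff; the evsel_base/evsel_pair names are the example's assumptions:
 */
#if 0
	struct hists *base_hists = evsel__hists(evsel_base);
	struct hists *pair_hists = evsel__hists(evsel_pair);

	hists__match(base_hists, pair_hists);	/* pair entries present in both */
	hists__link(base_hists, pair_hists);	/* add dummies for pair-only entries */
#endif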

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors; we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes, always force
			 * no IPC (prev == NULL).
			 *
			 * Note that perf stores branches in reverse
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
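
/*
 * Example ~/.perfconfig stanza handled by perf_hist_config() above:
 *
 *	[hist]
 *		percentage = relative
 *
 * "relative" bases percentages on the filtered total, "absolute" on
 * the full total period (see hists__total_period()).
 */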

int __hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}