/* xref: /linux/tools/perf/util/hist.c (revision 64af4e0da419ef9e9db0d34a3b5836adbf90a5e8) */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

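/*
 * Per-column width bookkeeping.  hists__new_col_len() only ever grows a
 * column, so after seeing e.g. symbols of length 10 and then 24, the
 * SYMBOL column ends up 24 wide (plus the decoration accounted for in
 * hists__calc_col_len() below).
 */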
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

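/*
 * Note: on a 64-bit build, unresolved_col_width is BITS_PER_LONG / 4 = 16,
 * i.e. the number of hex digits in an address.  An unresolved symbol
 * column is then 16 + 4 + 2 = 22 wide: the address digits, plus the
 * "[x] " priv level marker, plus the "0x" prefix.
 */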
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

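/*
 * Exponential decay used by 'perf top' style displays: each pass keeps
 * 7/8 of the accumulated period, so an entry that stops getting samples
 * fades out (e.g. 1024 -> 896 -> 784 -> ...) and is eventually removed
 * once its period reaches zero.
 */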
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if ((zap_user && n->level == '.') ||
		    (zap_kernel && n->level != '.') ||
		    hists__decay_entry(hists, n)) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

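/*
 * Note the allocation trick below: the callchain_root lives in memory
 * tacked onto the end of the hist_entry itself (callchain is laid out
 * as a trailing zero-length array member of struct hist_entry, see
 * hist.h), so a single zalloc() of sizeof(*he) + callchain_size covers
 * both.
 */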
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

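/*
 * Look up 'entry' in the per-hists input rbtree, keyed by
 * hist_entry__cmp().  On a hit the existing node's stats are bumped; on
 * a miss a new hist_entry is allocated and linked in.  Either way the
 * cpumode-split period accounting happens at the 'out:' label.
 */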
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
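	/*
	 * Note: this reads the cycle count from bi->flags, i.e. from
	 * bi[0] rather than bi[i] - whether using the first branch
	 * entry's cycles for every iteration is intentional is not
	 * obvious from this code alone.
	 */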
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry 		= iter_prepare_cumulative_entry,
	.add_single_entry 	= iter_add_single_cumulative_entry,
	.next_entry 		= iter_next_cumulative_entry,
	.add_next_entry 	= iter_add_next_cumulative_entry,
	.finish_entry 		= iter_finish_cumulative_entry,
};

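/*
 * Drive one sample through whichever hist_iter_ops is installed.  The
 * protocol is: prepare_entry, then add_single_entry, then a loop of
 * next_entry/add_next_entry (e.g. one pass per branch stack entry or
 * callchain node), and finally finish_entry for cleanup.  A caller
 * typically sets iter->ops = &hist_iter_branch (or _mem, _normal,
 * _cumulative) before calling this.
 */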
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}

/*
 * collapse the histogram
 */

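/*
 * Merging step of the collapse: entries that compare equal under the
 * collapse keys (e.g. the same symbol sampled in different threads when
 * sorting only by symbol) are folded into one node, summing their stats
 * and merging their callchains.
 */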
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

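/*
 * hists->entries_in double-buffers between entries_in_array[0] and [1]
 * under hists->lock, so new samples can keep landing in one tree while
 * the collapse pass below drains the other.
 */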
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

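/*
 * Output-stage insertion.  Note the inverted comparison: an entry that
 * sorts *greater* than the current node goes to the left, so an
 * in-order walk of hists->entries yields entries in descending order
 * (hottest first).
 */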
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

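/*
 * Called when a filter no longer applies to an entry: clear its bit in
 * h->filtered and, if the entry is now completely unfiltered, fold it
 * and add it back into the non-filtered stats and column widths.
 */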
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

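/*
 * Per-instruction cycle accounting for annotation.  Branch entries are
 * stored newest-first, so the loop walks them in reverse to recover
 * program order; 'prev' then points at the target of the previous
 * branch, which bounds the block whose cycles are being attributed.
 */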
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

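/*
 * "relative" vs "absolute" percentages: with relative filtering the
 * denominator is the non-filtered period only, so e.g. an entry with
 * period 50 out of a filtered total of 200 shows 25%, even if the
 * overall total period is much larger.
 */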
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

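/*
 * Constructor/destructor pair registered with the perf_evsel object
 * machinery in hists__init() below: every evsel is allocated with room
 * for an embedded struct hists, initialized here and torn down in
 * hists_evsel__exit().
 */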
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}