xref: /linux/tools/perf/util/hist.c (revision 607bfbd7ffc60156ae0831c917497dc91a57dd8d)
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "evlist.h"
7 #include "evsel.h"
8 #include "annotate.h"
9 #include "ui/progress.h"
10 #include <math.h>
11 
12 static bool hists__filter_entry_by_dso(struct hists *hists,
13 				       struct hist_entry *he);
14 static bool hists__filter_entry_by_thread(struct hists *hists,
15 					  struct hist_entry *he);
16 static bool hists__filter_entry_by_symbol(struct hists *hists,
17 					  struct hist_entry *he);
18 static bool hists__filter_entry_by_socket(struct hists *hists,
19 					  struct hist_entry *he);
20 
21 u16 hists__col_len(struct hists *hists, enum hist_column col)
22 {
23 	return hists->col_len[col];
24 }
25 
26 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
27 {
28 	hists->col_len[col] = len;
29 }
30 
31 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
32 {
33 	if (len > hists__col_len(hists, col)) {
34 		hists__set_col_len(hists, col, len);
35 		return true;
36 	}
37 	return false;
38 }
39 
40 void hists__reset_col_len(struct hists *hists)
41 {
42 	enum hist_column col;
43 
44 	for (col = 0; col < HISTC_NR_COLS; ++col)
45 		hists__set_col_len(hists, col, 0);
46 }
47 
48 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
49 {
50 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
51 
52 	if (hists__col_len(hists, dso) < unresolved_col_width &&
53 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
54 	    !symbol_conf.dso_list)
55 		hists__set_col_len(hists, dso, unresolved_col_width);
56 }
57 
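/*
 * Grow the per-column maximum widths so that every field of this entry
 * (symbol, dso, comm, branch/mem targets, srcline, etc.) fits when the
 * histogram is printed.
 */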
58 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
59 {
60 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
61 	int symlen;
62 	u16 len;
63 
64 	/*
65 	 * +4 accounts for '[x] ' priv level info
66 	 * +2 accounts for 0x prefix on raw addresses
67 	 * +3 accounts for ' y ' symtab origin info
68 	 */
69 	if (h->ms.sym) {
70 		symlen = h->ms.sym->namelen + 4;
71 		if (verbose)
72 			symlen += BITS_PER_LONG / 4 + 2 + 3;
73 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
74 	} else {
75 		symlen = unresolved_col_width + 4 + 2;
76 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
77 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
78 	}
79 
80 	len = thread__comm_len(h->thread);
81 	if (hists__new_col_len(hists, HISTC_COMM, len))
82 		hists__set_col_len(hists, HISTC_THREAD, len + 6);
83 
84 	if (h->ms.map) {
85 		len = dso__name_len(h->ms.map->dso);
86 		hists__new_col_len(hists, HISTC_DSO, len);
87 	}
88 
89 	if (h->parent)
90 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
91 
92 	if (h->branch_info) {
93 		if (h->branch_info->from.sym) {
94 			symlen = (int)h->branch_info->from.sym->namelen + 4;
95 			if (verbose)
96 				symlen += BITS_PER_LONG / 4 + 2 + 3;
97 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
98 
99 			symlen = dso__name_len(h->branch_info->from.map->dso);
100 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
101 		} else {
102 			symlen = unresolved_col_width + 4 + 2;
103 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
104 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
105 		}
106 
107 		if (h->branch_info->to.sym) {
108 			symlen = (int)h->branch_info->to.sym->namelen + 4;
109 			if (verbose)
110 				symlen += BITS_PER_LONG / 4 + 2 + 3;
111 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
112 
113 			symlen = dso__name_len(h->branch_info->to.map->dso);
114 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
115 		} else {
116 			symlen = unresolved_col_width + 4 + 2;
117 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
118 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
119 		}
120 	}
121 
122 	if (h->mem_info) {
123 		if (h->mem_info->daddr.sym) {
124 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
125 			       + unresolved_col_width + 2;
126 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
127 					   symlen);
128 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
129 					   symlen + 1);
130 		} else {
131 			symlen = unresolved_col_width + 4 + 2;
132 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
133 					   symlen);
134 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
135 					   symlen);
136 		}
137 
138 		if (h->mem_info->iaddr.sym) {
139 			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
140 			       + unresolved_col_width + 2;
141 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
142 					   symlen);
143 		} else {
144 			symlen = unresolved_col_width + 4 + 2;
145 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
146 					   symlen);
147 		}
148 
149 		if (h->mem_info->daddr.map) {
150 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
151 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
152 					   symlen);
153 		} else {
154 			symlen = unresolved_col_width + 4 + 2;
155 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
156 		}
157 	} else {
158 		symlen = unresolved_col_width + 4 + 2;
159 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
160 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
161 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
162 	}
163 
164 	hists__new_col_len(hists, HISTC_CPU, 3);
165 	hists__new_col_len(hists, HISTC_SOCKET, 6);
166 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
167 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
168 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
169 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
170 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
171 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
172 
173 	if (h->srcline)
174 		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
175 
176 	if (h->srcfile)
177 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
178 
179 	if (h->transaction)
180 		hists__new_col_len(hists, HISTC_TRANSACTION,
181 				   hist_entry__transaction_len());
182 }
183 
184 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
185 {
186 	struct rb_node *next = rb_first(&hists->entries);
187 	struct hist_entry *n;
188 	int row = 0;
189 
190 	hists__reset_col_len(hists);
191 
192 	while (next && row++ < max_rows) {
193 		n = rb_entry(next, struct hist_entry, rb_node);
194 		if (!n->filtered)
195 			hists__calc_col_len(hists, n);
196 		next = rb_next(&n->rb_node);
197 	}
198 }
199 
200 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
201 					unsigned int cpumode, u64 period)
202 {
203 	switch (cpumode) {
204 	case PERF_RECORD_MISC_KERNEL:
205 		he_stat->period_sys += period;
206 		break;
207 	case PERF_RECORD_MISC_USER:
208 		he_stat->period_us += period;
209 		break;
210 	case PERF_RECORD_MISC_GUEST_KERNEL:
211 		he_stat->period_guest_sys += period;
212 		break;
213 	case PERF_RECORD_MISC_GUEST_USER:
214 		he_stat->period_guest_us += period;
215 		break;
216 	default:
217 		break;
218 	}
219 }
220 
221 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
222 				u64 weight)
223 {
224 
225 	he_stat->period		+= period;
226 	he_stat->weight		+= weight;
227 	he_stat->nr_events	+= 1;
228 }
229 
230 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
231 {
232 	dest->period		+= src->period;
233 	dest->period_sys	+= src->period_sys;
234 	dest->period_us		+= src->period_us;
235 	dest->period_guest_sys	+= src->period_guest_sys;
236 	dest->period_guest_us	+= src->period_guest_us;
237 	dest->nr_events		+= src->nr_events;
238 	dest->weight		+= src->weight;
239 }
240 
241 static void he_stat__decay(struct he_stat *he_stat)
242 {
243 	he_stat->period = (he_stat->period * 7) / 8;
244 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
245 	/* XXX need decay for weight too? */
246 }
247 
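/*
 * Exponentially decay the entry's period (by 7/8 per call) so that old
 * samples fade out in live views such as 'perf top'.  Returns true once
 * the period has dropped to zero, i.e. the entry can be removed.
 */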
248 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
249 {
250 	u64 prev_period = he->stat.period;
251 	u64 diff;
252 
253 	if (prev_period == 0)
254 		return true;
255 
256 	he_stat__decay(&he->stat);
257 	if (symbol_conf.cumulate_callchain)
258 		he_stat__decay(he->stat_acc);
259 	decay_callchain(he->callchain);
260 
261 	diff = prev_period - he->stat.period;
262 
263 	hists->stats.total_period -= diff;
264 	if (!he->filtered)
265 		hists->stats.total_non_filtered_period -= diff;
266 
267 	return he->stat.period == 0;
268 }
269 
270 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
271 {
272 	rb_erase(&he->rb_node, &hists->entries);
273 
274 	if (sort__need_collapse)
275 		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
276 	else
277 		rb_erase(&he->rb_node_in, hists->entries_in);
278 
279 	--hists->nr_entries;
280 	if (!he->filtered)
281 		--hists->nr_non_filtered_entries;
282 
283 	hist_entry__delete(he);
284 }
285 
286 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
287 {
288 	struct rb_node *next = rb_first(&hists->entries);
289 	struct hist_entry *n;
290 
291 	while (next) {
292 		n = rb_entry(next, struct hist_entry, rb_node);
293 		next = rb_next(&n->rb_node);
294 		if (((zap_user && n->level == '.') ||
295 		     (zap_kernel && n->level != '.') ||
296 		     hists__decay_entry(hists, n))) {
297 			hists__delete_entry(hists, n);
298 		}
299 	}
300 }
301 
302 void hists__delete_entries(struct hists *hists)
303 {
304 	struct rb_node *next = rb_first(&hists->entries);
305 	struct hist_entry *n;
306 
307 	while (next) {
308 		n = rb_entry(next, struct hist_entry, rb_node);
309 		next = rb_next(&n->rb_node);
310 
311 		hists__delete_entry(hists, n);
312 	}
313 }
314 
315 /*
316  * histogram, sorted on item, collects periods
317  */
318 
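/*
 * Allocate a hist_entry as a copy of @template, taking references on the
 * maps and thread it points to and duplicating branch_info and raw_data
 * so they outlive the sample they were resolved from.
 */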
319 static struct hist_entry *hist_entry__new(struct hist_entry *template,
320 					  bool sample_self)
321 {
322 	size_t callchain_size = 0;
323 	struct hist_entry *he;
324 
325 	if (symbol_conf.use_callchain)
326 		callchain_size = sizeof(struct callchain_root);
327 
328 	he = zalloc(sizeof(*he) + callchain_size);
329 
330 	if (he != NULL) {
331 		*he = *template;
332 
333 		if (symbol_conf.cumulate_callchain) {
334 			he->stat_acc = malloc(sizeof(he->stat));
335 			if (he->stat_acc == NULL) {
336 				free(he);
337 				return NULL;
338 			}
339 			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
340 			if (!sample_self)
341 				memset(&he->stat, 0, sizeof(he->stat));
342 		}
343 
344 		map__get(he->ms.map);
345 
346 		if (he->branch_info) {
347 			/*
348 			 * This branch info is (a part of) allocated from
349 			 * sample__resolve_bstack() and will be freed after
350 			 * adding new entries.  So we need to save a copy.
351 			 */
352 			he->branch_info = malloc(sizeof(*he->branch_info));
353 			if (he->branch_info == NULL) {
354 				map__zput(he->ms.map);
355 				free(he->stat_acc);
356 				free(he);
357 				return NULL;
358 			}
359 
360 			memcpy(he->branch_info, template->branch_info,
361 			       sizeof(*he->branch_info));
362 
363 			map__get(he->branch_info->from.map);
364 			map__get(he->branch_info->to.map);
365 		}
366 
367 		if (he->mem_info) {
368 			map__get(he->mem_info->iaddr.map);
369 			map__get(he->mem_info->daddr.map);
370 		}
371 
372 		if (symbol_conf.use_callchain)
373 			callchain_init(he->callchain);
374 
375 		if (he->raw_data) {
376 			he->raw_data = memdup(he->raw_data, he->raw_size);
377 
378 			if (he->raw_data == NULL) {
379 				map__put(he->ms.map);
380 				if (he->branch_info) {
381 					map__put(he->branch_info->from.map);
382 					map__put(he->branch_info->to.map);
383 					free(he->branch_info);
384 				}
385 				if (he->mem_info) {
386 					map__put(he->mem_info->iaddr.map);
387 					map__put(he->mem_info->daddr.map);
388 				}
389 				free(he->stat_acc);
390 				free(he);
391 				return NULL;
392 			}
393 		}
394 		INIT_LIST_HEAD(&he->pairs.node);
395 		thread__get(he->thread);
396 	}
397 
398 	return he;
399 }
400 
401 static u8 symbol__parent_filter(const struct symbol *parent)
402 {
403 	if (symbol_conf.exclude_other && parent == NULL)
404 		return 1 << HIST_FILTER__PARENT;
405 	return 0;
406 }
407 
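/*
 * Find @entry in the current input tree using hist_entry__cmp(); if an
 * equal entry already exists, aggregate the sample's period/weight into
 * it, otherwise create and insert a new entry via hist_entry__new().
 */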
408 static struct hist_entry *hists__findnew_entry(struct hists *hists,
409 					       struct hist_entry *entry,
410 					       struct addr_location *al,
411 					       bool sample_self)
412 {
413 	struct rb_node **p;
414 	struct rb_node *parent = NULL;
415 	struct hist_entry *he;
416 	int64_t cmp;
417 	u64 period = entry->stat.period;
418 	u64 weight = entry->stat.weight;
419 
420 	p = &hists->entries_in->rb_node;
421 
422 	while (*p != NULL) {
423 		parent = *p;
424 		he = rb_entry(parent, struct hist_entry, rb_node_in);
425 
426 		/*
427 		 * Make sure that it receives arguments in the same order as
428 		 * hist_entry__collapse() so that we can use an appropriate
429 		 * function when searching an entry regardless which sort
430 		 * keys were used.
431 		 */
432 		cmp = hist_entry__cmp(he, entry);
433 
434 		if (!cmp) {
435 			if (sample_self) {
436 				he_stat__add_period(&he->stat, period, weight);
437 				hists->stats.total_period += period;
438 				if (!he->filtered)
439 					hists->stats.total_non_filtered_period += period;
440 			}
441 			if (symbol_conf.cumulate_callchain)
442 				he_stat__add_period(he->stat_acc, period, weight);
443 
444 			/*
445 			 * This mem info was allocated from sample__resolve_mem
446 			 * and will not be used anymore.
447 			 */
448 			zfree(&entry->mem_info);
449 
450 			/* If the map of an existing hist_entry has
451 			 * become out-of-date due to an exec() or
452 			 * similar, update it.  Otherwise we will
453 			 * mis-adjust symbol addresses when computing
454 			 * the history counter to increment.
455 			 */
456 			if (he->ms.map != entry->ms.map) {
457 				map__put(he->ms.map);
458 				he->ms.map = map__get(entry->ms.map);
459 			}
460 			goto out;
461 		}
462 
463 		if (cmp < 0)
464 			p = &(*p)->rb_left;
465 		else
466 			p = &(*p)->rb_right;
467 	}
468 
469 	he = hist_entry__new(entry, sample_self);
470 	if (!he)
471 		return NULL;
472 
473 	if (sample_self)
474 		hists__inc_stats(hists, he);
475 	else
476 		hists->nr_entries++;
477 
478 	rb_link_node(&he->rb_node_in, parent, p);
479 	rb_insert_color(&he->rb_node_in, hists->entries_in);
480 out:
481 	if (sample_self)
482 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
483 	if (symbol_conf.cumulate_callchain)
484 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
485 	return he;
486 }
487 
488 struct hist_entry *__hists__add_entry(struct hists *hists,
489 				      struct addr_location *al,
490 				      struct symbol *sym_parent,
491 				      struct branch_info *bi,
492 				      struct mem_info *mi,
493 				      struct perf_sample *sample,
494 				      bool sample_self)
495 {
496 	struct hist_entry entry = {
497 		.thread	= al->thread,
498 		.comm = thread__comm(al->thread),
499 		.ms = {
500 			.map	= al->map,
501 			.sym	= al->sym,
502 		},
503 		.socket	 = al->socket,
504 		.cpu	 = al->cpu,
505 		.cpumode = al->cpumode,
506 		.ip	 = al->addr,
507 		.level	 = al->level,
508 		.stat = {
509 			.nr_events = 1,
510 			.period	= sample->period,
511 			.weight = sample->weight,
512 		},
513 		.parent = sym_parent,
514 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
515 		.hists	= hists,
516 		.branch_info = bi,
517 		.mem_info = mi,
518 		.transaction = sample->transaction,
519 		.raw_data = sample->raw_data,
520 		.raw_size = sample->raw_size,
521 	};
522 
523 	return hists__findnew_entry(hists, &entry, al, sample_self);
524 }
525 
526 static int
527 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
528 		    struct addr_location *al __maybe_unused)
529 {
530 	return 0;
531 }
532 
533 static int
534 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
535 			struct addr_location *al __maybe_unused)
536 {
537 	return 0;
538 }
539 
540 static int
541 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
542 {
543 	struct perf_sample *sample = iter->sample;
544 	struct mem_info *mi;
545 
546 	mi = sample__resolve_mem(sample, al);
547 	if (mi == NULL)
548 		return -ENOMEM;
549 
550 	iter->priv = mi;
551 	return 0;
552 }
553 
554 static int
555 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
556 {
557 	u64 cost;
558 	struct mem_info *mi = iter->priv;
559 	struct hists *hists = evsel__hists(iter->evsel);
560 	struct perf_sample *sample = iter->sample;
561 	struct hist_entry *he;
562 
563 	if (mi == NULL)
564 		return -EINVAL;
565 
566 	cost = sample->weight;
567 	if (!cost)
568 		cost = 1;
569 
570 	/*
571 	 * must pass period=weight in order to get the correct
572 	 * sorting from hists__collapse_resort() which is solely
573 	 * based on periods. We want sorting to be done on nr_events * weight
574 	 * and this is indirectly achieved by passing period=weight here
575 	 * and the he_stat__add_period() function.
576 	 */
577 	sample->period = cost;
578 
579 	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
580 				sample, true);
581 	if (!he)
582 		return -ENOMEM;
583 
584 	iter->he = he;
585 	return 0;
586 }
587 
588 static int
589 iter_finish_mem_entry(struct hist_entry_iter *iter,
590 		      struct addr_location *al __maybe_unused)
591 {
592 	struct perf_evsel *evsel = iter->evsel;
593 	struct hists *hists = evsel__hists(evsel);
594 	struct hist_entry *he = iter->he;
595 	int err = -EINVAL;
596 
597 	if (he == NULL)
598 		goto out;
599 
600 	hists__inc_nr_samples(hists, he->filtered);
601 
602 	err = hist_entry__append_callchain(he, iter->sample);
603 
604 out:
605 	/*
606 	 * We don't need to free iter->priv (mem_info) here since the mem info
607 	 * was either already freed in hists__findnew_entry() or passed to a
608 	 * new hist entry by hist_entry__new().
609 	 */
610 	iter->priv = NULL;
611 
612 	iter->he = NULL;
613 	return err;
614 }
615 
616 static int
617 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
618 {
619 	struct branch_info *bi;
620 	struct perf_sample *sample = iter->sample;
621 
622 	bi = sample__resolve_bstack(sample, al);
623 	if (!bi)
624 		return -ENOMEM;
625 
626 	iter->curr = 0;
627 	iter->total = sample->branch_stack->nr;
628 
629 	iter->priv = bi;
630 	return 0;
631 }
632 
633 static int
634 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
635 			     struct addr_location *al __maybe_unused)
636 {
637 	/* to avoid calling callback function */
638 	iter->he = NULL;
639 
640 	return 0;
641 }
642 
643 static int
644 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
645 {
646 	struct branch_info *bi = iter->priv;
647 	int i = iter->curr;
648 
649 	if (bi == NULL)
650 		return 0;
651 
652 	if (iter->curr >= iter->total)
653 		return 0;
654 
655 	al->map = bi[i].to.map;
656 	al->sym = bi[i].to.sym;
657 	al->addr = bi[i].to.addr;
658 	return 1;
659 }
660 
661 static int
662 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
663 {
664 	struct branch_info *bi;
665 	struct perf_evsel *evsel = iter->evsel;
666 	struct hists *hists = evsel__hists(evsel);
667 	struct perf_sample *sample = iter->sample;
668 	struct hist_entry *he = NULL;
669 	int i = iter->curr;
670 	int err = 0;
671 
672 	bi = iter->priv;
673 
674 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
675 		goto out;
676 
677 	/*
678 	 * The report shows the percentage of total branches captured
679 	 * and not events sampled. Thus we use a pseudo period of 1.
680 	 */
681 	sample->period = 1;
682 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
683 
684 	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
685 				sample, true);
686 	if (he == NULL)
687 		return -ENOMEM;
688 
689 	hists__inc_nr_samples(hists, he->filtered);
690 
691 out:
692 	iter->he = he;
693 	iter->curr++;
694 	return err;
695 }
696 
697 static int
698 iter_finish_branch_entry(struct hist_entry_iter *iter,
699 			 struct addr_location *al __maybe_unused)
700 {
701 	zfree(&iter->priv);
702 	iter->he = NULL;
703 
704 	return iter->curr >= iter->total ? 0 : -1;
705 }
706 
707 static int
708 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
709 			  struct addr_location *al __maybe_unused)
710 {
711 	return 0;
712 }
713 
714 static int
715 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
716 {
717 	struct perf_evsel *evsel = iter->evsel;
718 	struct perf_sample *sample = iter->sample;
719 	struct hist_entry *he;
720 
721 	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
722 				sample, true);
723 	if (he == NULL)
724 		return -ENOMEM;
725 
726 	iter->he = he;
727 	return 0;
728 }
729 
730 static int
731 iter_finish_normal_entry(struct hist_entry_iter *iter,
732 			 struct addr_location *al __maybe_unused)
733 {
734 	struct hist_entry *he = iter->he;
735 	struct perf_evsel *evsel = iter->evsel;
736 	struct perf_sample *sample = iter->sample;
737 
738 	if (he == NULL)
739 		return 0;
740 
741 	iter->he = NULL;
742 
743 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
744 
745 	return hist_entry__append_callchain(he, sample);
746 }
747 
748 static int
749 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
750 			      struct addr_location *al __maybe_unused)
751 {
752 	struct hist_entry **he_cache;
753 
754 	callchain_cursor_commit(&callchain_cursor);
755 
756 	/*
757 	 * This is for detecting cycles or recursions so that they're
758 	 * cumulated only once, to prevent entries from exceeding 100%
759 	 * overhead.
760 	 */
761 	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
762 	if (he_cache == NULL)
763 		return -ENOMEM;
764 
765 	iter->priv = he_cache;
766 	iter->curr = 0;
767 
768 	return 0;
769 }
770 
771 static int
772 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
773 				 struct addr_location *al)
774 {
775 	struct perf_evsel *evsel = iter->evsel;
776 	struct hists *hists = evsel__hists(evsel);
777 	struct perf_sample *sample = iter->sample;
778 	struct hist_entry **he_cache = iter->priv;
779 	struct hist_entry *he;
780 	int err = 0;
781 
782 	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
783 				sample, true);
784 	if (he == NULL)
785 		return -ENOMEM;
786 
787 	iter->he = he;
788 	he_cache[iter->curr++] = he;
789 
790 	hist_entry__append_callchain(he, sample);
791 
792 	/*
793 	 * We need to re-initialize the cursor since callchain_append()
794 	 * advanced the cursor to the end.
795 	 */
796 	callchain_cursor_commit(&callchain_cursor);
797 
798 	hists__inc_nr_samples(hists, he->filtered);
799 
800 	return err;
801 }
802 
803 static int
804 iter_next_cumulative_entry(struct hist_entry_iter *iter,
805 			   struct addr_location *al)
806 {
807 	struct callchain_cursor_node *node;
808 
809 	node = callchain_cursor_current(&callchain_cursor);
810 	if (node == NULL)
811 		return 0;
812 
813 	return fill_callchain_info(al, node, iter->hide_unresolved);
814 }
815 
816 static int
817 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
818 			       struct addr_location *al)
819 {
820 	struct perf_evsel *evsel = iter->evsel;
821 	struct perf_sample *sample = iter->sample;
822 	struct hist_entry **he_cache = iter->priv;
823 	struct hist_entry *he;
824 	struct hist_entry he_tmp = {
825 		.hists = evsel__hists(evsel),
826 		.cpu = al->cpu,
827 		.thread = al->thread,
828 		.comm = thread__comm(al->thread),
829 		.ip = al->addr,
830 		.ms = {
831 			.map = al->map,
832 			.sym = al->sym,
833 		},
834 		.parent = iter->parent,
835 		.raw_data = sample->raw_data,
836 		.raw_size = sample->raw_size,
837 	};
838 	int i;
839 	struct callchain_cursor cursor;
840 
841 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
842 
843 	callchain_cursor_advance(&callchain_cursor);
844 
845 	/*
846 	 * Check if there are duplicate entries in the callchain.
847 	 * It's possible that it has cycles or recursive calls.
848 	 */
849 	for (i = 0; i < iter->curr; i++) {
850 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
851 			/* to avoid calling callback function */
852 			iter->he = NULL;
853 			return 0;
854 		}
855 	}
856 
857 	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
858 				sample, false);
859 	if (he == NULL)
860 		return -ENOMEM;
861 
862 	iter->he = he;
863 	he_cache[iter->curr++] = he;
864 
865 	if (symbol_conf.use_callchain)
866 		callchain_append(he->callchain, &cursor, sample->period);
867 	return 0;
868 }
869 
870 static int
871 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
872 			     struct addr_location *al __maybe_unused)
873 {
874 	zfree(&iter->priv);
875 	iter->he = NULL;
876 
877 	return 0;
878 }
879 
880 const struct hist_iter_ops hist_iter_mem = {
881 	.prepare_entry 		= iter_prepare_mem_entry,
882 	.add_single_entry 	= iter_add_single_mem_entry,
883 	.next_entry 		= iter_next_nop_entry,
884 	.add_next_entry 	= iter_add_next_nop_entry,
885 	.finish_entry 		= iter_finish_mem_entry,
886 };
887 
888 const struct hist_iter_ops hist_iter_branch = {
889 	.prepare_entry 		= iter_prepare_branch_entry,
890 	.add_single_entry 	= iter_add_single_branch_entry,
891 	.next_entry 		= iter_next_branch_entry,
892 	.add_next_entry 	= iter_add_next_branch_entry,
893 	.finish_entry 		= iter_finish_branch_entry,
894 };
895 
896 const struct hist_iter_ops hist_iter_normal = {
897 	.prepare_entry 		= iter_prepare_normal_entry,
898 	.add_single_entry 	= iter_add_single_normal_entry,
899 	.next_entry 		= iter_next_nop_entry,
900 	.add_next_entry 	= iter_add_next_nop_entry,
901 	.finish_entry 		= iter_finish_normal_entry,
902 };
903 
904 const struct hist_iter_ops hist_iter_cumulative = {
905 	.prepare_entry 		= iter_prepare_cumulative_entry,
906 	.add_single_entry 	= iter_add_single_cumulative_entry,
907 	.next_entry 		= iter_next_cumulative_entry,
908 	.add_next_entry 	= iter_add_next_cumulative_entry,
909 	.finish_entry 		= iter_finish_cumulative_entry,
910 };
911 
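/*
 * Rough usage sketch (hedged: modelled on how callers such as
 * builtin-report.c appear to wire this up; the caller-side variable names
 * below are illustrative):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel  = evsel,
 *		.sample = sample,
 *		.ops    = symbol_conf.cumulate_callchain ?
 *			  &hist_iter_cumulative : &hist_iter_normal,
 *	};
 *	err = hist_entry_iter__add(&iter, &al, max_stack, cb_arg);
 *
 * hist_entry_iter__add() below resolves the callchain, then runs
 * prepare_entry, add_single_entry, the next_entry/add_next_entry loop and
 * finally finish_entry.
 */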
912 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
913 			 int max_stack_depth, void *arg)
914 {
915 	int err, err2;
916 
917 	err = sample__resolve_callchain(iter->sample, &iter->parent,
918 					iter->evsel, al, max_stack_depth);
919 	if (err)
920 		return err;
921 
922 	iter->max_stack = max_stack_depth;
923 
924 	err = iter->ops->prepare_entry(iter, al);
925 	if (err)
926 		goto out;
927 
928 	err = iter->ops->add_single_entry(iter, al);
929 	if (err)
930 		goto out;
931 
932 	if (iter->he && iter->add_entry_cb) {
933 		err = iter->add_entry_cb(iter, al, true, arg);
934 		if (err)
935 			goto out;
936 	}
937 
938 	while (iter->ops->next_entry(iter, al)) {
939 		err = iter->ops->add_next_entry(iter, al);
940 		if (err)
941 			break;
942 
943 		if (iter->he && iter->add_entry_cb) {
944 			err = iter->add_entry_cb(iter, al, false, arg);
945 			if (err)
946 				goto out;
947 		}
948 	}
949 
950 out:
951 	err2 = iter->ops->finish_entry(iter, al);
952 	if (!err)
953 		err = err2;
954 
955 	return err;
956 }
957 
958 int64_t
959 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
960 {
961 	struct hists *hists = left->hists;
962 	struct perf_hpp_fmt *fmt;
963 	int64_t cmp = 0;
964 
965 	hists__for_each_sort_list(hists, fmt) {
966 		cmp = fmt->cmp(fmt, left, right);
967 		if (cmp)
968 			break;
969 	}
970 
971 	return cmp;
972 }
973 
974 int64_t
975 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
976 {
977 	struct hists *hists = left->hists;
978 	struct perf_hpp_fmt *fmt;
979 	int64_t cmp = 0;
980 
981 	hists__for_each_sort_list(hists, fmt) {
982 		cmp = fmt->collapse(fmt, left, right);
983 		if (cmp)
984 			break;
985 	}
986 
987 	return cmp;
988 }
989 
990 void hist_entry__delete(struct hist_entry *he)
991 {
992 	thread__zput(he->thread);
993 	map__zput(he->ms.map);
994 
995 	if (he->branch_info) {
996 		map__zput(he->branch_info->from.map);
997 		map__zput(he->branch_info->to.map);
998 		zfree(&he->branch_info);
999 	}
1000 
1001 	if (he->mem_info) {
1002 		map__zput(he->mem_info->iaddr.map);
1003 		map__zput(he->mem_info->daddr.map);
1004 		zfree(&he->mem_info);
1005 	}
1006 
1007 	zfree(&he->stat_acc);
1008 	free_srcline(he->srcline);
1009 	if (he->srcfile && he->srcfile[0])
1010 		free(he->srcfile);
1011 	free_callchain(he->callchain);
1012 	free(he->trace_output);
1013 	free(he->raw_data);
1014 	free(he);
1015 }
1016 
1017 /*
1018  * If this is not the last column, then we need to pad it according to the
1019  * pre-calculated max length for this column, otherwise don't bother adding
1020  * spaces because that would break viewing this with, for instance, 'less',
1021  * that would show tons of trailing spaces when a long C++ demangled method
1022  * name is sampled.
1023  */
1024 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1025 				   struct perf_hpp_fmt *fmt, int printed)
1026 {
1027 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1028 		const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
1029 		if (printed < width) {
1030 			advance_hpp(hpp, printed);
1031 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1032 		}
1033 	}
1034 
1035 	return printed;
1036 }
1037 
1038 /*
1039  * collapse the histogram
1040  */
1041 
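/*
 * Insert @he into @root (keyed by hist_entry__collapse()).  If an equal
 * entry already exists, merge @he's stats and callchain into it, delete
 * @he and return false; otherwise link @he into the tree and return true.
 */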
1042 bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
1043 				  struct rb_root *root, struct hist_entry *he)
1044 {
1045 	struct rb_node **p = &root->rb_node;
1046 	struct rb_node *parent = NULL;
1047 	struct hist_entry *iter;
1048 	int64_t cmp;
1049 
1050 	while (*p != NULL) {
1051 		parent = *p;
1052 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1053 
1054 		cmp = hist_entry__collapse(iter, he);
1055 
1056 		if (!cmp) {
1057 			he_stat__add_stat(&iter->stat, &he->stat);
1058 			if (symbol_conf.cumulate_callchain)
1059 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1060 
1061 			if (symbol_conf.use_callchain) {
1062 				callchain_cursor_reset(&callchain_cursor);
1063 				callchain_merge(&callchain_cursor,
1064 						iter->callchain,
1065 						he->callchain);
1066 			}
1067 			hist_entry__delete(he);
1068 			return false;
1069 		}
1070 
1071 		if (cmp < 0)
1072 			p = &(*p)->rb_left;
1073 		else
1074 			p = &(*p)->rb_right;
1075 	}
1076 	hists->nr_entries++;
1077 
1078 	rb_link_node(&he->rb_node_in, parent, p);
1079 	rb_insert_color(&he->rb_node_in, root);
1080 	return true;
1081 }
1082 
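/*
 * hists->entries_in_array[] holds two input trees: rotate to the other one
 * under hists->lock and return the tree that was active so far, presumably
 * so that new samples can keep being inserted while the returned tree is
 * being collapsed.
 */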
1083 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1084 {
1085 	struct rb_root *root;
1086 
1087 	pthread_mutex_lock(&hists->lock);
1088 
1089 	root = hists->entries_in;
1090 	if (++hists->entries_in > &hists->entries_in_array[1])
1091 		hists->entries_in = &hists->entries_in_array[0];
1092 
1093 	pthread_mutex_unlock(&hists->lock);
1094 
1095 	return root;
1096 }
1097 
1098 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1099 {
1100 	hists__filter_entry_by_dso(hists, he);
1101 	hists__filter_entry_by_thread(hists, he);
1102 	hists__filter_entry_by_symbol(hists, he);
1103 	hists__filter_entry_by_socket(hists, he);
1104 }
1105 
1106 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1107 {
1108 	struct rb_root *root;
1109 	struct rb_node *next;
1110 	struct hist_entry *n;
1111 
1112 	if (!sort__need_collapse)
1113 		return;
1114 
1115 	hists->nr_entries = 0;
1116 
1117 	root = hists__get_rotate_entries_in(hists);
1118 
1119 	next = rb_first(root);
1120 
1121 	while (next) {
1122 		if (session_done())
1123 			break;
1124 		n = rb_entry(next, struct hist_entry, rb_node_in);
1125 		next = rb_next(&n->rb_node_in);
1126 
1127 		rb_erase(&n->rb_node_in, root);
1128 		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
1129 			/*
1130 			 * If it wasn't combined with one of the entries already
1131 			 * collapsed, we need to apply the filters that may have
1132 			 * been set by, say, the hist_browser.
1133 			 */
1134 			hists__apply_filters(hists, n);
1135 		}
1136 		if (prog)
1137 			ui_progress__update(prog, 1);
1138 	}
1139 }
1140 
1141 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1142 {
1143 	struct hists *hists = a->hists;
1144 	struct perf_hpp_fmt *fmt;
1145 	int64_t cmp = 0;
1146 
1147 	hists__for_each_sort_list(hists, fmt) {
1148 		if (perf_hpp__should_skip(fmt, a->hists))
1149 			continue;
1150 
1151 		cmp = fmt->sort(fmt, a, b);
1152 		if (cmp)
1153 			break;
1154 	}
1155 
1156 	return cmp;
1157 }
1158 
1159 static void hists__reset_filter_stats(struct hists *hists)
1160 {
1161 	hists->nr_non_filtered_entries = 0;
1162 	hists->stats.total_non_filtered_period = 0;
1163 }
1164 
1165 void hists__reset_stats(struct hists *hists)
1166 {
1167 	hists->nr_entries = 0;
1168 	hists->stats.total_period = 0;
1169 
1170 	hists__reset_filter_stats(hists);
1171 }
1172 
1173 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1174 {
1175 	hists->nr_non_filtered_entries++;
1176 	hists->stats.total_non_filtered_period += h->stat.period;
1177 }
1178 
1179 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1180 {
1181 	if (!h->filtered)
1182 		hists__inc_filter_stats(hists, h);
1183 
1184 	hists->nr_entries++;
1185 	hists->stats.total_period += h->stat.period;
1186 }
1187 
1188 static void __hists__insert_output_entry(struct rb_root *entries,
1189 					 struct hist_entry *he,
1190 					 u64 min_callchain_hits,
1191 					 bool use_callchain)
1192 {
1193 	struct rb_node **p = &entries->rb_node;
1194 	struct rb_node *parent = NULL;
1195 	struct hist_entry *iter;
1196 
1197 	if (use_callchain) {
1198 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1199 			u64 total = he->stat.period;
1200 
1201 			if (symbol_conf.cumulate_callchain)
1202 				total = he->stat_acc->period;
1203 
1204 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1205 		}
1206 		callchain_param.sort(&he->sorted_chain, he->callchain,
1207 				      min_callchain_hits, &callchain_param);
1208 	}
1209 
1210 	while (*p != NULL) {
1211 		parent = *p;
1212 		iter = rb_entry(parent, struct hist_entry, rb_node);
1213 
1214 		if (hist_entry__sort(he, iter) > 0)
1215 			p = &(*p)->rb_left;
1216 		else
1217 			p = &(*p)->rb_right;
1218 	}
1219 
1220 	rb_link_node(&he->rb_node, parent, p);
1221 	rb_insert_color(&he->rb_node, entries);
1222 }
1223 
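/*
 * Rebuild hists->entries from the collapsed (or input) tree, ordered by
 * the output sort keys, sorting each entry's callchain against
 * min_callchain_hits and recomputing stats and column widths as we go.
 */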
1224 static void output_resort(struct hists *hists, struct ui_progress *prog,
1225 			  bool use_callchain)
1226 {
1227 	struct rb_root *root;
1228 	struct rb_node *next;
1229 	struct hist_entry *n;
1230 	u64 min_callchain_hits;
1231 
1232 	min_callchain_hits = hists__total_period(hists) * (callchain_param.min_percent / 100);
1233 
1234 	if (sort__need_collapse)
1235 		root = &hists->entries_collapsed;
1236 	else
1237 		root = hists->entries_in;
1238 
1239 	next = rb_first(root);
1240 	hists->entries = RB_ROOT;
1241 
1242 	hists__reset_stats(hists);
1243 	hists__reset_col_len(hists);
1244 
1245 	while (next) {
1246 		n = rb_entry(next, struct hist_entry, rb_node_in);
1247 		next = rb_next(&n->rb_node_in);
1248 
1249 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1250 		hists__inc_stats(hists, n);
1251 
1252 		if (!n->filtered)
1253 			hists__calc_col_len(hists, n);
1254 
1255 		if (prog)
1256 			ui_progress__update(prog, 1);
1257 	}
1258 }
1259 
1260 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1261 {
1262 	bool use_callchain;
1263 
1264 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1265 		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1266 	else
1267 		use_callchain = symbol_conf.use_callchain;
1268 
1269 	output_resort(evsel__hists(evsel), prog, use_callchain);
1270 }
1271 
1272 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1273 {
1274 	output_resort(hists, prog, symbol_conf.use_callchain);
1275 }
1276 
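/*
 * Clear @filter from @h; if no other filter bits remain, fold the entry
 * and account it back into the non-filtered stats and column widths.
 */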
1277 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1278 				       enum hist_filter filter)
1279 {
1280 	h->filtered &= ~(1 << filter);
1281 	if (h->filtered)
1282 		return;
1283 
1284 	/* force fold unfiltered entry for simplicity */
1285 	h->unfolded = false;
1286 	h->row_offset = 0;
1287 	h->nr_rows = 0;
1288 
1289 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1290 
1291 	hists__inc_filter_stats(hists, h);
1292 	hists__calc_col_len(hists, h);
1293 }
1294 
1295 
1296 static bool hists__filter_entry_by_dso(struct hists *hists,
1297 				       struct hist_entry *he)
1298 {
1299 	if (hists->dso_filter != NULL &&
1300 	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1301 		he->filtered |= (1 << HIST_FILTER__DSO);
1302 		return true;
1303 	}
1304 
1305 	return false;
1306 }
1307 
1308 static bool hists__filter_entry_by_thread(struct hists *hists,
1309 					  struct hist_entry *he)
1310 {
1311 	if (hists->thread_filter != NULL &&
1312 	    he->thread != hists->thread_filter) {
1313 		he->filtered |= (1 << HIST_FILTER__THREAD);
1314 		return true;
1315 	}
1316 
1317 	return false;
1318 }
1319 
1320 static bool hists__filter_entry_by_symbol(struct hists *hists,
1321 					  struct hist_entry *he)
1322 {
1323 	if (hists->symbol_filter_str != NULL &&
1324 	    (!he->ms.sym || strstr(he->ms.sym->name,
1325 				   hists->symbol_filter_str) == NULL)) {
1326 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
1327 		return true;
1328 	}
1329 
1330 	return false;
1331 }
1332 
1333 static bool hists__filter_entry_by_socket(struct hists *hists,
1334 					  struct hist_entry *he)
1335 {
1336 	if ((hists->socket_filter > -1) &&
1337 	    (he->socket != hists->socket_filter)) {
1338 		he->filtered |= (1 << HIST_FILTER__SOCKET);
1339 		return true;
1340 	}
1341 
1342 	return false;
1343 }
1344 
1345 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1346 
1347 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1348 {
1349 	struct rb_node *nd;
1350 
1351 	hists->stats.nr_non_filtered_samples = 0;
1352 
1353 	hists__reset_filter_stats(hists);
1354 	hists__reset_col_len(hists);
1355 
1356 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1357 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1358 
1359 		if (filter(hists, h))
1360 			continue;
1361 
1362 		hists__remove_entry_filter(hists, h, type);
1363 	}
1364 }
1365 
1366 void hists__filter_by_thread(struct hists *hists)
1367 {
1368 	hists__filter_by_type(hists, HIST_FILTER__THREAD,
1369 			      hists__filter_entry_by_thread);
1370 }
1371 
1372 void hists__filter_by_dso(struct hists *hists)
1373 {
1374 	hists__filter_by_type(hists, HIST_FILTER__DSO,
1375 			      hists__filter_entry_by_dso);
1376 }
1377 
1378 void hists__filter_by_symbol(struct hists *hists)
1379 {
1380 	hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
1381 			      hists__filter_entry_by_symbol);
1382 }
1383 
1384 void hists__filter_by_socket(struct hists *hists)
1385 {
1386 	hists__filter_by_type(hists, HIST_FILTER__SOCKET,
1387 			      hists__filter_entry_by_socket);
1388 }
1389 
1390 void events_stats__inc(struct events_stats *stats, u32 type)
1391 {
1392 	++stats->nr_events[0];
1393 	++stats->nr_events[type];
1394 }
1395 
1396 void hists__inc_nr_events(struct hists *hists, u32 type)
1397 {
1398 	events_stats__inc(&hists->stats, type);
1399 }
1400 
1401 void hists__inc_nr_samples(struct hists *hists, bool filtered)
1402 {
1403 	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
1404 	if (!filtered)
1405 		hists->stats.nr_non_filtered_samples++;
1406 }
1407 
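/*
 * Add a zero-period placeholder matching @pair to @hists, used by
 * hists__link() below so every entry of the paired hists has a counterpart
 * in the leader (e.g. for 'perf diff' style comparisons).
 */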
1408 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
1409 						 struct hist_entry *pair)
1410 {
1411 	struct rb_root *root;
1412 	struct rb_node **p;
1413 	struct rb_node *parent = NULL;
1414 	struct hist_entry *he;
1415 	int64_t cmp;
1416 
1417 	if (sort__need_collapse)
1418 		root = &hists->entries_collapsed;
1419 	else
1420 		root = hists->entries_in;
1421 
1422 	p = &root->rb_node;
1423 
1424 	while (*p != NULL) {
1425 		parent = *p;
1426 		he = rb_entry(parent, struct hist_entry, rb_node_in);
1427 
1428 		cmp = hist_entry__collapse(he, pair);
1429 
1430 		if (!cmp)
1431 			goto out;
1432 
1433 		if (cmp < 0)
1434 			p = &(*p)->rb_left;
1435 		else
1436 			p = &(*p)->rb_right;
1437 	}
1438 
1439 	he = hist_entry__new(pair, true);
1440 	if (he) {
1441 		memset(&he->stat, 0, sizeof(he->stat));
1442 		he->hists = hists;
1443 		rb_link_node(&he->rb_node_in, parent, p);
1444 		rb_insert_color(&he->rb_node_in, root);
1445 		hists__inc_stats(hists, he);
1446 		he->dummy = true;
1447 	}
1448 out:
1449 	return he;
1450 }
1451 
1452 static struct hist_entry *hists__find_entry(struct hists *hists,
1453 					    struct hist_entry *he)
1454 {
1455 	struct rb_node *n;
1456 
1457 	if (sort__need_collapse)
1458 		n = hists->entries_collapsed.rb_node;
1459 	else
1460 		n = hists->entries_in->rb_node;
1461 
1462 	while (n) {
1463 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1464 		int64_t cmp = hist_entry__collapse(iter, he);
1465 
1466 		if (cmp < 0)
1467 			n = n->rb_left;
1468 		else if (cmp > 0)
1469 			n = n->rb_right;
1470 		else
1471 			return iter;
1472 	}
1473 
1474 	return NULL;
1475 }
1476 
1477 /*
1478  * Look for pairs to link to the leader buckets (hist_entries):
1479  */
1480 void hists__match(struct hists *leader, struct hists *other)
1481 {
1482 	struct rb_root *root;
1483 	struct rb_node *nd;
1484 	struct hist_entry *pos, *pair;
1485 
1486 	if (sort__need_collapse)
1487 		root = &leader->entries_collapsed;
1488 	else
1489 		root = leader->entries_in;
1490 
1491 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1492 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
1493 		pair = hists__find_entry(other, pos);
1494 
1495 		if (pair)
1496 			hist_entry__add_pair(pair, pos);
1497 	}
1498 }
1499 
1500 /*
1501  * Look for entries in the other hists that are not present in the leader; if
1502  * we find them, add a dummy entry on the leader hists, with period=0,
1503  * nr_events=0, to serve as the list header.
1504  */
1505 int hists__link(struct hists *leader, struct hists *other)
1506 {
1507 	struct rb_root *root;
1508 	struct rb_node *nd;
1509 	struct hist_entry *pos, *pair;
1510 
1511 	if (sort__need_collapse)
1512 		root = &other->entries_collapsed;
1513 	else
1514 		root = other->entries_in;
1515 
1516 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1517 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
1518 
1519 		if (!hist_entry__has_pairs(pos)) {
1520 			pair = hists__add_dummy_entry(leader, pos);
1521 			if (pair == NULL)
1522 				return -1;
1523 			hist_entry__add_pair(pos, pair);
1524 		}
1525 	}
1526 
1527 	return 0;
1528 }
1529 
1530 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
1531 			  struct perf_sample *sample, bool nonany_branch_mode)
1532 {
1533 	struct branch_info *bi;
1534 
1535 	/* If we have branch cycles always annotate them. */
1536 	if (bs && bs->nr && bs->entries[0].flags.cycles) {
1537 		int i;
1538 
1539 		bi = sample__resolve_bstack(sample, al);
1540 		if (bi) {
1541 			struct addr_map_symbol *prev = NULL;
1542 
1543 			/*
1544 			 * Ignore errors; we still want to process the
1545 			 * other entries.
1546 			 *
1547 			 * For non-standard branch modes, always
1548 			 * force no IPC (prev == NULL)
1549 			 *
1550 			 * Note that perf stores branches reversed from
1551 			 * program order!
1552 			 */
1553 			for (i = bs->nr - 1; i >= 0; i--) {
1554 				addr_map_symbol__account_cycles(&bi[i].from,
1555 					nonany_branch_mode ? NULL : prev,
1556 					bi[i].flags.cycles);
1557 				prev = &bi[i].to;
1558 			}
1559 			free(bi);
1560 		}
1561 	}
1562 }
1563 
1564 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
1565 {
1566 	struct perf_evsel *pos;
1567 	size_t ret = 0;
1568 
1569 	evlist__for_each(evlist, pos) {
1570 		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1571 		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
1572 	}
1573 
1574 	return ret;
1575 }
1576 
1577 
1578 u64 hists__total_period(struct hists *hists)
1579 {
1580 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1581 		hists->stats.total_period;
1582 }
1583 
1584 int parse_filter_percentage(const struct option *opt __maybe_unused,
1585 			    const char *arg, int unset __maybe_unused)
1586 {
1587 	if (!strcmp(arg, "relative"))
1588 		symbol_conf.filter_relative = true;
1589 	else if (!strcmp(arg, "absolute"))
1590 		symbol_conf.filter_relative = false;
1591 	else
1592 		return -1;
1593 
1594 	return 0;
1595 }
1596 
1597 int perf_hist_config(const char *var, const char *value)
1598 {
1599 	if (!strcmp(var, "hist.percentage"))
1600 		return parse_filter_percentage(NULL, value, 0);
1601 
1602 	return 0;
1603 }
1604 
1605 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
1606 {
1607 	memset(hists, 0, sizeof(*hists));
1608 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1609 	hists->entries_in = &hists->entries_in_array[0];
1610 	hists->entries_collapsed = RB_ROOT;
1611 	hists->entries = RB_ROOT;
1612 	pthread_mutex_init(&hists->lock, NULL);
1613 	hists->socket_filter = -1;
1614 	hists->hpp_list = hpp_list;
1615 	return 0;
1616 }
1617 
1618 static void hists__delete_remaining_entries(struct rb_root *root)
1619 {
1620 	struct rb_node *node;
1621 	struct hist_entry *he;
1622 
1623 	while (!RB_EMPTY_ROOT(root)) {
1624 		node = rb_first(root);
1625 		rb_erase(node, root);
1626 
1627 		he = rb_entry(node, struct hist_entry, rb_node_in);
1628 		hist_entry__delete(he);
1629 	}
1630 }
1631 
1632 static void hists__delete_all_entries(struct hists *hists)
1633 {
1634 	hists__delete_entries(hists);
1635 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
1636 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
1637 	hists__delete_remaining_entries(&hists->entries_collapsed);
1638 }
1639 
1640 static void hists_evsel__exit(struct perf_evsel *evsel)
1641 {
1642 	struct hists *hists = evsel__hists(evsel);
1643 
1644 	hists__delete_all_entries(hists);
1645 }
1646 
1647 static int hists_evsel__init(struct perf_evsel *evsel)
1648 {
1649 	struct hists *hists = evsel__hists(evsel);
1650 
1651 	__hists__init(hists, &perf_hpp_list);
1652 	return 0;
1653 }
1654 
1655 /*
1656  * XXX We probably need a hists_evsel__exit() to free the hist_entries
1657  * stored in the rbtree...
1658  */
1659 
1660 int hists__init(void)
1661 {
1662 	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
1663 					    hists_evsel__init,
1664 					    hists_evsel__exit);
1665 	if (err)
1666 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
1667 
1668 	return err;
1669 }
1670 
1671 void perf_hpp_list__init(struct perf_hpp_list *list)
1672 {
1673 	INIT_LIST_HEAD(&list->fields);
1674 	INIT_LIST_HEAD(&list->sorts);
1675 }
1676