xref: /linux/tools/perf/util/hist.c (revision 2975489458c59ce2e348b1b3aef5d8d2acb5cc8d)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "map.h"
8 #include "map_symbol.h"
9 #include "branch.h"
10 #include "mem-events.h"
11 #include "session.h"
12 #include "namespaces.h"
13 #include "sort.h"
14 #include "units.h"
15 #include "evlist.h"
16 #include "evsel.h"
17 #include "annotate.h"
18 #include "srcline.h"
19 #include "symbol.h"
20 #include "thread.h"
21 #include "block-info.h"
22 #include "ui/progress.h"
23 #include <errno.h>
24 #include <math.h>
25 #include <inttypes.h>
26 #include <sys/param.h>
27 #include <linux/rbtree.h>
28 #include <linux/string.h>
29 #include <linux/time64.h>
30 #include <linux/zalloc.h>
31 
32 static bool hists__filter_entry_by_dso(struct hists *hists,
33 				       struct hist_entry *he);
34 static bool hists__filter_entry_by_thread(struct hists *hists,
35 					  struct hist_entry *he);
36 static bool hists__filter_entry_by_symbol(struct hists *hists,
37 					  struct hist_entry *he);
38 static bool hists__filter_entry_by_socket(struct hists *hists,
39 					  struct hist_entry *he);
40 
41 u16 hists__col_len(struct hists *hists, enum hist_column col)
42 {
43 	return hists->col_len[col];
44 }
45 
46 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
47 {
48 	hists->col_len[col] = len;
49 }
50 
51 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
52 {
53 	if (len > hists__col_len(hists, col)) {
54 		hists__set_col_len(hists, col, len);
55 		return true;
56 	}
57 	return false;
58 }
59 
60 void hists__reset_col_len(struct hists *hists)
61 {
62 	enum hist_column col;
63 
64 	for (col = 0; col < HISTC_NR_COLS; ++col)
65 		hists__set_col_len(hists, col, 0);
66 }
67 
68 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
69 {
70 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
71 
72 	if (hists__col_len(hists, dso) < unresolved_col_width &&
73 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
74 	    !symbol_conf.dso_list)
75 		hists__set_col_len(hists, dso, unresolved_col_width);
76 }
77 
78 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
79 {
80 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
81 	int symlen;
82 	u16 len;
83 
84 	if (h->block_info)
85 		return;
86 	/*
87 	 * +4 accounts for '[x] ' priv level info
88 	 * +2 accounts for 0x prefix on raw addresses
89 	 * +3 accounts for ' y ' symtab origin info
90 	 */
91 	if (h->ms.sym) {
92 		symlen = h->ms.sym->namelen + 4;
93 		if (verbose > 0)
94 			symlen += BITS_PER_LONG / 4 + 2 + 3;
95 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
96 	} else {
97 		symlen = unresolved_col_width + 4 + 2;
98 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
99 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
100 	}
101 
102 	len = thread__comm_len(h->thread);
103 	if (hists__new_col_len(hists, HISTC_COMM, len))
104 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
105 
106 	if (h->ms.map) {
107 		len = dso__name_len(h->ms.map->dso);
108 		hists__new_col_len(hists, HISTC_DSO, len);
109 	}
110 
111 	if (h->parent)
112 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
113 
114 	if (h->branch_info) {
115 		if (h->branch_info->from.sym) {
116 			symlen = (int)h->branch_info->from.sym->namelen + 4;
117 			if (verbose > 0)
118 				symlen += BITS_PER_LONG / 4 + 2 + 3;
119 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
120 
121 			symlen = dso__name_len(h->branch_info->from.map->dso);
122 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
123 		} else {
124 			symlen = unresolved_col_width + 4 + 2;
125 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
126 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
127 		}
128 
129 		if (h->branch_info->to.sym) {
130 			symlen = (int)h->branch_info->to.sym->namelen + 4;
131 			if (verbose > 0)
132 				symlen += BITS_PER_LONG / 4 + 2 + 3;
133 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
134 
135 			symlen = dso__name_len(h->branch_info->to.map->dso);
136 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
137 		} else {
138 			symlen = unresolved_col_width + 4 + 2;
139 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
140 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
141 		}
142 
143 		if (h->branch_info->srcline_from)
144 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
145 					strlen(h->branch_info->srcline_from));
146 		if (h->branch_info->srcline_to)
147 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
148 					strlen(h->branch_info->srcline_to));
149 	}
150 
151 	if (h->mem_info) {
152 		if (h->mem_info->daddr.sym) {
153 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
154 			       + unresolved_col_width + 2;
155 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
156 					   symlen);
157 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
158 					   symlen + 1);
159 		} else {
160 			symlen = unresolved_col_width + 4 + 2;
161 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
162 					   symlen);
163 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
164 					   symlen);
165 		}
166 
167 		if (h->mem_info->iaddr.sym) {
168 			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
169 			       + unresolved_col_width + 2;
170 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
171 					   symlen);
172 		} else {
173 			symlen = unresolved_col_width + 4 + 2;
174 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
175 					   symlen);
176 		}
177 
178 		if (h->mem_info->daddr.map) {
179 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
180 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
181 					   symlen);
182 		} else {
183 			symlen = unresolved_col_width + 4 + 2;
184 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
185 		}
186 
187 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
188 				   unresolved_col_width + 4 + 2);
189 
190 	} else {
191 		symlen = unresolved_col_width + 4 + 2;
192 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
193 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
194 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
195 	}
196 
197 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
198 	hists__new_col_len(hists, HISTC_CPU, 3);
199 	hists__new_col_len(hists, HISTC_SOCKET, 6);
200 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
201 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
202 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
203 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
204 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
205 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
206 	if (symbol_conf.nanosecs)
207 		hists__new_col_len(hists, HISTC_TIME, 16);
208 	else
209 		hists__new_col_len(hists, HISTC_TIME, 12);
210 
211 	if (h->srcline) {
212 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
213 		hists__new_col_len(hists, HISTC_SRCLINE, len);
214 	}
215 
216 	if (h->srcfile)
217 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
218 
219 	if (h->transaction)
220 		hists__new_col_len(hists, HISTC_TRANSACTION,
221 				   hist_entry__transaction_len());
222 
223 	if (h->trace_output)
224 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
225 }
226 
227 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
228 {
229 	struct rb_node *next = rb_first_cached(&hists->entries);
230 	struct hist_entry *n;
231 	int row = 0;
232 
233 	hists__reset_col_len(hists);
234 
235 	while (next && row++ < max_rows) {
236 		n = rb_entry(next, struct hist_entry, rb_node);
237 		if (!n->filtered)
238 			hists__calc_col_len(hists, n);
239 		next = rb_next(&n->rb_node);
240 	}
241 }
242 
243 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
244 					unsigned int cpumode, u64 period)
245 {
246 	switch (cpumode) {
247 	case PERF_RECORD_MISC_KERNEL:
248 		he_stat->period_sys += period;
249 		break;
250 	case PERF_RECORD_MISC_USER:
251 		he_stat->period_us += period;
252 		break;
253 	case PERF_RECORD_MISC_GUEST_KERNEL:
254 		he_stat->period_guest_sys += period;
255 		break;
256 	case PERF_RECORD_MISC_GUEST_USER:
257 		he_stat->period_guest_us += period;
258 		break;
259 	default:
260 		break;
261 	}
262 }
263 
264 static long hist_time(unsigned long htime)
265 {
266 	unsigned long time_quantum = symbol_conf.time_quantum;
267 	if (time_quantum)
268 		return (htime / time_quantum) * time_quantum;
269 	return htime;
270 }
271 
272 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
273 				u64 weight)
274 {
275 
276 	he_stat->period		+= period;
277 	he_stat->weight		+= weight;
278 	he_stat->nr_events	+= 1;
279 }
280 
281 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
282 {
283 	dest->period		+= src->period;
284 	dest->period_sys	+= src->period_sys;
285 	dest->period_us		+= src->period_us;
286 	dest->period_guest_sys	+= src->period_guest_sys;
287 	dest->period_guest_us	+= src->period_guest_us;
288 	dest->nr_events		+= src->nr_events;
289 	dest->weight		+= src->weight;
290 }
291 
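/*
 * Age the stats by an exponential factor of 7/8 (~0.875); after eight
 * decay rounds an entry that gets no new samples retains roughly
 * (7/8)^8 ~= 34% of its original period.  Used by the live modes
 * (e.g. 'perf top') so that stale entries fade away over time.
 */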
292 static void he_stat__decay(struct he_stat *he_stat)
293 {
294 	he_stat->period = (he_stat->period * 7) / 8;
295 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
296 	/* XXX need decay for weight too? */
297 }
298 
299 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
300 
301 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
302 {
303 	u64 prev_period = he->stat.period;
304 	u64 diff;
305 
306 	if (prev_period == 0)
307 		return true;
308 
309 	he_stat__decay(&he->stat);
310 	if (symbol_conf.cumulate_callchain)
311 		he_stat__decay(he->stat_acc);
312 	decay_callchain(he->callchain);
313 
314 	diff = prev_period - he->stat.period;
315 
316 	if (!he->depth) {
317 		hists->stats.total_period -= diff;
318 		if (!he->filtered)
319 			hists->stats.total_non_filtered_period -= diff;
320 	}
321 
322 	if (!he->leaf) {
323 		struct hist_entry *child;
324 		struct rb_node *node = rb_first_cached(&he->hroot_out);
325 		while (node) {
326 			child = rb_entry(node, struct hist_entry, rb_node);
327 			node = rb_next(node);
328 
329 			if (hists__decay_entry(hists, child))
330 				hists__delete_entry(hists, child);
331 		}
332 	}
333 
334 	return he->stat.period == 0;
335 }
336 
337 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
338 {
339 	struct rb_root_cached *root_in;
340 	struct rb_root_cached *root_out;
341 
342 	if (he->parent_he) {
343 		root_in  = &he->parent_he->hroot_in;
344 		root_out = &he->parent_he->hroot_out;
345 	} else {
346 		if (hists__has(hists, need_collapse))
347 			root_in = &hists->entries_collapsed;
348 		else
349 			root_in = hists->entries_in;
350 		root_out = &hists->entries;
351 	}
352 
353 	rb_erase_cached(&he->rb_node_in, root_in);
354 	rb_erase_cached(&he->rb_node, root_out);
355 
356 	--hists->nr_entries;
357 	if (!he->filtered)
358 		--hists->nr_non_filtered_entries;
359 
360 	hist_entry__delete(he);
361 }
362 
363 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
364 {
365 	struct rb_node *next = rb_first_cached(&hists->entries);
366 	struct hist_entry *n;
367 
368 	while (next) {
369 		n = rb_entry(next, struct hist_entry, rb_node);
370 		next = rb_next(&n->rb_node);
371 		if (((zap_user && n->level == '.') ||
372 		     (zap_kernel && n->level != '.') ||
373 		     hists__decay_entry(hists, n))) {
374 			hists__delete_entry(hists, n);
375 		}
376 	}
377 }
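/*
 * Usage sketch: the live modes call this periodically from their
 * refresh path, e.g.
 *
 *	hists__decay_entries(hists, hide_user_symbols, hide_kernel_symbols);
 *
 * where the two flags are caller-specific.  Entries whose period
 * decays to zero are deleted outright.
 */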
378 
379 void hists__delete_entries(struct hists *hists)
380 {
381 	struct rb_node *next = rb_first_cached(&hists->entries);
382 	struct hist_entry *n;
383 
384 	while (next) {
385 		n = rb_entry(next, struct hist_entry, rb_node);
386 		next = rb_next(&n->rb_node);
387 
388 		hists__delete_entry(hists, n);
389 	}
390 }
391 
392 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
393 {
394 	struct rb_node *next = rb_first_cached(&hists->entries);
395 	struct hist_entry *n;
396 	int i = 0;
397 
398 	while (next) {
399 		n = rb_entry(next, struct hist_entry, rb_node);
400 		if (i == idx)
401 			return n;
402 
403 		next = rb_next(&n->rb_node);
404 		i++;
405 	}
406 
407 	return NULL;
408 }
409 
410 /*
411  * histogram, sorted on item, collects periods
412  */
413 
414 static int hist_entry__init(struct hist_entry *he,
415 			    struct hist_entry *template,
416 			    bool sample_self,
417 			    size_t callchain_size)
418 {
419 	*he = *template;
420 	he->callchain_size = callchain_size;
421 
422 	if (symbol_conf.cumulate_callchain) {
423 		he->stat_acc = malloc(sizeof(he->stat));
424 		if (he->stat_acc == NULL)
425 			return -ENOMEM;
426 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
427 		if (!sample_self)
428 			memset(&he->stat, 0, sizeof(he->stat));
429 	}
430 
431 	map__get(he->ms.map);
432 
433 	if (he->branch_info) {
434 		/*
435 		 * This branch info is (part of) an array allocated by
436 		 * sample__resolve_bstack() and will be freed after
437 		 * adding new entries, so we need to save a copy.
438 		 */
439 		he->branch_info = malloc(sizeof(*he->branch_info));
440 		if (he->branch_info == NULL)
441 			goto err;
442 
443 		memcpy(he->branch_info, template->branch_info,
444 		       sizeof(*he->branch_info));
445 
446 		map__get(he->branch_info->from.map);
447 		map__get(he->branch_info->to.map);
448 	}
449 
450 	if (he->mem_info) {
451 		map__get(he->mem_info->iaddr.map);
452 		map__get(he->mem_info->daddr.map);
453 	}
454 
455 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
456 		callchain_init(he->callchain);
457 
458 	if (he->raw_data) {
459 		he->raw_data = memdup(he->raw_data, he->raw_size);
460 		if (he->raw_data == NULL)
461 			goto err_infos;
462 	}
463 
464 	if (he->srcline) {
465 		he->srcline = strdup(he->srcline);
466 		if (he->srcline == NULL)
467 			goto err_rawdata;
468 	}
469 
470 	if (symbol_conf.res_sample) {
471 		he->res_samples = calloc(symbol_conf.res_sample,
472 					 sizeof(struct res_sample));
473 		if (!he->res_samples)
474 			goto err_srcline;
475 	}
476 
477 	INIT_LIST_HEAD(&he->pairs.node);
478 	thread__get(he->thread);
479 	he->hroot_in  = RB_ROOT_CACHED;
480 	he->hroot_out = RB_ROOT_CACHED;
481 
482 	if (!symbol_conf.report_hierarchy)
483 		he->leaf = true;
484 
485 	return 0;
486 
487 err_srcline:
488 	zfree(&he->srcline);
489 
490 err_rawdata:
491 	zfree(&he->raw_data);
492 
493 err_infos:
494 	if (he->branch_info) {
495 		map__put(he->branch_info->from.map);
496 		map__put(he->branch_info->to.map);
497 		zfree(&he->branch_info);
498 	}
499 	if (he->mem_info) {
500 		map__put(he->mem_info->iaddr.map);
501 		map__put(he->mem_info->daddr.map);
502 	}
503 err:
504 	map__zput(he->ms.map);
505 	zfree(&he->stat_acc);
506 	return -ENOMEM;
507 }
508 
509 static void *hist_entry__zalloc(size_t size)
510 {
511 	return zalloc(size + sizeof(struct hist_entry));
512 }
513 
514 static void hist_entry__free(void *ptr)
515 {
516 	free(ptr);
517 }
518 
519 static struct hist_entry_ops default_ops = {
520 	.new	= hist_entry__zalloc,
521 	.free	= hist_entry__free,
522 };
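/*
 * ops->new() receives only the callchain size; hist_entry__zalloc()
 * adds sizeof(struct hist_entry) itself, so the callchain root (if
 * any) shares the allocation, right behind the entry.  Custom ops let
 * callers embed a hist_entry in a larger per-entry structure.
 */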
523 
524 static struct hist_entry *hist_entry__new(struct hist_entry *template,
525 					  bool sample_self)
526 {
527 	struct hist_entry_ops *ops = template->ops;
528 	size_t callchain_size = 0;
529 	struct hist_entry *he;
530 	int err = 0;
531 
532 	if (!ops)
533 		ops = template->ops = &default_ops;
534 
535 	if (symbol_conf.use_callchain)
536 		callchain_size = sizeof(struct callchain_root);
537 
538 	he = ops->new(callchain_size);
539 	if (he) {
540 		err = hist_entry__init(he, template, sample_self, callchain_size);
541 		if (err) {
542 			ops->free(he);
543 			he = NULL;
544 		}
545 	}
546 
547 	return he;
548 }
549 
550 static u8 symbol__parent_filter(const struct symbol *parent)
551 {
552 	if (symbol_conf.exclude_other && parent == NULL)
553 		return 1 << HIST_FILTER__PARENT;
554 	return 0;
555 }
556 
557 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
558 {
559 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
560 		return;
561 
562 	he->hists->callchain_period += period;
563 	if (!he->filtered)
564 		he->hists->callchain_non_filtered_period += period;
565 }
566 
567 static struct hist_entry *hists__findnew_entry(struct hists *hists,
568 					       struct hist_entry *entry,
569 					       struct addr_location *al,
570 					       bool sample_self)
571 {
572 	struct rb_node **p;
573 	struct rb_node *parent = NULL;
574 	struct hist_entry *he;
575 	int64_t cmp;
576 	u64 period = entry->stat.period;
577 	u64 weight = entry->stat.weight;
578 	bool leftmost = true;
579 
580 	p = &hists->entries_in->rb_root.rb_node;
581 
582 	while (*p != NULL) {
583 		parent = *p;
584 		he = rb_entry(parent, struct hist_entry, rb_node_in);
585 
586 		/*
587 		 * Make sure that it receives arguments in the same order as
588 		 * hist_entry__collapse() so that we can use an appropriate
589 		 * function when searching an entry regardless of which sort
590 		 * keys were used.
591 		 */
592 		cmp = hist_entry__cmp(he, entry);
593 
594 		if (!cmp) {
595 			if (sample_self) {
596 				he_stat__add_period(&he->stat, period, weight);
597 				hist_entry__add_callchain_period(he, period);
598 			}
599 			if (symbol_conf.cumulate_callchain)
600 				he_stat__add_period(he->stat_acc, period, weight);
601 
602 			/*
603 			 * This mem info was allocated from sample__resolve_mem
604 			 * and will not be used anymore.
605 			 */
606 			mem_info__zput(entry->mem_info);
607 
608 			block_info__zput(entry->block_info);
609 
610 			/* If the map of an existing hist_entry has
611 			 * become out-of-date due to an exec() or
612 			 * similar, update it.  Otherwise we will
613 			 * mis-adjust symbol addresses when computing
614 			 * the history counter to increment.
615 			 */
616 			if (he->ms.map != entry->ms.map) {
617 				map__put(he->ms.map);
618 				he->ms.map = map__get(entry->ms.map);
619 			}
620 			goto out;
621 		}
622 
623 		if (cmp < 0)
624 			p = &(*p)->rb_left;
625 		else {
626 			p = &(*p)->rb_right;
627 			leftmost = false;
628 		}
629 	}
630 
631 	he = hist_entry__new(entry, sample_self);
632 	if (!he)
633 		return NULL;
634 
635 	if (sample_self)
636 		hist_entry__add_callchain_period(he, period);
637 	hists->nr_entries++;
638 
639 	rb_link_node(&he->rb_node_in, parent, p);
640 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
641 out:
642 	if (sample_self)
643 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
644 	if (symbol_conf.cumulate_callchain)
645 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
646 	return he;
647 }
648 
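/*
 * Return a uniformly distributed value in [0, high) without modulo
 * bias: reject the first '-high % high' values so that the accepted
 * range is an exact multiple of 'high' (assuming random() covers the
 * full unsigned range).
 */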
649 static unsigned random_max(unsigned high)
650 {
651 	unsigned thresh = -high % high;
652 	for (;;) {
653 		unsigned r = random();
654 		if (r >= thresh)
655 			return r % high;
656 	}
657 }
658 
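/*
 * Keep a bounded reservoir of representative samples per entry: fill
 * the array first, then overwrite a random slot.  Unlike classic
 * reservoir sampling the newest sample is always kept, which slightly
 * favors recent samples.
 */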
659 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
660 {
661 	struct res_sample *r;
662 	int j;
663 
664 	if (he->num_res < symbol_conf.res_sample) {
665 		j = he->num_res++;
666 	} else {
667 		j = random_max(symbol_conf.res_sample);
668 	}
669 	r = &he->res_samples[j];
670 	r->time = sample->time;
671 	r->cpu = sample->cpu;
672 	r->tid = sample->tid;
673 }
674 
675 static struct hist_entry*
676 __hists__add_entry(struct hists *hists,
677 		   struct addr_location *al,
678 		   struct symbol *sym_parent,
679 		   struct branch_info *bi,
680 		   struct mem_info *mi,
681 		   struct block_info *block_info,
682 		   struct perf_sample *sample,
683 		   bool sample_self,
684 		   struct hist_entry_ops *ops)
685 {
686 	struct namespaces *ns = thread__namespaces(al->thread);
687 	struct hist_entry entry = {
688 		.thread	= al->thread,
689 		.comm = thread__comm(al->thread),
690 		.cgroup_id = {
691 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
692 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
693 		},
694 		.ms = {
695 			.map	= al->map,
696 			.sym	= al->sym,
697 		},
698 		.srcline = (char *) al->srcline,
699 		.socket	 = al->socket,
700 		.cpu	 = al->cpu,
701 		.cpumode = al->cpumode,
702 		.ip	 = al->addr,
703 		.level	 = al->level,
704 		.stat = {
705 			.nr_events = 1,
706 			.period	= sample->period,
707 			.weight = sample->weight,
708 		},
709 		.parent = sym_parent,
710 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
711 		.hists	= hists,
712 		.branch_info = bi,
713 		.mem_info = mi,
714 		.block_info = block_info,
715 		.transaction = sample->transaction,
716 		.raw_data = sample->raw_data,
717 		.raw_size = sample->raw_size,
718 		.ops = ops,
719 		.time = hist_time(sample->time),
720 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
721 
722 	if (!hists->has_callchains && he && he->callchain_size != 0)
723 		hists->has_callchains = true;
724 	if (he && symbol_conf.res_sample)
725 		hists__res_sample(he, sample);
726 	return he;
727 }
728 
729 struct hist_entry *hists__add_entry(struct hists *hists,
730 				    struct addr_location *al,
731 				    struct symbol *sym_parent,
732 				    struct branch_info *bi,
733 				    struct mem_info *mi,
734 				    struct perf_sample *sample,
735 				    bool sample_self)
736 {
737 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
738 				  sample, sample_self, NULL);
739 }
740 
741 struct hist_entry *hists__add_entry_ops(struct hists *hists,
742 					struct hist_entry_ops *ops,
743 					struct addr_location *al,
744 					struct symbol *sym_parent,
745 					struct branch_info *bi,
746 					struct mem_info *mi,
747 					struct perf_sample *sample,
748 					bool sample_self)
749 {
750 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
751 				  sample, sample_self, ops);
752 }
753 
754 struct hist_entry *hists__add_entry_block(struct hists *hists,
755 					  struct addr_location *al,
756 					  struct block_info *block_info)
757 {
758 	struct hist_entry entry = {
759 		.block_info = block_info,
760 		.hists = hists,
761 		.ms = {
762 			.map = al->map,
763 			.sym = al->sym,
764 		},
765 	}, *he = hists__findnew_entry(hists, &entry, al, false);
766 
767 	return he;
768 }
769 
770 static int
771 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
772 		    struct addr_location *al __maybe_unused)
773 {
774 	return 0;
775 }
776 
777 static int
778 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
779 			struct addr_location *al __maybe_unused)
780 {
781 	return 0;
782 }
783 
784 static int
785 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
786 {
787 	struct perf_sample *sample = iter->sample;
788 	struct mem_info *mi;
789 
790 	mi = sample__resolve_mem(sample, al);
791 	if (mi == NULL)
792 		return -ENOMEM;
793 
794 	iter->priv = mi;
795 	return 0;
796 }
797 
798 static int
799 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
800 {
801 	u64 cost;
802 	struct mem_info *mi = iter->priv;
803 	struct hists *hists = evsel__hists(iter->evsel);
804 	struct perf_sample *sample = iter->sample;
805 	struct hist_entry *he;
806 
807 	if (mi == NULL)
808 		return -EINVAL;
809 
810 	cost = sample->weight;
811 	if (!cost)
812 		cost = 1;
813 
814 	/*
815 	 * We must pass period=weight in order to get the correct
816 	 * sorting from hists__collapse_resort(), which is based solely
817 	 * on periods.  We want sorting to be done on nr_events * weight,
818 	 * and this is achieved indirectly by passing period=weight here
819 	 * and in he_stat__add_period().
820 	 */
821 	sample->period = cost;
822 
823 	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
824 			      sample, true);
825 	if (!he)
826 		return -ENOMEM;
827 
828 	iter->he = he;
829 	return 0;
830 }
831 
832 static int
833 iter_finish_mem_entry(struct hist_entry_iter *iter,
834 		      struct addr_location *al __maybe_unused)
835 {
836 	struct evsel *evsel = iter->evsel;
837 	struct hists *hists = evsel__hists(evsel);
838 	struct hist_entry *he = iter->he;
839 	int err = -EINVAL;
840 
841 	if (he == NULL)
842 		goto out;
843 
844 	hists__inc_nr_samples(hists, he->filtered);
845 
846 	err = hist_entry__append_callchain(he, iter->sample);
847 
848 out:
849 	/*
850 	 * We don't need to free iter->priv (mem_info) here since the mem info
851 	 * was either already freed in hists__findnew_entry() or passed to a
852 	 * new hist entry by hist_entry__new().
853 	 */
854 	iter->priv = NULL;
855 
856 	iter->he = NULL;
857 	return err;
858 }
859 
860 static int
861 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
862 {
863 	struct branch_info *bi;
864 	struct perf_sample *sample = iter->sample;
865 
866 	bi = sample__resolve_bstack(sample, al);
867 	if (!bi)
868 		return -ENOMEM;
869 
870 	iter->curr = 0;
871 	iter->total = sample->branch_stack->nr;
872 
873 	iter->priv = bi;
874 	return 0;
875 }
876 
877 static int
878 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
879 			     struct addr_location *al __maybe_unused)
880 {
881 	return 0;
882 }
883 
884 static int
885 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
886 {
887 	struct branch_info *bi = iter->priv;
888 	int i = iter->curr;
889 
890 	if (bi == NULL)
891 		return 0;
892 
893 	if (iter->curr >= iter->total)
894 		return 0;
895 
896 	al->map = bi[i].to.map;
897 	al->sym = bi[i].to.sym;
898 	al->addr = bi[i].to.addr;
899 	return 1;
900 }
901 
902 static int
903 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
904 {
905 	struct branch_info *bi;
906 	struct evsel *evsel = iter->evsel;
907 	struct hists *hists = evsel__hists(evsel);
908 	struct perf_sample *sample = iter->sample;
909 	struct hist_entry *he = NULL;
910 	int i = iter->curr;
911 	int err = 0;
912 
913 	bi = iter->priv;
914 
915 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
916 		goto out;
917 
918 	/*
919 	 * The report shows the percentage of total branches captured
920 	 * and not events sampled. Thus we use a pseudo period of 1.
921 	 */
922 	sample->period = 1;
923 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
924 
925 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
926 			      sample, true);
927 	if (he == NULL)
928 		return -ENOMEM;
929 
930 	hists__inc_nr_samples(hists, he->filtered);
931 
932 out:
933 	iter->he = he;
934 	iter->curr++;
935 	return err;
936 }
937 
938 static int
939 iter_finish_branch_entry(struct hist_entry_iter *iter,
940 			 struct addr_location *al __maybe_unused)
941 {
942 	zfree(&iter->priv);
943 	iter->he = NULL;
944 
945 	return iter->curr >= iter->total ? 0 : -1;
946 }
947 
948 static int
949 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
950 			  struct addr_location *al __maybe_unused)
951 {
952 	return 0;
953 }
954 
955 static int
956 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
957 {
958 	struct evsel *evsel = iter->evsel;
959 	struct perf_sample *sample = iter->sample;
960 	struct hist_entry *he;
961 
962 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
963 			      sample, true);
964 	if (he == NULL)
965 		return -ENOMEM;
966 
967 	iter->he = he;
968 	return 0;
969 }
970 
971 static int
972 iter_finish_normal_entry(struct hist_entry_iter *iter,
973 			 struct addr_location *al __maybe_unused)
974 {
975 	struct hist_entry *he = iter->he;
976 	struct evsel *evsel = iter->evsel;
977 	struct perf_sample *sample = iter->sample;
978 
979 	if (he == NULL)
980 		return 0;
981 
982 	iter->he = NULL;
983 
984 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
985 
986 	return hist_entry__append_callchain(he, sample);
987 }
988 
989 static int
990 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
991 			      struct addr_location *al __maybe_unused)
992 {
993 	struct hist_entry **he_cache;
994 
995 	callchain_cursor_commit(&callchain_cursor);
996 
997 	/*
998 	 * This is for detecting cycles or recursion so that they're
999 	 * accumulated only once, preventing entries from exceeding
1000 	 * 100% overhead.
1001 	 */
1002 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
1003 	if (he_cache == NULL)
1004 		return -ENOMEM;
1005 
1006 	iter->priv = he_cache;
1007 	iter->curr = 0;
1008 
1009 	return 0;
1010 }
1011 
1012 static int
1013 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1014 				 struct addr_location *al)
1015 {
1016 	struct evsel *evsel = iter->evsel;
1017 	struct hists *hists = evsel__hists(evsel);
1018 	struct perf_sample *sample = iter->sample;
1019 	struct hist_entry **he_cache = iter->priv;
1020 	struct hist_entry *he;
1021 	int err = 0;
1022 
1023 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
1024 			      sample, true);
1025 	if (he == NULL)
1026 		return -ENOMEM;
1027 
1028 	iter->he = he;
1029 	he_cache[iter->curr++] = he;
1030 
1031 	hist_entry__append_callchain(he, sample);
1032 
1033 	/*
1034 	 * We need to re-initialize the cursor since callchain_append()
1035 	 * advanced the cursor to the end.
1036 	 */
1037 	callchain_cursor_commit(&callchain_cursor);
1038 
1039 	hists__inc_nr_samples(hists, he->filtered);
1040 
1041 	return err;
1042 }
1043 
1044 static int
1045 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1046 			   struct addr_location *al)
1047 {
1048 	struct callchain_cursor_node *node;
1049 
1050 	node = callchain_cursor_current(&callchain_cursor);
1051 	if (node == NULL)
1052 		return 0;
1053 
1054 	return fill_callchain_info(al, node, iter->hide_unresolved);
1055 }
1056 
1057 static int
1058 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1059 			       struct addr_location *al)
1060 {
1061 	struct evsel *evsel = iter->evsel;
1062 	struct perf_sample *sample = iter->sample;
1063 	struct hist_entry **he_cache = iter->priv;
1064 	struct hist_entry *he;
1065 	struct hist_entry he_tmp = {
1066 		.hists = evsel__hists(evsel),
1067 		.cpu = al->cpu,
1068 		.thread = al->thread,
1069 		.comm = thread__comm(al->thread),
1070 		.ip = al->addr,
1071 		.ms = {
1072 			.map = al->map,
1073 			.sym = al->sym,
1074 		},
1075 		.srcline = (char *) al->srcline,
1076 		.parent = iter->parent,
1077 		.raw_data = sample->raw_data,
1078 		.raw_size = sample->raw_size,
1079 	};
1080 	int i;
1081 	struct callchain_cursor cursor;
1082 
1083 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
1084 
1085 	callchain_cursor_advance(&callchain_cursor);
1086 
1087 	/*
1088 	 * Check if there are duplicate entries in the callchain.
1089 	 * It's possible that it has cycles or recursive calls.
1090 	 */
1091 	for (i = 0; i < iter->curr; i++) {
1092 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1093 			/* to avoid calling the callback function */
1094 			iter->he = NULL;
1095 			return 0;
1096 		}
1097 	}
1098 
1099 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1100 			      sample, false);
1101 	if (he == NULL)
1102 		return -ENOMEM;
1103 
1104 	iter->he = he;
1105 	he_cache[iter->curr++] = he;
1106 
1107 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1108 		callchain_append(he->callchain, &cursor, sample->period);
1109 	return 0;
1110 }
1111 
1112 static int
1113 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1114 			     struct addr_location *al __maybe_unused)
1115 {
1116 	zfree(&iter->priv);
1117 	iter->he = NULL;
1118 
1119 	return 0;
1120 }
1121 
1122 const struct hist_iter_ops hist_iter_mem = {
1123 	.prepare_entry 		= iter_prepare_mem_entry,
1124 	.add_single_entry 	= iter_add_single_mem_entry,
1125 	.next_entry 		= iter_next_nop_entry,
1126 	.add_next_entry 	= iter_add_next_nop_entry,
1127 	.finish_entry 		= iter_finish_mem_entry,
1128 };
1129 
1130 const struct hist_iter_ops hist_iter_branch = {
1131 	.prepare_entry 		= iter_prepare_branch_entry,
1132 	.add_single_entry 	= iter_add_single_branch_entry,
1133 	.next_entry 		= iter_next_branch_entry,
1134 	.add_next_entry 	= iter_add_next_branch_entry,
1135 	.finish_entry 		= iter_finish_branch_entry,
1136 };
1137 
1138 const struct hist_iter_ops hist_iter_normal = {
1139 	.prepare_entry 		= iter_prepare_normal_entry,
1140 	.add_single_entry 	= iter_add_single_normal_entry,
1141 	.next_entry 		= iter_next_nop_entry,
1142 	.add_next_entry 	= iter_add_next_nop_entry,
1143 	.finish_entry 		= iter_finish_normal_entry,
1144 };
1145 
1146 const struct hist_iter_ops hist_iter_cumulative = {
1147 	.prepare_entry 		= iter_prepare_cumulative_entry,
1148 	.add_single_entry 	= iter_add_single_cumulative_entry,
1149 	.next_entry 		= iter_next_cumulative_entry,
1150 	.add_next_entry 	= iter_add_next_cumulative_entry,
1151 	.finish_entry 		= iter_finish_cumulative_entry,
1152 };
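/*
 * These ops tables drive hist_entry_iter__add() below: prepare_entry()
 * resolves per-sample data, add_single_entry() adds the first hist
 * entry, next_entry()/add_next_entry() then loop over any remaining
 * entries (branch stack or callchain nodes), and finish_entry() does
 * the cleanup.
 */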
1153 
1154 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1155 			 int max_stack_depth, void *arg)
1156 {
1157 	int err, err2;
1158 	struct map *alm = NULL;
1159 
1160 	if (al)
1161 		alm = map__get(al->map);
1162 
1163 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1164 					iter->evsel, al, max_stack_depth);
1165 	if (err) {
1166 		map__put(alm);
1167 		return err;
1168 	}
1169 
1170 	err = iter->ops->prepare_entry(iter, al);
1171 	if (err)
1172 		goto out;
1173 
1174 	err = iter->ops->add_single_entry(iter, al);
1175 	if (err)
1176 		goto out;
1177 
1178 	if (iter->he && iter->add_entry_cb) {
1179 		err = iter->add_entry_cb(iter, al, true, arg);
1180 		if (err)
1181 			goto out;
1182 	}
1183 
1184 	while (iter->ops->next_entry(iter, al)) {
1185 		err = iter->ops->add_next_entry(iter, al);
1186 		if (err)
1187 			break;
1188 
1189 		if (iter->he && iter->add_entry_cb) {
1190 			err = iter->add_entry_cb(iter, al, false, arg);
1191 			if (err)
1192 				goto out;
1193 		}
1194 	}
1195 
1196 out:
1197 	err2 = iter->ops->finish_entry(iter, al);
1198 	if (!err)
1199 		err = err2;
1200 
1201 	map__put(alm);
1202 
1203 	return err;
1204 }
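/*
 * Caller sketch (field names as used in this file; real callers such
 * as 'perf report' typically also set hide_unresolved and
 * add_entry_cb):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, cb_arg);
 */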
1205 
1206 int64_t
1207 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1208 {
1209 	struct hists *hists = left->hists;
1210 	struct perf_hpp_fmt *fmt;
1211 	int64_t cmp = 0;
1212 
1213 	hists__for_each_sort_list(hists, fmt) {
1214 		if (perf_hpp__is_dynamic_entry(fmt) &&
1215 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1216 			continue;
1217 
1218 		cmp = fmt->cmp(fmt, left, right);
1219 		if (cmp)
1220 			break;
1221 	}
1222 
1223 	return cmp;
1224 }
1225 
1226 int64_t
1227 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1228 {
1229 	struct hists *hists = left->hists;
1230 	struct perf_hpp_fmt *fmt;
1231 	int64_t cmp = 0;
1232 
1233 	hists__for_each_sort_list(hists, fmt) {
1234 		if (perf_hpp__is_dynamic_entry(fmt) &&
1235 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1236 			continue;
1237 
1238 		cmp = fmt->collapse(fmt, left, right);
1239 		if (cmp)
1240 			break;
1241 	}
1242 
1243 	return cmp;
1244 }
1245 
1246 void hist_entry__delete(struct hist_entry *he)
1247 {
1248 	struct hist_entry_ops *ops = he->ops;
1249 
1250 	thread__zput(he->thread);
1251 	map__zput(he->ms.map);
1252 
1253 	if (he->branch_info) {
1254 		map__zput(he->branch_info->from.map);
1255 		map__zput(he->branch_info->to.map);
1256 		free_srcline(he->branch_info->srcline_from);
1257 		free_srcline(he->branch_info->srcline_to);
1258 		zfree(&he->branch_info);
1259 	}
1260 
1261 	if (he->mem_info) {
1262 		map__zput(he->mem_info->iaddr.map);
1263 		map__zput(he->mem_info->daddr.map);
1264 		mem_info__zput(he->mem_info);
1265 	}
1266 
1267 	if (he->block_info)
1268 		block_info__zput(he->block_info);
1269 
1270 	zfree(&he->res_samples);
1271 	zfree(&he->stat_acc);
1272 	free_srcline(he->srcline);
1273 	if (he->srcfile && he->srcfile[0])
1274 		zfree(&he->srcfile);
1275 	free_callchain(he->callchain);
1276 	zfree(&he->trace_output);
1277 	zfree(&he->raw_data);
1278 	ops->free(he);
1279 }
1280 
1281 /*
1282  * If this is not the last column, then we need to pad it according to the
1283  * pre-calculated max length for this column; otherwise don't bother adding
1284  * spaces, because that would break viewing this with, for instance, 'less',
1285  * which would show tons of trailing spaces when a long C++ demangled method
1286  * name is sampled.
1287  */
1288 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1289 				   struct perf_hpp_fmt *fmt, int printed)
1290 {
1291 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1292 		const int width = fmt->width(fmt, hpp, he->hists);
1293 		if (printed < width) {
1294 			advance_hpp(hpp, printed);
1295 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1296 		}
1297 	}
1298 
1299 	return printed;
1300 }
1301 
1302 /*
1303  * collapse the histogram
1304  */
1305 
1306 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1307 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1308 				       enum hist_filter type);
1309 
1310 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1311 
1312 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1313 {
1314 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1315 }
1316 
1317 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1318 						enum hist_filter type,
1319 						fmt_chk_fn check)
1320 {
1321 	struct perf_hpp_fmt *fmt;
1322 	bool type_match = false;
1323 	struct hist_entry *parent = he->parent_he;
1324 
1325 	switch (type) {
1326 	case HIST_FILTER__THREAD:
1327 		if (symbol_conf.comm_list == NULL &&
1328 		    symbol_conf.pid_list == NULL &&
1329 		    symbol_conf.tid_list == NULL)
1330 			return;
1331 		break;
1332 	case HIST_FILTER__DSO:
1333 		if (symbol_conf.dso_list == NULL)
1334 			return;
1335 		break;
1336 	case HIST_FILTER__SYMBOL:
1337 		if (symbol_conf.sym_list == NULL)
1338 			return;
1339 		break;
1340 	case HIST_FILTER__PARENT:
1341 	case HIST_FILTER__GUEST:
1342 	case HIST_FILTER__HOST:
1343 	case HIST_FILTER__SOCKET:
1344 	case HIST_FILTER__C2C:
1345 	default:
1346 		return;
1347 	}
1348 
1349 	/* if it's filtered by its own fmt, it has to have filter bits */
1350 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1351 		if (check(fmt)) {
1352 			type_match = true;
1353 			break;
1354 		}
1355 	}
1356 
1357 	if (type_match) {
1358 		/*
1359 		 * If the filter is for the current level's entry, propagate
1360 		 * the filter marker to its parents.  The marker bit was
1361 		 * already set by default, so it only needs to be cleared
1362 		 * for non-filtered entries.
1363 		 */
1364 		if (!(he->filtered & (1 << type))) {
1365 			while (parent) {
1366 				parent->filtered &= ~(1 << type);
1367 				parent = parent->parent_he;
1368 			}
1369 		}
1370 	} else {
1371 		/*
1372 		 * If the current entry doesn't have matching formats, set
1373 		 * the filter marker for upper-level entries.  It will be
1374 		 * cleared if its lower-level entries are not filtered.
1375 		 *
1376 		 * A lower-level entry inherits its parent's filter bit,
1377 		 * so that lower-level entries of a non-filtered entry
1378 		 * won't set the filter marker.
1379 		 */
1380 		if (parent == NULL)
1381 			he->filtered |= (1 << type);
1382 		else
1383 			he->filtered |= (parent->filtered & (1 << type));
1384 	}
1385 }
1386 
1387 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1388 {
1389 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1390 					    check_thread_entry);
1391 
1392 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1393 					    perf_hpp__is_dso_entry);
1394 
1395 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1396 					    perf_hpp__is_sym_entry);
1397 
1398 	hists__apply_filters(he->hists, he);
1399 }
1400 
1401 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1402 						 struct rb_root_cached *root,
1403 						 struct hist_entry *he,
1404 						 struct hist_entry *parent_he,
1405 						 struct perf_hpp_list *hpp_list)
1406 {
1407 	struct rb_node **p = &root->rb_root.rb_node;
1408 	struct rb_node *parent = NULL;
1409 	struct hist_entry *iter, *new;
1410 	struct perf_hpp_fmt *fmt;
1411 	int64_t cmp;
1412 	bool leftmost = true;
1413 
1414 	while (*p != NULL) {
1415 		parent = *p;
1416 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1417 
1418 		cmp = 0;
1419 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1420 			cmp = fmt->collapse(fmt, iter, he);
1421 			if (cmp)
1422 				break;
1423 		}
1424 
1425 		if (!cmp) {
1426 			he_stat__add_stat(&iter->stat, &he->stat);
1427 			return iter;
1428 		}
1429 
1430 		if (cmp < 0)
1431 			p = &parent->rb_left;
1432 		else {
1433 			p = &parent->rb_right;
1434 			leftmost = false;
1435 		}
1436 	}
1437 
1438 	new = hist_entry__new(he, true);
1439 	if (new == NULL)
1440 		return NULL;
1441 
1442 	hists->nr_entries++;
1443 
1444 	/* save related format list for output */
1445 	new->hpp_list = hpp_list;
1446 	new->parent_he = parent_he;
1447 
1448 	hist_entry__apply_hierarchy_filters(new);
1449 
1450 	/* some fields are now passed to 'new' */
1451 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1452 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1453 			he->trace_output = NULL;
1454 		else
1455 			new->trace_output = NULL;
1456 
1457 		if (perf_hpp__is_srcline_entry(fmt))
1458 			he->srcline = NULL;
1459 		else
1460 			new->srcline = NULL;
1461 
1462 		if (perf_hpp__is_srcfile_entry(fmt))
1463 			he->srcfile = NULL;
1464 		else
1465 			new->srcfile = NULL;
1466 	}
1467 
1468 	rb_link_node(&new->rb_node_in, parent, p);
1469 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1470 	return new;
1471 }
1472 
1473 static int hists__hierarchy_insert_entry(struct hists *hists,
1474 					 struct rb_root_cached *root,
1475 					 struct hist_entry *he)
1476 {
1477 	struct perf_hpp_list_node *node;
1478 	struct hist_entry *new_he = NULL;
1479 	struct hist_entry *parent = NULL;
1480 	int depth = 0;
1481 	int ret = 0;
1482 
1483 	list_for_each_entry(node, &hists->hpp_formats, list) {
1484 		/* skip period (overhead) and elided columns */
1485 		if (node->level == 0 || node->skip)
1486 			continue;
1487 
1488 		/* insert copy of 'he' for each fmt into the hierarchy */
1489 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1490 		if (new_he == NULL) {
1491 			ret = -1;
1492 			break;
1493 		}
1494 
1495 		root = &new_he->hroot_in;
1496 		new_he->depth = depth++;
1497 		parent = new_he;
1498 	}
1499 
1500 	if (new_he) {
1501 		new_he->leaf = true;
1502 
1503 		if (hist_entry__has_callchains(new_he) &&
1504 		    symbol_conf.use_callchain) {
1505 			callchain_cursor_reset(&callchain_cursor);
1506 			if (callchain_merge(&callchain_cursor,
1507 					    new_he->callchain,
1508 					    he->callchain) < 0)
1509 				ret = -1;
1510 		}
1511 	}
1512 
1513 	/* 'he' is no longer used */
1514 	hist_entry__delete(he);
1515 
1516 	/* return 0 (or -1) since it already applied filters */
1517 	return ret;
1518 }
1519 
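/*
 * Returns 1 if 'he' was linked in as a new entry, 0 if it was merged
 * into (and freed in favor of) an existing entry, -1 on error.  In
 * hierarchy mode this returns 0 or -1 only, since the filters were
 * already applied by hists__hierarchy_insert_entry().
 */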
1520 static int hists__collapse_insert_entry(struct hists *hists,
1521 					struct rb_root_cached *root,
1522 					struct hist_entry *he)
1523 {
1524 	struct rb_node **p = &root->rb_root.rb_node;
1525 	struct rb_node *parent = NULL;
1526 	struct hist_entry *iter;
1527 	int64_t cmp;
1528 	bool leftmost = true;
1529 
1530 	if (symbol_conf.report_hierarchy)
1531 		return hists__hierarchy_insert_entry(hists, root, he);
1532 
1533 	while (*p != NULL) {
1534 		parent = *p;
1535 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1536 
1537 		cmp = hist_entry__collapse(iter, he);
1538 
1539 		if (!cmp) {
1540 			int ret = 0;
1541 
1542 			he_stat__add_stat(&iter->stat, &he->stat);
1543 			if (symbol_conf.cumulate_callchain)
1544 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1545 
1546 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1547 				callchain_cursor_reset(&callchain_cursor);
1548 				if (callchain_merge(&callchain_cursor,
1549 						    iter->callchain,
1550 						    he->callchain) < 0)
1551 					ret = -1;
1552 			}
1553 			hist_entry__delete(he);
1554 			return ret;
1555 		}
1556 
1557 		if (cmp < 0)
1558 			p = &(*p)->rb_left;
1559 		else {
1560 			p = &(*p)->rb_right;
1561 			leftmost = false;
1562 		}
1563 	}
1564 	hists->nr_entries++;
1565 
1566 	rb_link_node(&he->rb_node_in, parent, p);
1567 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1568 	return 1;
1569 }
1570 
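/*
 * Double buffering: hists->entries_in flips between the two trees in
 * entries_in_array, so new samples can keep landing in one tree while
 * the one returned here is drained by hists__collapse_resort().
 */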
1571 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1572 {
1573 	struct rb_root_cached *root;
1574 
1575 	pthread_mutex_lock(&hists->lock);
1576 
1577 	root = hists->entries_in;
1578 	if (++hists->entries_in > &hists->entries_in_array[1])
1579 		hists->entries_in = &hists->entries_in_array[0];
1580 
1581 	pthread_mutex_unlock(&hists->lock);
1582 
1583 	return root;
1584 }
1585 
1586 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1587 {
1588 	hists__filter_entry_by_dso(hists, he);
1589 	hists__filter_entry_by_thread(hists, he);
1590 	hists__filter_entry_by_symbol(hists, he);
1591 	hists__filter_entry_by_socket(hists, he);
1592 }
1593 
1594 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1595 {
1596 	struct rb_root_cached *root;
1597 	struct rb_node *next;
1598 	struct hist_entry *n;
1599 	int ret;
1600 
1601 	if (!hists__has(hists, need_collapse))
1602 		return 0;
1603 
1604 	hists->nr_entries = 0;
1605 
1606 	root = hists__get_rotate_entries_in(hists);
1607 
1608 	next = rb_first_cached(root);
1609 
1610 	while (next) {
1611 		if (session_done())
1612 			break;
1613 		n = rb_entry(next, struct hist_entry, rb_node_in);
1614 		next = rb_next(&n->rb_node_in);
1615 
1616 		rb_erase_cached(&n->rb_node_in, root);
1617 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1618 		if (ret < 0)
1619 			return -1;
1620 
1621 		if (ret) {
1622 			/*
1623 			 * If it wasn't combined with one of the entries already
1624 			 * collapsed, we need to apply the filters that may have
1625 			 * been set by, say, the hist_browser.
1626 			 */
1627 			hists__apply_filters(hists, n);
1628 		}
1629 		if (prog)
1630 			ui_progress__update(prog, 1);
1631 	}
1632 	return 0;
1633 }
1634 
1635 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1636 {
1637 	struct hists *hists = a->hists;
1638 	struct perf_hpp_fmt *fmt;
1639 	int64_t cmp = 0;
1640 
1641 	hists__for_each_sort_list(hists, fmt) {
1642 		if (perf_hpp__should_skip(fmt, a->hists))
1643 			continue;
1644 
1645 		cmp = fmt->sort(fmt, a, b);
1646 		if (cmp)
1647 			break;
1648 	}
1649 
1650 	return cmp;
1651 }
1652 
1653 static void hists__reset_filter_stats(struct hists *hists)
1654 {
1655 	hists->nr_non_filtered_entries = 0;
1656 	hists->stats.total_non_filtered_period = 0;
1657 }
1658 
1659 void hists__reset_stats(struct hists *hists)
1660 {
1661 	hists->nr_entries = 0;
1662 	hists->stats.total_period = 0;
1663 
1664 	hists__reset_filter_stats(hists);
1665 }
1666 
1667 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1668 {
1669 	hists->nr_non_filtered_entries++;
1670 	hists->stats.total_non_filtered_period += h->stat.period;
1671 }
1672 
1673 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1674 {
1675 	if (!h->filtered)
1676 		hists__inc_filter_stats(hists, h);
1677 
1678 	hists->nr_entries++;
1679 	hists->stats.total_period += h->stat.period;
1680 }
1681 
1682 static void hierarchy_recalc_total_periods(struct hists *hists)
1683 {
1684 	struct rb_node *node;
1685 	struct hist_entry *he;
1686 
1687 	node = rb_first_cached(&hists->entries);
1688 
1689 	hists->stats.total_period = 0;
1690 	hists->stats.total_non_filtered_period = 0;
1691 
1692 	/*
1693 	 * Recalculate the total period using top-level entries only,
1694 	 * since lower-level entries see only non-filtered entries
1695 	 * while upper-level entries hold the sum of both.
1696 	 */
1697 	while (node) {
1698 		he = rb_entry(node, struct hist_entry, rb_node);
1699 		node = rb_next(node);
1700 
1701 		hists->stats.total_period += he->stat.period;
1702 		if (!he->filtered)
1703 			hists->stats.total_non_filtered_period += he->stat.period;
1704 	}
1705 }
1706 
1707 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1708 					  struct hist_entry *he)
1709 {
1710 	struct rb_node **p = &root->rb_root.rb_node;
1711 	struct rb_node *parent = NULL;
1712 	struct hist_entry *iter;
1713 	struct perf_hpp_fmt *fmt;
1714 	bool leftmost = true;
1715 
1716 	while (*p != NULL) {
1717 		parent = *p;
1718 		iter = rb_entry(parent, struct hist_entry, rb_node);
1719 
1720 		if (hist_entry__sort(he, iter) > 0)
1721 			p = &parent->rb_left;
1722 		else {
1723 			p = &parent->rb_right;
1724 			leftmost = false;
1725 		}
1726 	}
1727 
1728 	rb_link_node(&he->rb_node, parent, p);
1729 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1730 
1731 	/* update column width of dynamic entry */
1732 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1733 		if (perf_hpp__is_dynamic_entry(fmt))
1734 			fmt->sort(fmt, he, NULL);
1735 	}
1736 }
1737 
1738 static void hists__hierarchy_output_resort(struct hists *hists,
1739 					   struct ui_progress *prog,
1740 					   struct rb_root_cached *root_in,
1741 					   struct rb_root_cached *root_out,
1742 					   u64 min_callchain_hits,
1743 					   bool use_callchain)
1744 {
1745 	struct rb_node *node;
1746 	struct hist_entry *he;
1747 
1748 	*root_out = RB_ROOT_CACHED;
1749 	node = rb_first_cached(root_in);
1750 
1751 	while (node) {
1752 		he = rb_entry(node, struct hist_entry, rb_node_in);
1753 		node = rb_next(node);
1754 
1755 		hierarchy_insert_output_entry(root_out, he);
1756 
1757 		if (prog)
1758 			ui_progress__update(prog, 1);
1759 
1760 		hists->nr_entries++;
1761 		if (!he->filtered) {
1762 			hists->nr_non_filtered_entries++;
1763 			hists__calc_col_len(hists, he);
1764 		}
1765 
1766 		if (!he->leaf) {
1767 			hists__hierarchy_output_resort(hists, prog,
1768 						       &he->hroot_in,
1769 						       &he->hroot_out,
1770 						       min_callchain_hits,
1771 						       use_callchain);
1772 			continue;
1773 		}
1774 
1775 		if (!use_callchain)
1776 			continue;
1777 
1778 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1779 			u64 total = he->stat.period;
1780 
1781 			if (symbol_conf.cumulate_callchain)
1782 				total = he->stat_acc->period;
1783 
1784 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1785 		}
1786 
1787 		callchain_param.sort(&he->sorted_chain, he->callchain,
1788 				     min_callchain_hits, &callchain_param);
1789 	}
1790 }
1791 
1792 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1793 					 struct hist_entry *he,
1794 					 u64 min_callchain_hits,
1795 					 bool use_callchain)
1796 {
1797 	struct rb_node **p = &entries->rb_root.rb_node;
1798 	struct rb_node *parent = NULL;
1799 	struct hist_entry *iter;
1800 	struct perf_hpp_fmt *fmt;
1801 	bool leftmost = true;
1802 
1803 	if (use_callchain) {
1804 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1805 			u64 total = he->stat.period;
1806 
1807 			if (symbol_conf.cumulate_callchain)
1808 				total = he->stat_acc->period;
1809 
1810 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1811 		}
1812 		callchain_param.sort(&he->sorted_chain, he->callchain,
1813 				      min_callchain_hits, &callchain_param);
1814 	}
1815 
1816 	while (*p != NULL) {
1817 		parent = *p;
1818 		iter = rb_entry(parent, struct hist_entry, rb_node);
1819 
1820 		if (hist_entry__sort(he, iter) > 0)
1821 			p = &(*p)->rb_left;
1822 		else {
1823 			p = &(*p)->rb_right;
1824 			leftmost = false;
1825 		}
1826 	}
1827 
1828 	rb_link_node(&he->rb_node, parent, p);
1829 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
1830 
1831 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1832 		if (perf_hpp__is_dynamic_entry(fmt) &&
1833 		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1834 			fmt->sort(fmt, he, NULL);  /* update column width */
1835 	}
1836 }
1837 
1838 static void output_resort(struct hists *hists, struct ui_progress *prog,
1839 			  bool use_callchain, hists__resort_cb_t cb,
1840 			  void *cb_arg)
1841 {
1842 	struct rb_root_cached *root;
1843 	struct rb_node *next;
1844 	struct hist_entry *n;
1845 	u64 callchain_total;
1846 	u64 min_callchain_hits;
1847 
1848 	callchain_total = hists->callchain_period;
1849 	if (symbol_conf.filter_relative)
1850 		callchain_total = hists->callchain_non_filtered_period;
1851 
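	/*
	 * E.g. a min_percent of 0.5 with a total callchain period of
	 * 10000 prunes chains accounting for fewer than 50 hits
	 * (CHAIN_GRAPH_REL recomputes this per entry at insert time).
	 */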
1852 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1853 
1854 	hists__reset_stats(hists);
1855 	hists__reset_col_len(hists);
1856 
1857 	if (symbol_conf.report_hierarchy) {
1858 		hists__hierarchy_output_resort(hists, prog,
1859 					       &hists->entries_collapsed,
1860 					       &hists->entries,
1861 					       min_callchain_hits,
1862 					       use_callchain);
1863 		hierarchy_recalc_total_periods(hists);
1864 		return;
1865 	}
1866 
1867 	if (hists__has(hists, need_collapse))
1868 		root = &hists->entries_collapsed;
1869 	else
1870 		root = hists->entries_in;
1871 
1872 	next = rb_first_cached(root);
1873 	hists->entries = RB_ROOT_CACHED;
1874 
1875 	while (next) {
1876 		n = rb_entry(next, struct hist_entry, rb_node_in);
1877 		next = rb_next(&n->rb_node_in);
1878 
1879 		if (cb && cb(n, cb_arg))
1880 			continue;
1881 
1882 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1883 		hists__inc_stats(hists, n);
1884 
1885 		if (!n->filtered)
1886 			hists__calc_col_len(hists, n);
1887 
1888 		if (prog)
1889 			ui_progress__update(prog, 1);
1890 	}
1891 }
1892 
1893 void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1894 				  hists__resort_cb_t cb, void *cb_arg)
1895 {
1896 	bool use_callchain;
1897 
1898 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1899 		use_callchain = evsel__has_callchain(evsel);
1900 	else
1901 		use_callchain = symbol_conf.use_callchain;
1902 
1903 	use_callchain |= symbol_conf.show_branchflag_count;
1904 
1905 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1906 }
1907 
1908 void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1909 {
1910 	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
1911 }
1912 
1913 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1914 {
1915 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1916 }
1917 
1918 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1919 			     hists__resort_cb_t cb)
1920 {
1921 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1922 }
1923 
1924 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1925 {
1926 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
1927 		return false;
1928 
1929 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
1930 		return true;
1931 
1932 	return false;
1933 }
1934 
1935 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1936 {
1937 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1938 
1939 	while (can_goto_child(he, HMD_NORMAL)) {
1940 		node = rb_last(&he->hroot_out.rb_root);
1941 		he = rb_entry(node, struct hist_entry, rb_node);
1942 	}
1943 	return node;
1944 }
1945 
1946 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1947 {
1948 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1949 
1950 	if (can_goto_child(he, hmd))
1951 		node = rb_first_cached(&he->hroot_out);
1952 	else
1953 		node = rb_next(node);
1954 
1955 	while (node == NULL) {
1956 		he = he->parent_he;
1957 		if (he == NULL)
1958 			break;
1959 
1960 		node = rb_next(&he->rb_node);
1961 	}
1962 	return node;
1963 }
1964 
1965 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1966 {
1967 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1968 
1969 	node = rb_prev(node);
1970 	if (node)
1971 		return rb_hierarchy_last(node);
1972 
1973 	he = he->parent_he;
1974 	if (he == NULL)
1975 		return NULL;
1976 
1977 	return &he->rb_node;
1978 }
1979 
1980 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1981 {
1982 	struct rb_node *node;
1983 	struct hist_entry *child;
1984 	float percent;
1985 
1986 	if (he->leaf)
1987 		return false;
1988 
1989 	node = rb_first_cached(&he->hroot_out);
1990 	child = rb_entry(node, struct hist_entry, rb_node);
1991 
1992 	while (node && child->filtered) {
1993 		node = rb_next(node);
1994 		child = rb_entry(node, struct hist_entry, rb_node);
1995 	}
1996 
1997 	if (node)
1998 		percent = hist_entry__get_percent_limit(child);
1999 	else
2000 		percent = 0;
2001 
2002 	return node && percent >= limit;
2003 }
2004 
2005 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2006 				       enum hist_filter filter)
2007 {
2008 	h->filtered &= ~(1 << filter);
2009 
2010 	if (symbol_conf.report_hierarchy) {
2011 		struct hist_entry *parent = h->parent_he;
2012 
2013 		while (parent) {
2014 			he_stat__add_stat(&parent->stat, &h->stat);
2015 
2016 			parent->filtered &= ~(1 << filter);
2017 
2018 			if (parent->filtered)
2019 				goto next;
2020 
2021 			/* force fold unfiltered entry for simplicity */
2022 			parent->unfolded = false;
2023 			parent->has_no_entry = false;
2024 			parent->row_offset = 0;
2025 			parent->nr_rows = 0;
2026 next:
2027 			parent = parent->parent_he;
2028 		}
2029 	}
2030 
2031 	if (h->filtered)
2032 		return;
2033 
2034 	/* force fold unfiltered entry for simplicity */
2035 	h->unfolded = false;
2036 	h->has_no_entry = false;
2037 	h->row_offset = 0;
2038 	h->nr_rows = 0;
2039 
2040 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2041 
2042 	hists__inc_filter_stats(hists, h);
2043 	hists__calc_col_len(hists, h);
2044 }
2045 
2047 static bool hists__filter_entry_by_dso(struct hists *hists,
2048 				       struct hist_entry *he)
2049 {
2050 	if (hists->dso_filter != NULL &&
2051 	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2052 		he->filtered |= (1 << HIST_FILTER__DSO);
2053 		return true;
2054 	}
2055 
2056 	return false;
2057 }
2058 
2059 static bool hists__filter_entry_by_thread(struct hists *hists,
2060 					  struct hist_entry *he)
2061 {
2062 	if (hists->thread_filter != NULL &&
2063 	    he->thread != hists->thread_filter) {
2064 		he->filtered |= (1 << HIST_FILTER__THREAD);
2065 		return true;
2066 	}
2067 
2068 	return false;
2069 }
2070 
2071 static bool hists__filter_entry_by_symbol(struct hists *hists,
2072 					  struct hist_entry *he)
2073 {
2074 	if (hists->symbol_filter_str != NULL &&
2075 	    (!he->ms.sym || strstr(he->ms.sym->name,
2076 				   hists->symbol_filter_str) == NULL)) {
2077 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2078 		return true;
2079 	}
2080 
2081 	return false;
2082 }
2083 
2084 static bool hists__filter_entry_by_socket(struct hists *hists,
2085 					  struct hist_entry *he)
2086 {
2087 	if ((hists->socket_filter > -1) &&
2088 	    (he->socket != hists->socket_filter)) {
2089 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2090 		return true;
2091 	}
2092 
2093 	return false;
2094 }
2095 
2096 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2097 
2098 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2099 {
2100 	struct rb_node *nd;
2101 
2102 	hists->stats.nr_non_filtered_samples = 0;
2103 
2104 	hists__reset_filter_stats(hists);
2105 	hists__reset_col_len(hists);
2106 
2107 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2108 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2109 
2110 		if (filter(hists, h))
2111 			continue;
2112 
2113 		hists__remove_entry_filter(hists, h, type);
2114 	}
2115 }
2116 
2117 static void resort_filtered_entry(struct rb_root_cached *root,
2118 				  struct hist_entry *he)
2119 {
2120 	struct rb_node **p = &root->rb_root.rb_node;
2121 	struct rb_node *parent = NULL;
2122 	struct hist_entry *iter;
2123 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2124 	struct rb_node *nd;
2125 	bool leftmost = true;
2126 
2127 	while (*p != NULL) {
2128 		parent = *p;
2129 		iter = rb_entry(parent, struct hist_entry, rb_node);
2130 
2131 		if (hist_entry__sort(he, iter) > 0)
2132 			p = &(*p)->rb_left;
2133 		else {
2134 			p = &(*p)->rb_right;
2135 			leftmost = false;
2136 		}
2137 	}
2138 
2139 	rb_link_node(&he->rb_node, parent, p);
2140 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2141 
2142 	if (he->leaf || he->filtered)
2143 		return;
2144 
2145 	nd = rb_first_cached(&he->hroot_out);
2146 	while (nd) {
2147 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2148 
2149 		nd = rb_next(nd);
2150 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2151 
2152 		resort_filtered_entry(&new_root, h);
2153 	}
2154 
2155 	he->hroot_out = new_root;
2156 }
2157 
2158 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2159 {
2160 	struct rb_node *nd;
2161 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2162 
2163 	hists->stats.nr_non_filtered_samples = 0;
2164 
2165 	hists__reset_filter_stats(hists);
2166 	hists__reset_col_len(hists);
2167 
2168 	nd = rb_first_cached(&hists->entries);
2169 	while (nd) {
2170 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2171 		int ret;
2172 
2173 		ret = hist_entry__filter(h, type, arg);
2174 
2175 		/*
2176 		 * case 1. non-matching type
2177 		 * zero out the period, set filter marker and move to child
2178 		 */
2179 		if (ret < 0) {
2180 			memset(&h->stat, 0, sizeof(h->stat));
2181 			h->filtered |= (1 << type);
2182 
2183 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2184 		}
2185 		/*
2186 		 * case 2. matched type (filter out)
2187 		 * set filter marker and move to next
2188 		 */
2189 		else if (ret == 1) {
2190 			h->filtered |= (1 << type);
2191 
2192 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2193 		}
2194 		/*
2195 		 * case 3. ok (not filtered)
2196 		 * add period to hists and parents, erase the filter marker
2197 		 * and move to next sibling
2198 		 */
2199 		else {
2200 			hists__remove_entry_filter(hists, h, type);
2201 
2202 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2203 		}
2204 	}
2205 
2206 	hierarchy_recalc_total_periods(hists);
2207 
2208 	/*
2209 	 * Resort the output after applying a new filter, since a filter at a
2210 	 * lower level of the hierarchy can change periods at an upper level.
2211 	 */
2212 	nd = rb_first_cached(&hists->entries);
2213 	while (nd) {
2214 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2215 
2216 		nd = rb_next(nd);
2217 		rb_erase_cached(&h->rb_node, &hists->entries);
2218 
2219 		resort_filtered_entry(&new_root, h);
2220 	}
2221 
2222 	hists->entries = new_root;
2223 }
2224 
2225 void hists__filter_by_thread(struct hists *hists)
2226 {
2227 	if (symbol_conf.report_hierarchy)
2228 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2229 					hists->thread_filter);
2230 	else
2231 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2232 				      hists__filter_entry_by_thread);
2233 }
2234 
2235 void hists__filter_by_dso(struct hists *hists)
2236 {
2237 	if (symbol_conf.report_hierarchy)
2238 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2239 					hists->dso_filter);
2240 	else
2241 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2242 				      hists__filter_entry_by_dso);
2243 }
2244 
2245 void hists__filter_by_symbol(struct hists *hists)
2246 {
2247 	if (symbol_conf.report_hierarchy)
2248 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2249 					hists->symbol_filter_str);
2250 	else
2251 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2252 				      hists__filter_entry_by_symbol);
2253 }
2254 
2255 void hists__filter_by_socket(struct hists *hists)
2256 {
2257 	if (symbol_conf.report_hierarchy)
2258 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2259 					&hists->socket_filter);
2260 	else
2261 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2262 				      hists__filter_entry_by_socket);
2263 }
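
/*
 * Illustrative sketch: the TUI/stdio front ends set one of the filter
 * fields on the hists and then call the matching entry point, e.g.:
 *
 *	hists->thread_filter = thread;
 *	hists__filter_by_thread(hists);
 *
 * Clearing a filter is the same call with the field reset to NULL
 * (or to -1 for the socket filter).
 */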
2264 
2265 void events_stats__inc(struct events_stats *stats, u32 type)
2266 {
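	/* nr_events[0] doubles as a running total across all record types */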
2267 	++stats->nr_events[0];
2268 	++stats->nr_events[type];
2269 }
2270 
2271 void hists__inc_nr_events(struct hists *hists, u32 type)
2272 {
2273 	events_stats__inc(&hists->stats, type);
2274 }
2275 
2276 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2277 {
2278 	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2279 	if (!filtered)
2280 		hists->stats.nr_non_filtered_samples++;
2281 }
2282 
2283 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2284 						 struct hist_entry *pair)
2285 {
2286 	struct rb_root_cached *root;
2287 	struct rb_node **p;
2288 	struct rb_node *parent = NULL;
2289 	struct hist_entry *he;
2290 	int64_t cmp;
2291 	bool leftmost = true;
2292 
2293 	if (hists__has(hists, need_collapse))
2294 		root = &hists->entries_collapsed;
2295 	else
2296 		root = hists->entries_in;
2297 
2298 	p = &root->rb_root.rb_node;
2299 
2300 	while (*p != NULL) {
2301 		parent = *p;
2302 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2303 
2304 		cmp = hist_entry__collapse(he, pair);
2305 
2306 		if (!cmp)
2307 			goto out;
2308 
2309 		if (cmp < 0)
2310 			p = &(*p)->rb_left;
2311 		else {
2312 			p = &(*p)->rb_right;
2313 			leftmost = false;
2314 		}
2315 	}
2316 
2317 	he = hist_entry__new(pair, true);
2318 	if (he) {
2319 		memset(&he->stat, 0, sizeof(he->stat));
2320 		he->hists = hists;
2321 		if (symbol_conf.cumulate_callchain)
2322 			memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2323 		rb_link_node(&he->rb_node_in, parent, p);
2324 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2325 		hists__inc_stats(hists, he);
2326 		he->dummy = true;
2327 	}
2328 out:
2329 	return he;
2330 }
2331 
2332 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2333 						    struct rb_root_cached *root,
2334 						    struct hist_entry *pair)
2335 {
2336 	struct rb_node **p;
2337 	struct rb_node *parent = NULL;
2338 	struct hist_entry *he;
2339 	struct perf_hpp_fmt *fmt;
2340 	bool leftmost = true;
2341 
2342 	p = &root->rb_root.rb_node;
2343 	while (*p != NULL) {
2344 		int64_t cmp = 0;
2345 
2346 		parent = *p;
2347 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2348 
2349 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2350 			cmp = fmt->collapse(fmt, he, pair);
2351 			if (cmp)
2352 				break;
2353 		}
2354 		if (!cmp)
2355 			goto out;
2356 
2357 		if (cmp < 0)
2358 			p = &parent->rb_left;
2359 		else {
2360 			p = &parent->rb_right;
2361 			leftmost = false;
2362 		}
2363 	}
2364 
2365 	he = hist_entry__new(pair, true);
2366 	if (he) {
2367 		rb_link_node(&he->rb_node_in, parent, p);
2368 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2369 
2370 		he->dummy = true;
2371 		he->hists = hists;
2372 		memset(&he->stat, 0, sizeof(he->stat));
2373 		hists__inc_stats(hists, he);
2374 	}
2375 out:
2376 	return he;
2377 }
2378 
2379 static struct hist_entry *hists__find_entry(struct hists *hists,
2380 					    struct hist_entry *he)
2381 {
2382 	struct rb_node *n;
2383 
2384 	if (hists__has(hists, need_collapse))
2385 		n = hists->entries_collapsed.rb_root.rb_node;
2386 	else
2387 		n = hists->entries_in->rb_root.rb_node;
2388 
2389 	while (n) {
2390 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2391 		int64_t cmp = hist_entry__collapse(iter, he);
2392 
2393 		if (cmp < 0)
2394 			n = n->rb_left;
2395 		else if (cmp > 0)
2396 			n = n->rb_right;
2397 		else
2398 			return iter;
2399 	}
2400 
2401 	return NULL;
2402 }
2403 
2404 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2405 						      struct hist_entry *he)
2406 {
2407 	struct rb_node *n = root->rb_root.rb_node;
2408 
2409 	while (n) {
2410 		struct hist_entry *iter;
2411 		struct perf_hpp_fmt *fmt;
2412 		int64_t cmp = 0;
2413 
2414 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2415 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2416 			cmp = fmt->collapse(fmt, iter, he);
2417 			if (cmp)
2418 				break;
2419 		}
2420 
2421 		if (cmp < 0)
2422 			n = n->rb_left;
2423 		else if (cmp > 0)
2424 			n = n->rb_right;
2425 		else
2426 			return iter;
2427 	}
2428 
2429 	return NULL;
2430 }
2431 
2432 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2433 				   struct rb_root_cached *other_root)
2434 {
2435 	struct rb_node *nd;
2436 	struct hist_entry *pos, *pair;
2437 
2438 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2439 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2440 		pair = hists__find_hierarchy_entry(other_root, pos);
2441 
2442 		if (pair) {
2443 			hist_entry__add_pair(pair, pos);
2444 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2445 		}
2446 	}
2447 }
2448 
2449 /*
2450  * Look for pairs to link to the leader buckets (hist_entries):
2451  */
2452 void hists__match(struct hists *leader, struct hists *other)
2453 {
2454 	struct rb_root_cached *root;
2455 	struct rb_node *nd;
2456 	struct hist_entry *pos, *pair;
2457 
2458 	if (symbol_conf.report_hierarchy) {
2459 		/* hierarchy report always collapses entries */
2460 		return hists__match_hierarchy(&leader->entries_collapsed,
2461 					      &other->entries_collapsed);
2462 	}
2463 
2464 	if (hists__has(leader, need_collapse))
2465 		root = &leader->entries_collapsed;
2466 	else
2467 		root = leader->entries_in;
2468 
2469 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2470 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2471 		pair = hists__find_entry(other, pos);
2472 
2473 		if (pair)
2474 			hist_entry__add_pair(pair, pos);
2475 	}
2476 }
2477 
2478 static int hists__link_hierarchy(struct hists *leader_hists,
2479 				 struct hist_entry *parent,
2480 				 struct rb_root_cached *leader_root,
2481 				 struct rb_root_cached *other_root)
2482 {
2483 	struct rb_node *nd;
2484 	struct hist_entry *pos, *leader;
2485 
2486 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2487 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2488 
2489 		if (hist_entry__has_pairs(pos)) {
2490 			bool found = false;
2491 
2492 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2493 				if (leader->hists == leader_hists) {
2494 					found = true;
2495 					break;
2496 				}
2497 			}
2498 			if (!found)
2499 				return -1;
2500 		} else {
2501 			leader = add_dummy_hierarchy_entry(leader_hists,
2502 							   leader_root, pos);
2503 			if (leader == NULL)
2504 				return -1;
2505 
2506 			/* parent_he must point into the leader hierarchy, not into pos */
2507 			leader->parent_he = parent;
2508 
2509 			hist_entry__add_pair(pos, leader);
2510 		}
2511 
2512 		if (!pos->leaf) {
2513 			if (hists__link_hierarchy(leader_hists, leader,
2514 						  &leader->hroot_in,
2515 						  &pos->hroot_in) < 0)
2516 				return -1;
2517 		}
2518 	}
2519 	return 0;
2520 }
2521 
2522 /*
2523  * Look for entries in the other hists that are not present in the leader;
2524  * for each one found, add a dummy entry to the leader hists, with period=0,
2525  * nr_events=0, to serve as the list header.
2526  */
2527 int hists__link(struct hists *leader, struct hists *other)
2528 {
2529 	struct rb_root_cached *root;
2530 	struct rb_node *nd;
2531 	struct hist_entry *pos, *pair;
2532 
2533 	if (symbol_conf.report_hierarchy) {
2534 		/* hierarchy report always collapses entries */
2535 		return hists__link_hierarchy(leader, NULL,
2536 					     &leader->entries_collapsed,
2537 					     &other->entries_collapsed);
2538 	}
2539 
2540 	if (hists__has(other, need_collapse))
2541 		root = &other->entries_collapsed;
2542 	else
2543 		root = other->entries_in;
2544 
2545 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2546 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2547 
2548 		if (!hist_entry__has_pairs(pos)) {
2549 			pair = hists__add_dummy_entry(leader, pos);
2550 			if (pair == NULL)
2551 				return -1;
2552 			hist_entry__add_pair(pos, pair);
2553 		}
2554 	}
2555 
2556 	return 0;
2557 }
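
/*
 * Illustrative sketch: pairing two hists (as perf diff does for a
 * baseline and another data file) is a match followed by a link, so
 * the leader ends up with an entry for every key present in either:
 *
 *	hists__match(leader, other);	/* link existing counterparts */
 *	hists__link(leader, other);	/* add dummies for the rest */
 */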
2558 
2559 int hists__unlink(struct hists *hists)
2560 {
2561 	struct rb_root_cached *root;
2562 	struct rb_node *nd;
2563 	struct hist_entry *pos;
2564 
2565 	if (hists__has(hists, need_collapse))
2566 		root = &hists->entries_collapsed;
2567 	else
2568 		root = hists->entries_in;
2569 
2570 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2571 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2572 		list_del_init(&pos->pairs.node);
2573 	}
2574 
2575 	return 0;
2576 }
2577 
2578 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2579 			  struct perf_sample *sample, bool nonany_branch_mode,
2580 			  u64 *total_cycles)
2581 {
2582 	struct branch_info *bi;
2583 
2584 	/* If we have branch cycles, always annotate them. */
2585 	if (bs && bs->nr && bs->entries[0].flags.cycles) {
2586 		int i;
2587 
2588 		bi = sample__resolve_bstack(sample, al);
2589 		if (bi) {
2590 			struct addr_map_symbol *prev = NULL;
2591 
2592 			/*
2593 			 * Ignore errors; we still want to process the
2594 			 * other entries.
2595 			 *
2596 			 * For non-standard branch modes, always
2597 			 * force no IPC (prev == NULL).
2598 			 *
2599 			 * Note that perf stores branches reversed from
2600 			 * program order!
2601 			 */
2602 			for (i = bs->nr - 1; i >= 0; i--) {
2603 				addr_map_symbol__account_cycles(&bi[i].from,
2604 					nonany_branch_mode ? NULL : prev,
2605 					bi[i].flags.cycles);
2606 				prev = &bi[i].to;
2607 
2608 				if (total_cycles)
2609 					*total_cycles += bi[i].flags.cycles;
2610 			}
2611 			free(bi);
2612 		}
2613 	}
2614 }
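
/*
 * Illustrative sketch: a sample-processing callback would pass the
 * sample's branch stack straight through (false assumes the default
 * branch-any mode; the total pointer may be NULL if no total is wanted):
 *
 *	u64 total_cycles = 0;
 *
 *	hist__account_cycles(sample->branch_stack, al, sample,
 *			     false, &total_cycles);
 */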
2615 
2616 size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2617 {
2618 	struct evsel *pos;
2619 	size_t ret = 0;
2620 
2621 	evlist__for_each_entry(evlist, pos) {
2622 		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2623 		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2624 	}
2625 
2626 	return ret;
2627 }
2628 
2630 u64 hists__total_period(struct hists *hists)
2631 {
2632 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2633 		hists->stats.total_period;
2634 }
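
/*
 * Illustrative sketch: this is the denominator used when an entry's
 * period is turned into the percentage shown in reports, roughly:
 *
 *	percent = 100.0 * he->stat.period / hists__total_period(hists);
 */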
2635 
2636 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2637 {
2638 	char unit;
2639 	int printed;
2640 	const struct dso *dso = hists->dso_filter;
2641 	struct thread *thread = hists->thread_filter;
2642 	int socket_id = hists->socket_filter;
2643 	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2644 	u64 nr_events = hists->stats.total_period;
2645 	struct evsel *evsel = hists_to_evsel(hists);
2646 	const char *ev_name = perf_evsel__name(evsel);
2647 	char buf[512], sample_freq_str[64] = "";
2648 	size_t buflen = sizeof(buf);
2649 	char ref[30] = " show reference callgraph, ";
2650 	bool enable_ref = false;
2651 
2652 	if (symbol_conf.filter_relative) {
2653 		nr_samples = hists->stats.nr_non_filtered_samples;
2654 		nr_events = hists->stats.total_non_filtered_period;
2655 	}
2656 
2657 	if (perf_evsel__is_group_event(evsel)) {
2658 		struct evsel *pos;
2659 
2660 		perf_evsel__group_desc(evsel, buf, buflen);
2661 		ev_name = buf;
2662 
2663 		for_each_group_member(pos, evsel) {
2664 			struct hists *pos_hists = evsel__hists(pos);
2665 
2666 			if (symbol_conf.filter_relative) {
2667 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2668 				nr_events += pos_hists->stats.total_non_filtered_period;
2669 			} else {
2670 				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2671 				nr_events += pos_hists->stats.total_period;
2672 			}
2673 		}
2674 	}
2675 
2676 	if (symbol_conf.show_ref_callgraph &&
2677 	    strstr(ev_name, "call-graph=no"))
2678 		enable_ref = true;
2679 
2680 	if (show_freq)
2681 		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2682 
2683 	nr_samples = convert_unit(nr_samples, &unit);
2684 	printed = scnprintf(bf, size,
2685 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2686 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2687 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2688 
2690 	if (hists->uid_filter_str)
2691 		printed += scnprintf(bf + printed, size - printed,
2692 				    ", UID: %s", hists->uid_filter_str);
2693 	if (thread) {
2694 		if (hists__has(hists, thread)) {
2695 			printed += scnprintf(bf + printed, size - printed,
2696 				    ", Thread: %s(%d)",
2697 				     (thread->comm_set ? thread__comm_str(thread) : ""),
2698 				    thread->tid);
2699 		} else {
2700 			printed += scnprintf(bf + printed, size - printed,
2701 				    ", Thread: %s",
2702 				     (thread->comm_set ? thread__comm_str(thread) : ""));
2703 		}
2704 	}
2705 	if (dso)
2706 		printed += scnprintf(bf + printed, size - printed,
2707 				    ", DSO: %s", dso->short_name);
2708 	if (socket_id > -1)
2709 		printed += scnprintf(bf + printed, size - printed,
2710 				    ", Processor Socket: %d", socket_id);
2711 
2712 	return printed;
2713 }
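
/*
 * Illustrative sketch of a resulting header line (values invented):
 *
 *	Samples: 40K of event 'cycles', Event count (approx.): 8901234567
 *
 * with the UID/Thread/DSO/Socket suffixes appended only while the
 * corresponding filter is active.
 */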
2714 
2715 int parse_filter_percentage(const struct option *opt __maybe_unused,
2716 			    const char *arg, int unset __maybe_unused)
2717 {
2718 	if (!strcmp(arg, "relative"))
2719 		symbol_conf.filter_relative = true;
2720 	else if (!strcmp(arg, "absolute"))
2721 		symbol_conf.filter_relative = false;
2722 	else {
2723 		pr_debug("Invalid percentage: %s\n", arg);
2724 		return -1;
2725 	}
2726 
2727 	return 0;
2728 }
2729 
2730 int perf_hist_config(const char *var, const char *value)
2731 {
2732 	if (!strcmp(var, "hist.percentage"))
2733 		return parse_filter_percentage(NULL, value, 0);
2734 
2735 	return 0;
2736 }
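
/*
 * Illustrative sketch: the matching ~/.perfconfig stanza is:
 *
 *	[hist]
 *		percentage = relative
 *
 * which arrives here as var == "hist.percentage" and is forwarded to
 * parse_filter_percentage() above.
 */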
2737 
2738 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2739 {
2740 	memset(hists, 0, sizeof(*hists));
2741 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2742 	hists->entries_in = &hists->entries_in_array[0];
2743 	hists->entries_collapsed = RB_ROOT_CACHED;
2744 	hists->entries = RB_ROOT_CACHED;
2745 	pthread_mutex_init(&hists->lock, NULL);
2746 	hists->socket_filter = -1;
2747 	hists->hpp_list = hpp_list;
2748 	INIT_LIST_HEAD(&hists->hpp_formats);
2749 	return 0;
2750 }
2751 
2752 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2753 {
2754 	struct rb_node *node;
2755 	struct hist_entry *he;
2756 
2757 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
2758 		node = rb_first_cached(root);
2759 		rb_erase_cached(node, root);
2760 
2761 		he = rb_entry(node, struct hist_entry, rb_node_in);
2762 		hist_entry__delete(he);
2763 	}
2764 }
2765 
2766 static void hists__delete_all_entries(struct hists *hists)
2767 {
2768 	hists__delete_entries(hists);
2769 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
2770 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
2771 	hists__delete_remaining_entries(&hists->entries_collapsed);
2772 }
2773 
2774 static void hists_evsel__exit(struct evsel *evsel)
2775 {
2776 	struct hists *hists = evsel__hists(evsel);
2777 	struct perf_hpp_fmt *fmt, *pos;
2778 	struct perf_hpp_list_node *node, *tmp;
2779 
2780 	hists__delete_all_entries(hists);
2781 
2782 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2783 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2784 			list_del_init(&fmt->list);
2785 			free(fmt);
2786 		}
2787 		list_del_init(&node->list);
2788 		free(node);
2789 	}
2790 }
2791 
2792 static int hists_evsel__init(struct evsel *evsel)
2793 {
2794 	struct hists *hists = evsel__hists(evsel);
2795 
2796 	__hists__init(hists, &perf_hpp_list);
2797 	return 0;
2798 }
2799 
2800 /*
2801  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2802  * above, via hists__delete_all_entries().
2803  */
2804 
2805 int hists__init(void)
2806 {
2807 	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2808 					    hists_evsel__init,
2809 					    hists_evsel__exit);
2810 	if (err)
2811 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2812 
2813 	return err;
2814 }
2815 
2816 void perf_hpp_list__init(struct perf_hpp_list *list)
2817 {
2818 	INIT_LIST_HEAD(&list->fields);
2819 	INIT_LIST_HEAD(&list->sorts);
2820 }
2821