xref: /linux/tools/perf/util/hist.c (revision 448cc2fb3a7b327823a9afd374808c37b8e6194f)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "map.h"
8 #include "map_symbol.h"
9 #include "branch.h"
10 #include "mem-events.h"
11 #include "session.h"
12 #include "namespaces.h"
13 #include "cgroup.h"
14 #include "sort.h"
15 #include "units.h"
16 #include "evlist.h"
17 #include "evsel.h"
18 #include "annotate.h"
19 #include "srcline.h"
20 #include "symbol.h"
21 #include "thread.h"
22 #include "block-info.h"
23 #include "ui/progress.h"
24 #include <errno.h>
25 #include <math.h>
26 #include <inttypes.h>
27 #include <sys/param.h>
28 #include <linux/rbtree.h>
29 #include <linux/string.h>
30 #include <linux/time64.h>
31 #include <linux/zalloc.h>
32 
33 static bool hists__filter_entry_by_dso(struct hists *hists,
34 				       struct hist_entry *he);
35 static bool hists__filter_entry_by_thread(struct hists *hists,
36 					  struct hist_entry *he);
37 static bool hists__filter_entry_by_symbol(struct hists *hists,
38 					  struct hist_entry *he);
39 static bool hists__filter_entry_by_socket(struct hists *hists,
40 					  struct hist_entry *he);
41 
42 u16 hists__col_len(struct hists *hists, enum hist_column col)
43 {
44 	return hists->col_len[col];
45 }
46 
47 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
48 {
49 	hists->col_len[col] = len;
50 }
51 
52 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
53 {
54 	if (len > hists__col_len(hists, col)) {
55 		hists__set_col_len(hists, col, len);
56 		return true;
57 	}
58 	return false;
59 }
60 
61 void hists__reset_col_len(struct hists *hists)
62 {
63 	enum hist_column col;
64 
65 	for (col = 0; col < HISTC_NR_COLS; ++col)
66 		hists__set_col_len(hists, col, 0);
67 }
68 
69 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
70 {
71 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
72 
73 	if (hists__col_len(hists, dso) < unresolved_col_width &&
74 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
75 	    !symbol_conf.dso_list)
76 		hists__set_col_len(hists, dso, unresolved_col_width);
77 }
78 
79 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
80 {
81 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
82 	int symlen;
83 	u16 len;
84 
85 	if (h->block_info)
86 		return;
87 	/*
88 	 * +4 accounts for '[x] ' priv level info
89 	 * +2 accounts for 0x prefix on raw addresses
90 	 * +3 accounts for ' y ' symtab origin info
91 	 */
92 	if (h->ms.sym) {
93 		symlen = h->ms.sym->namelen + 4;
94 		if (verbose > 0)
95 			symlen += BITS_PER_LONG / 4 + 2 + 3;
96 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
97 	} else {
98 		symlen = unresolved_col_width + 4 + 2;
99 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
100 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
101 	}
102 
103 	len = thread__comm_len(h->thread);
104 	if (hists__new_col_len(hists, HISTC_COMM, len))
105 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
106 
107 	if (h->ms.map) {
108 		len = dso__name_len(h->ms.map->dso);
109 		hists__new_col_len(hists, HISTC_DSO, len);
110 	}
111 
112 	if (h->parent)
113 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
114 
115 	if (h->branch_info) {
116 		if (h->branch_info->from.ms.sym) {
117 			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
118 			if (verbose > 0)
119 				symlen += BITS_PER_LONG / 4 + 2 + 3;
120 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
121 
122 			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
123 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
124 		} else {
125 			symlen = unresolved_col_width + 4 + 2;
126 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
127 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
128 		}
129 
130 		if (h->branch_info->to.ms.sym) {
131 			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
132 			if (verbose > 0)
133 				symlen += BITS_PER_LONG / 4 + 2 + 3;
134 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
135 
136 			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
137 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
138 		} else {
139 			symlen = unresolved_col_width + 4 + 2;
140 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
141 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
142 		}
143 
144 		if (h->branch_info->srcline_from)
145 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
146 					strlen(h->branch_info->srcline_from));
147 		if (h->branch_info->srcline_to)
148 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
149 					strlen(h->branch_info->srcline_to));
150 	}
151 
152 	if (h->mem_info) {
153 		if (h->mem_info->daddr.ms.sym) {
154 			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
155 			       + unresolved_col_width + 2;
156 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
157 					   symlen);
158 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
159 					   symlen + 1);
160 		} else {
161 			symlen = unresolved_col_width + 4 + 2;
162 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
163 					   symlen);
164 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
165 					   symlen);
166 		}
167 
168 		if (h->mem_info->iaddr.ms.sym) {
169 			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
170 			       + unresolved_col_width + 2;
171 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
172 					   symlen);
173 		} else {
174 			symlen = unresolved_col_width + 4 + 2;
175 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
176 					   symlen);
177 		}
178 
179 		if (h->mem_info->daddr.ms.map) {
180 			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
181 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
182 					   symlen);
183 		} else {
184 			symlen = unresolved_col_width + 4 + 2;
185 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
186 		}
187 
188 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
189 				   unresolved_col_width + 4 + 2);
190 
191 		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
192 				   unresolved_col_width + 4 + 2);
193 
194 	} else {
195 		symlen = unresolved_col_width + 4 + 2;
196 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
197 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
198 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
199 	}
200 
201 	hists__new_col_len(hists, HISTC_CGROUP, 6);
202 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
203 	hists__new_col_len(hists, HISTC_CPU, 3);
204 	hists__new_col_len(hists, HISTC_SOCKET, 6);
205 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
206 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
207 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
208 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
209 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
210 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
211 	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
212 	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
213 	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
214 	hists__new_col_len(hists, HISTC_P_STAGE_CYC, 13);
215 	if (symbol_conf.nanosecs)
216 		hists__new_col_len(hists, HISTC_TIME, 16);
217 	else
218 		hists__new_col_len(hists, HISTC_TIME, 12);
219 	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
220 
221 	if (h->srcline) {
222 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
223 		hists__new_col_len(hists, HISTC_SRCLINE, len);
224 	}
225 
226 	if (h->srcfile)
227 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
228 
229 	if (h->transaction)
230 		hists__new_col_len(hists, HISTC_TRANSACTION,
231 				   hist_entry__transaction_len());
232 
233 	if (h->trace_output)
234 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
235 
236 	if (h->cgroup) {
237 		const char *cgrp_name = "unknown";
238 		struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
239 						   h->cgroup);
240 		if (cgrp != NULL)
241 			cgrp_name = cgrp->name;
242 
243 		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
244 	}
245 }
246 
247 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
248 {
249 	struct rb_node *next = rb_first_cached(&hists->entries);
250 	struct hist_entry *n;
251 	int row = 0;
252 
253 	hists__reset_col_len(hists);
254 
255 	while (next && row++ < max_rows) {
256 		n = rb_entry(next, struct hist_entry, rb_node);
257 		if (!n->filtered)
258 			hists__calc_col_len(hists, n);
259 		next = rb_next(&n->rb_node);
260 	}
261 }
262 
263 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
264 					unsigned int cpumode, u64 period)
265 {
266 	switch (cpumode) {
267 	case PERF_RECORD_MISC_KERNEL:
268 		he_stat->period_sys += period;
269 		break;
270 	case PERF_RECORD_MISC_USER:
271 		he_stat->period_us += period;
272 		break;
273 	case PERF_RECORD_MISC_GUEST_KERNEL:
274 		he_stat->period_guest_sys += period;
275 		break;
276 	case PERF_RECORD_MISC_GUEST_USER:
277 		he_stat->period_guest_us += period;
278 		break;
279 	default:
280 		break;
281 	}
282 }
283 
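/* Round the timestamp down to the start of its time_quantum bucket. */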
284 static long hist_time(unsigned long htime)
285 {
286 	unsigned long time_quantum = symbol_conf.time_quantum;
287 	if (time_quantum)
288 		return (htime / time_quantum) * time_quantum;
289 	return htime;
290 }
291 
292 static void he_stat__add_period(struct he_stat *he_stat, u64 period)
293 {
294 	he_stat->period		+= period;
295 	he_stat->nr_events	+= 1;
296 }
297 
298 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
299 {
300 	dest->period		+= src->period;
301 	dest->period_sys	+= src->period_sys;
302 	dest->period_us		+= src->period_us;
303 	dest->period_guest_sys	+= src->period_guest_sys;
304 	dest->period_guest_us	+= src->period_guest_us;
305 	dest->nr_events		+= src->nr_events;
306 }
307 
308 static void he_stat__decay(struct he_stat *he_stat)
309 {
310 	he_stat->period = (he_stat->period * 7) / 8;
311 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
312 	/* XXX need decay for weight too? */
313 }
314 
315 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
316 
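/*
 * Decay the entry's periods (recursing into its children in hierarchy
 * mode) and, for top-level entries, subtract the decayed amount from the
 * hists totals.  Returns true when the period reaches zero, i.e. the
 * entry can be deleted.
 */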
317 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
318 {
319 	u64 prev_period = he->stat.period;
320 	u64 diff;
321 
322 	if (prev_period == 0)
323 		return true;
324 
325 	he_stat__decay(&he->stat);
326 	if (symbol_conf.cumulate_callchain)
327 		he_stat__decay(he->stat_acc);
328 	decay_callchain(he->callchain);
329 
330 	diff = prev_period - he->stat.period;
331 
332 	if (!he->depth) {
333 		hists->stats.total_period -= diff;
334 		if (!he->filtered)
335 			hists->stats.total_non_filtered_period -= diff;
336 	}
337 
338 	if (!he->leaf) {
339 		struct hist_entry *child;
340 		struct rb_node *node = rb_first_cached(&he->hroot_out);
341 		while (node) {
342 			child = rb_entry(node, struct hist_entry, rb_node);
343 			node = rb_next(node);
344 
345 			if (hists__decay_entry(hists, child))
346 				hists__delete_entry(hists, child);
347 		}
348 	}
349 
350 	return he->stat.period == 0;
351 }
352 
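/*
 * Unlink the entry from both the input and output rbtrees, update the
 * entry counters and free it.
 */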
353 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
354 {
355 	struct rb_root_cached *root_in;
356 	struct rb_root_cached *root_out;
357 
358 	if (he->parent_he) {
359 		root_in  = &he->parent_he->hroot_in;
360 		root_out = &he->parent_he->hroot_out;
361 	} else {
362 		if (hists__has(hists, need_collapse))
363 			root_in = &hists->entries_collapsed;
364 		else
365 			root_in = hists->entries_in;
366 		root_out = &hists->entries;
367 	}
368 
369 	rb_erase_cached(&he->rb_node_in, root_in);
370 	rb_erase_cached(&he->rb_node, root_out);
371 
372 	--hists->nr_entries;
373 	if (!he->filtered)
374 		--hists->nr_non_filtered_entries;
375 
376 	hist_entry__delete(he);
377 }
378 
379 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
380 {
381 	struct rb_node *next = rb_first_cached(&hists->entries);
382 	struct hist_entry *n;
383 
384 	while (next) {
385 		n = rb_entry(next, struct hist_entry, rb_node);
386 		next = rb_next(&n->rb_node);
387 		if (((zap_user && n->level == '.') ||
388 		     (zap_kernel && n->level != '.') ||
389 		     hists__decay_entry(hists, n))) {
390 			hists__delete_entry(hists, n);
391 		}
392 	}
393 }
394 
395 void hists__delete_entries(struct hists *hists)
396 {
397 	struct rb_node *next = rb_first_cached(&hists->entries);
398 	struct hist_entry *n;
399 
400 	while (next) {
401 		n = rb_entry(next, struct hist_entry, rb_node);
402 		next = rb_next(&n->rb_node);
403 
404 		hists__delete_entry(hists, n);
405 	}
406 }
407 
408 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
409 {
410 	struct rb_node *next = rb_first_cached(&hists->entries);
411 	struct hist_entry *n;
412 	int i = 0;
413 
414 	while (next) {
415 		n = rb_entry(next, struct hist_entry, rb_node);
416 		if (i == idx)
417 			return n;
418 
419 		next = rb_next(&n->rb_node);
420 		i++;
421 	}
422 
423 	return NULL;
424 }
425 
426 /*
427  * histogram, sorted on item, collects periods
428  */
429 
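/*
 * Finish the shallow copy of 'template': take references on the maps and
 * thread, and duplicate the per-sample data (branch info, raw data,
 * srcline) that would otherwise be freed once the sample is processed.
 */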
430 static int hist_entry__init(struct hist_entry *he,
431 			    struct hist_entry *template,
432 			    bool sample_self,
433 			    size_t callchain_size)
434 {
435 	*he = *template;
436 	he->callchain_size = callchain_size;
437 
438 	if (symbol_conf.cumulate_callchain) {
439 		he->stat_acc = malloc(sizeof(he->stat));
440 		if (he->stat_acc == NULL)
441 			return -ENOMEM;
442 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
443 		if (!sample_self)
444 			memset(&he->stat, 0, sizeof(he->stat));
445 	}
446 
447 	map__get(he->ms.map);
448 
449 	if (he->branch_info) {
450 		/*
451 		 * This branch info is (part of an array) allocated in
452 		 * sample__resolve_bstack() and will be freed after
453 		 * adding new entries, so we need to save a copy.
454 		 */
455 		he->branch_info = malloc(sizeof(*he->branch_info));
456 		if (he->branch_info == NULL)
457 			goto err;
458 
459 		memcpy(he->branch_info, template->branch_info,
460 		       sizeof(*he->branch_info));
461 
462 		map__get(he->branch_info->from.ms.map);
463 		map__get(he->branch_info->to.ms.map);
464 	}
465 
466 	if (he->mem_info) {
467 		map__get(he->mem_info->iaddr.ms.map);
468 		map__get(he->mem_info->daddr.ms.map);
469 	}
470 
471 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
472 		callchain_init(he->callchain);
473 
474 	if (he->raw_data) {
475 		he->raw_data = memdup(he->raw_data, he->raw_size);
476 		if (he->raw_data == NULL)
477 			goto err_infos;
478 	}
479 
480 	if (he->srcline) {
481 		he->srcline = strdup(he->srcline);
482 		if (he->srcline == NULL)
483 			goto err_rawdata;
484 	}
485 
486 	if (symbol_conf.res_sample) {
487 		he->res_samples = calloc(sizeof(struct res_sample),
488 					symbol_conf.res_sample);
489 		if (!he->res_samples)
490 			goto err_srcline;
491 	}
492 
493 	INIT_LIST_HEAD(&he->pairs.node);
494 	thread__get(he->thread);
495 	he->hroot_in  = RB_ROOT_CACHED;
496 	he->hroot_out = RB_ROOT_CACHED;
497 
498 	if (!symbol_conf.report_hierarchy)
499 		he->leaf = true;
500 
501 	return 0;
502 
503 err_srcline:
504 	zfree(&he->srcline);
505 
506 err_rawdata:
507 	zfree(&he->raw_data);
508 
509 err_infos:
510 	if (he->branch_info) {
511 		map__put(he->branch_info->from.ms.map);
512 		map__put(he->branch_info->to.ms.map);
513 		zfree(&he->branch_info);
514 	}
515 	if (he->mem_info) {
516 		map__put(he->mem_info->iaddr.ms.map);
517 		map__put(he->mem_info->daddr.ms.map);
518 	}
519 err:
520 	map__zput(he->ms.map);
521 	zfree(&he->stat_acc);
522 	return -ENOMEM;
523 }
524 
525 static void *hist_entry__zalloc(size_t size)
526 {
527 	return zalloc(size + sizeof(struct hist_entry));
528 }
529 
530 static void hist_entry__free(void *ptr)
531 {
532 	free(ptr);
533 }
534 
535 static struct hist_entry_ops default_ops = {
536 	.new	= hist_entry__zalloc,
537 	.free	= hist_entry__free,
538 };
539 
540 static struct hist_entry *hist_entry__new(struct hist_entry *template,
541 					  bool sample_self)
542 {
543 	struct hist_entry_ops *ops = template->ops;
544 	size_t callchain_size = 0;
545 	struct hist_entry *he;
546 	int err = 0;
547 
548 	if (!ops)
549 		ops = template->ops = &default_ops;
550 
551 	if (symbol_conf.use_callchain)
552 		callchain_size = sizeof(struct callchain_root);
553 
554 	he = ops->new(callchain_size);
555 	if (he) {
556 		err = hist_entry__init(he, template, sample_self, callchain_size);
557 		if (err) {
558 			ops->free(he);
559 			he = NULL;
560 		}
561 	}
562 
563 	return he;
564 }
565 
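/* When symbol_conf.exclude_other is set, filter out entries without a resolved parent. */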
566 static u8 symbol__parent_filter(const struct symbol *parent)
567 {
568 	if (symbol_conf.exclude_other && parent == NULL)
569 		return 1 << HIST_FILTER__PARENT;
570 	return 0;
571 }
572 
573 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
574 {
575 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
576 		return;
577 
578 	he->hists->callchain_period += period;
579 	if (!he->filtered)
580 		he->hists->callchain_non_filtered_period += period;
581 }
582 
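/*
 * Look up 'entry' in the input rbtree, aggregating the period into a
 * matching entry if one exists, otherwise allocating and inserting a
 * new one.
 */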
583 static struct hist_entry *hists__findnew_entry(struct hists *hists,
584 					       struct hist_entry *entry,
585 					       struct addr_location *al,
586 					       bool sample_self)
587 {
588 	struct rb_node **p;
589 	struct rb_node *parent = NULL;
590 	struct hist_entry *he;
591 	int64_t cmp;
592 	u64 period = entry->stat.period;
593 	bool leftmost = true;
594 
595 	p = &hists->entries_in->rb_root.rb_node;
596 
597 	while (*p != NULL) {
598 		parent = *p;
599 		he = rb_entry(parent, struct hist_entry, rb_node_in);
600 
601 		/*
602 		 * Make sure that it receives arguments in the same order as
603 		 * hist_entry__collapse() so that we can use an appropriate
604 		 * function when searching an entry regardless of which sort
605 		 * keys were used.
606 		 */
607 		cmp = hist_entry__cmp(he, entry);
608 
609 		if (!cmp) {
610 			if (sample_self) {
611 				he_stat__add_period(&he->stat, period);
612 				hist_entry__add_callchain_period(he, period);
613 			}
614 			if (symbol_conf.cumulate_callchain)
615 				he_stat__add_period(he->stat_acc, period);
616 
617 			/*
618 			 * This mem info was allocated from sample__resolve_mem
619 			 * and will not be used anymore.
620 			 */
621 			mem_info__zput(entry->mem_info);
622 
623 			block_info__zput(entry->block_info);
624 
625 			/*
626 			 * If the map of an existing hist_entry has become
627 			 * out-of-date due to an exec() or similar, update it.
628 			 * Otherwise we will mis-adjust symbol addresses when
629 			 * computing the history counter to increment.
630 			 */
631 			if (he->ms.map != entry->ms.map) {
632 				map__put(he->ms.map);
633 				he->ms.map = map__get(entry->ms.map);
634 			}
635 			goto out;
636 		}
637 
638 		if (cmp < 0)
639 			p = &(*p)->rb_left;
640 		else {
641 			p = &(*p)->rb_right;
642 			leftmost = false;
643 		}
644 	}
645 
646 	he = hist_entry__new(entry, sample_self);
647 	if (!he)
648 		return NULL;
649 
650 	if (sample_self)
651 		hist_entry__add_callchain_period(he, period);
652 	hists->nr_entries++;
653 
654 	rb_link_node(&he->rb_node_in, parent, p);
655 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
656 out:
657 	if (sample_self)
658 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
659 	if (symbol_conf.cumulate_callchain)
660 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
661 	return he;
662 }
663 
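/*
 * Return an unbiased random number in [0, high): values below 'thresh'
 * are rejected so that the final modulo does not bias toward small
 * results.
 */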
664 static unsigned random_max(unsigned high)
665 {
666 	unsigned thresh = -high % high;
667 	for (;;) {
668 		unsigned r = random();
669 		if (r >= thresh)
670 			return r % high;
671 	}
672 }
673 
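/*
 * Keep a bounded record of time/cpu/tid for individual samples: fill the
 * res_samples array first, then overwrite a random slot once it is full.
 */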
674 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
675 {
676 	struct res_sample *r;
677 	int j;
678 
679 	if (he->num_res < symbol_conf.res_sample) {
680 		j = he->num_res++;
681 	} else {
682 		j = random_max(symbol_conf.res_sample);
683 	}
684 	r = &he->res_samples[j];
685 	r->time = sample->time;
686 	r->cpu = sample->cpu;
687 	r->tid = sample->tid;
688 }
689 
690 static struct hist_entry*
691 __hists__add_entry(struct hists *hists,
692 		   struct addr_location *al,
693 		   struct symbol *sym_parent,
694 		   struct branch_info *bi,
695 		   struct mem_info *mi,
696 		   struct block_info *block_info,
697 		   struct perf_sample *sample,
698 		   bool sample_self,
699 		   struct hist_entry_ops *ops)
700 {
701 	struct namespaces *ns = thread__namespaces(al->thread);
702 	struct hist_entry entry = {
703 		.thread	= al->thread,
704 		.comm = thread__comm(al->thread),
705 		.cgroup_id = {
706 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
707 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
708 		},
709 		.cgroup = sample->cgroup,
710 		.ms = {
711 			.maps	= al->maps,
712 			.map	= al->map,
713 			.sym	= al->sym,
714 		},
715 		.srcline = (char *) al->srcline,
716 		.socket	 = al->socket,
717 		.cpu	 = al->cpu,
718 		.cpumode = al->cpumode,
719 		.ip	 = al->addr,
720 		.level	 = al->level,
721 		.code_page_size = sample->code_page_size,
722 		.stat = {
723 			.nr_events = 1,
724 			.period	= sample->period,
725 		},
726 		.parent = sym_parent,
727 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
728 		.hists	= hists,
729 		.branch_info = bi,
730 		.mem_info = mi,
731 		.block_info = block_info,
732 		.transaction = sample->transaction,
733 		.raw_data = sample->raw_data,
734 		.raw_size = sample->raw_size,
735 		.ops = ops,
736 		.time = hist_time(sample->time),
737 		.weight = sample->weight,
738 		.ins_lat = sample->ins_lat,
739 		.p_stage_cyc = sample->p_stage_cyc,
740 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
741 
742 	if (!hists->has_callchains && he && he->callchain_size != 0)
743 		hists->has_callchains = true;
744 	if (he && symbol_conf.res_sample)
745 		hists__res_sample(he, sample);
746 	return he;
747 }
748 
749 struct hist_entry *hists__add_entry(struct hists *hists,
750 				    struct addr_location *al,
751 				    struct symbol *sym_parent,
752 				    struct branch_info *bi,
753 				    struct mem_info *mi,
754 				    struct perf_sample *sample,
755 				    bool sample_self)
756 {
757 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
758 				  sample, sample_self, NULL);
759 }
760 
761 struct hist_entry *hists__add_entry_ops(struct hists *hists,
762 					struct hist_entry_ops *ops,
763 					struct addr_location *al,
764 					struct symbol *sym_parent,
765 					struct branch_info *bi,
766 					struct mem_info *mi,
767 					struct perf_sample *sample,
768 					bool sample_self)
769 {
770 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
771 				  sample, sample_self, ops);
772 }
773 
774 struct hist_entry *hists__add_entry_block(struct hists *hists,
775 					  struct addr_location *al,
776 					  struct block_info *block_info)
777 {
778 	struct hist_entry entry = {
779 		.block_info = block_info,
780 		.hists = hists,
781 		.ms = {
782 			.maps = al->maps,
783 			.map = al->map,
784 			.sym = al->sym,
785 		},
786 	}, *he = hists__findnew_entry(hists, &entry, al, false);
787 
788 	return he;
789 }
790 
791 static int
792 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
793 		    struct addr_location *al __maybe_unused)
794 {
795 	return 0;
796 }
797 
798 static int
799 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
800 			struct addr_location *al __maybe_unused)
801 {
802 	return 0;
803 }
804 
805 static int
806 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
807 {
808 	struct perf_sample *sample = iter->sample;
809 	struct mem_info *mi;
810 
811 	mi = sample__resolve_mem(sample, al);
812 	if (mi == NULL)
813 		return -ENOMEM;
814 
815 	iter->priv = mi;
816 	return 0;
817 }
818 
819 static int
820 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
821 {
822 	u64 cost;
823 	struct mem_info *mi = iter->priv;
824 	struct hists *hists = evsel__hists(iter->evsel);
825 	struct perf_sample *sample = iter->sample;
826 	struct hist_entry *he;
827 
828 	if (mi == NULL)
829 		return -EINVAL;
830 
831 	cost = sample->weight;
832 	if (!cost)
833 		cost = 1;
834 
835 	/*
836 	 * We must pass period=weight in order to get the correct
837 	 * sorting from hists__collapse_resort(), which is based solely
838 	 * on periods.  We want the sorting to be done on nr_events * weight,
839 	 * and this is achieved indirectly by passing period=weight here
840 	 * and summing it up in he_stat__add_period().
841 	 */
842 	sample->period = cost;
843 
844 	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
845 			      sample, true);
846 	if (!he)
847 		return -ENOMEM;
848 
849 	iter->he = he;
850 	return 0;
851 }
852 
853 static int
854 iter_finish_mem_entry(struct hist_entry_iter *iter,
855 		      struct addr_location *al __maybe_unused)
856 {
857 	struct evsel *evsel = iter->evsel;
858 	struct hists *hists = evsel__hists(evsel);
859 	struct hist_entry *he = iter->he;
860 	int err = -EINVAL;
861 
862 	if (he == NULL)
863 		goto out;
864 
865 	hists__inc_nr_samples(hists, he->filtered);
866 
867 	err = hist_entry__append_callchain(he, iter->sample);
868 
869 out:
870 	/*
871 	 * We don't need to free iter->priv (mem_info) here since the mem info
872 	 * was either already freed in hists__findnew_entry() or passed to a
873 	 * new hist entry by hist_entry__new().
874 	 */
875 	iter->priv = NULL;
876 
877 	iter->he = NULL;
878 	return err;
879 }
880 
881 static int
882 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
883 {
884 	struct branch_info *bi;
885 	struct perf_sample *sample = iter->sample;
886 
887 	bi = sample__resolve_bstack(sample, al);
888 	if (!bi)
889 		return -ENOMEM;
890 
891 	iter->curr = 0;
892 	iter->total = sample->branch_stack->nr;
893 
894 	iter->priv = bi;
895 	return 0;
896 }
897 
898 static int
899 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
900 			     struct addr_location *al __maybe_unused)
901 {
902 	return 0;
903 }
904 
905 static int
906 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
907 {
908 	struct branch_info *bi = iter->priv;
909 	int i = iter->curr;
910 
911 	if (bi == NULL)
912 		return 0;
913 
914 	if (iter->curr >= iter->total)
915 		return 0;
916 
917 	al->maps = bi[i].to.ms.maps;
918 	al->map = bi[i].to.ms.map;
919 	al->sym = bi[i].to.ms.sym;
920 	al->addr = bi[i].to.addr;
921 	return 1;
922 }
923 
924 static int
925 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
926 {
927 	struct branch_info *bi;
928 	struct evsel *evsel = iter->evsel;
929 	struct hists *hists = evsel__hists(evsel);
930 	struct perf_sample *sample = iter->sample;
931 	struct hist_entry *he = NULL;
932 	int i = iter->curr;
933 	int err = 0;
934 
935 	bi = iter->priv;
936 
937 	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
938 		goto out;
939 
940 	/*
941 	 * The report shows the percentage of total branches captured
942 	 * and not events sampled. Thus we use a pseudo period of 1.
943 	 */
944 	sample->period = 1;
945 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
946 
947 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
948 			      sample, true);
949 	if (he == NULL)
950 		return -ENOMEM;
951 
952 	hists__inc_nr_samples(hists, he->filtered);
953 
954 out:
955 	iter->he = he;
956 	iter->curr++;
957 	return err;
958 }
959 
960 static int
961 iter_finish_branch_entry(struct hist_entry_iter *iter,
962 			 struct addr_location *al __maybe_unused)
963 {
964 	zfree(&iter->priv);
965 	iter->he = NULL;
966 
967 	return iter->curr >= iter->total ? 0 : -1;
968 }
969 
970 static int
971 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
972 			  struct addr_location *al __maybe_unused)
973 {
974 	return 0;
975 }
976 
977 static int
978 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
979 {
980 	struct evsel *evsel = iter->evsel;
981 	struct perf_sample *sample = iter->sample;
982 	struct hist_entry *he;
983 
984 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
985 			      sample, true);
986 	if (he == NULL)
987 		return -ENOMEM;
988 
989 	iter->he = he;
990 	return 0;
991 }
992 
993 static int
994 iter_finish_normal_entry(struct hist_entry_iter *iter,
995 			 struct addr_location *al __maybe_unused)
996 {
997 	struct hist_entry *he = iter->he;
998 	struct evsel *evsel = iter->evsel;
999 	struct perf_sample *sample = iter->sample;
1000 
1001 	if (he == NULL)
1002 		return 0;
1003 
1004 	iter->he = NULL;
1005 
1006 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1007 
1008 	return hist_entry__append_callchain(he, sample);
1009 }
1010 
1011 static int
1012 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1013 			      struct addr_location *al __maybe_unused)
1014 {
1015 	struct hist_entry **he_cache;
1016 
1017 	callchain_cursor_commit(&callchain_cursor);
1018 
1019 	/*
1020 	 * This is for detecting cycles or recursion so that they're
1021 	 * accumulated only once, preventing entries from showing more
1022 	 * than 100% overhead.
1023 	 */
1024 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
1025 	if (he_cache == NULL)
1026 		return -ENOMEM;
1027 
1028 	iter->priv = he_cache;
1029 	iter->curr = 0;
1030 
1031 	return 0;
1032 }
1033 
1034 static int
1035 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1036 				 struct addr_location *al)
1037 {
1038 	struct evsel *evsel = iter->evsel;
1039 	struct hists *hists = evsel__hists(evsel);
1040 	struct perf_sample *sample = iter->sample;
1041 	struct hist_entry **he_cache = iter->priv;
1042 	struct hist_entry *he;
1043 	int err = 0;
1044 
1045 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
1046 			      sample, true);
1047 	if (he == NULL)
1048 		return -ENOMEM;
1049 
1050 	iter->he = he;
1051 	he_cache[iter->curr++] = he;
1052 
1053 	hist_entry__append_callchain(he, sample);
1054 
1055 	/*
1056 	 * We need to re-initialize the cursor since callchain_append()
1057 	 * advanced the cursor to the end.
1058 	 */
1059 	callchain_cursor_commit(&callchain_cursor);
1060 
1061 	hists__inc_nr_samples(hists, he->filtered);
1062 
1063 	return err;
1064 }
1065 
1066 static int
1067 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1068 			   struct addr_location *al)
1069 {
1070 	struct callchain_cursor_node *node;
1071 
1072 	node = callchain_cursor_current(&callchain_cursor);
1073 	if (node == NULL)
1074 		return 0;
1075 
1076 	return fill_callchain_info(al, node, iter->hide_unresolved);
1077 }
1078 
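/* Quick symbol inequality test, falling back to IPs when both are unresolved. */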
1079 static bool
1080 hist_entry__fast__sym_diff(struct hist_entry *left,
1081 			   struct hist_entry *right)
1082 {
1083 	struct symbol *sym_l = left->ms.sym;
1084 	struct symbol *sym_r = right->ms.sym;
1085 
1086 	if (!sym_l && !sym_r)
1087 		return left->ip != right->ip;
1088 
1089 	return !!_sort__sym_cmp(sym_l, sym_r);
1090 }
1091 
1093 static int
1094 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1095 			       struct addr_location *al)
1096 {
1097 	struct evsel *evsel = iter->evsel;
1098 	struct perf_sample *sample = iter->sample;
1099 	struct hist_entry **he_cache = iter->priv;
1100 	struct hist_entry *he;
1101 	struct hist_entry he_tmp = {
1102 		.hists = evsel__hists(evsel),
1103 		.cpu = al->cpu,
1104 		.thread = al->thread,
1105 		.comm = thread__comm(al->thread),
1106 		.ip = al->addr,
1107 		.ms = {
1108 			.maps = al->maps,
1109 			.map = al->map,
1110 			.sym = al->sym,
1111 		},
1112 		.srcline = (char *) al->srcline,
1113 		.parent = iter->parent,
1114 		.raw_data = sample->raw_data,
1115 		.raw_size = sample->raw_size,
1116 	};
1117 	int i;
1118 	struct callchain_cursor cursor;
1119 	bool fast = hists__has(he_tmp.hists, sym);
1120 
1121 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
1122 
1123 	callchain_cursor_advance(&callchain_cursor);
1124 
1125 	/*
1126 	 * Check if there are duplicate entries in the callchain;
1127 	 * it may contain cycles or recursive calls.
1128 	 */
1129 	for (i = 0; i < iter->curr; i++) {
1130 		/*
1131 		 * In most cases there are no duplicate entries in the
1132 		 * callchain since the symbols usually differ, so do a
1133 		 * quick check on the symbols first.
1134 		 */
1135 		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1136 			continue;
1137 
1138 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1139 			/* to avoid calling callback function */
1140 			iter->he = NULL;
1141 			return 0;
1142 		}
1143 	}
1144 
1145 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1146 			      sample, false);
1147 	if (he == NULL)
1148 		return -ENOMEM;
1149 
1150 	iter->he = he;
1151 	he_cache[iter->curr++] = he;
1152 
1153 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1154 		callchain_append(he->callchain, &cursor, sample->period);
1155 	return 0;
1156 }
1157 
1158 static int
1159 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1160 			     struct addr_location *al __maybe_unused)
1161 {
1162 	zfree(&iter->priv);
1163 	iter->he = NULL;
1164 
1165 	return 0;
1166 }
1167 
1168 const struct hist_iter_ops hist_iter_mem = {
1169 	.prepare_entry 		= iter_prepare_mem_entry,
1170 	.add_single_entry 	= iter_add_single_mem_entry,
1171 	.next_entry 		= iter_next_nop_entry,
1172 	.add_next_entry 	= iter_add_next_nop_entry,
1173 	.finish_entry 		= iter_finish_mem_entry,
1174 };
1175 
1176 const struct hist_iter_ops hist_iter_branch = {
1177 	.prepare_entry 		= iter_prepare_branch_entry,
1178 	.add_single_entry 	= iter_add_single_branch_entry,
1179 	.next_entry 		= iter_next_branch_entry,
1180 	.add_next_entry 	= iter_add_next_branch_entry,
1181 	.finish_entry 		= iter_finish_branch_entry,
1182 };
1183 
1184 const struct hist_iter_ops hist_iter_normal = {
1185 	.prepare_entry 		= iter_prepare_normal_entry,
1186 	.add_single_entry 	= iter_add_single_normal_entry,
1187 	.next_entry 		= iter_next_nop_entry,
1188 	.add_next_entry 	= iter_add_next_nop_entry,
1189 	.finish_entry 		= iter_finish_normal_entry,
1190 };
1191 
1192 const struct hist_iter_ops hist_iter_cumulative = {
1193 	.prepare_entry 		= iter_prepare_cumulative_entry,
1194 	.add_single_entry 	= iter_add_single_cumulative_entry,
1195 	.next_entry 		= iter_next_cumulative_entry,
1196 	.add_next_entry 	= iter_add_next_cumulative_entry,
1197 	.finish_entry 		= iter_finish_cumulative_entry,
1198 };
1199 
1200 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1201 			 int max_stack_depth, void *arg)
1202 {
1203 	int err, err2;
1204 	struct map *alm = NULL;
1205 
1206 	if (al)
1207 		alm = map__get(al->map);
1208 
1209 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1210 					iter->evsel, al, max_stack_depth);
1211 	if (err) {
1212 		map__put(alm);
1213 		return err;
1214 	}
1215 
1216 	err = iter->ops->prepare_entry(iter, al);
1217 	if (err)
1218 		goto out;
1219 
1220 	err = iter->ops->add_single_entry(iter, al);
1221 	if (err)
1222 		goto out;
1223 
1224 	if (iter->he && iter->add_entry_cb) {
1225 		err = iter->add_entry_cb(iter, al, true, arg);
1226 		if (err)
1227 			goto out;
1228 	}
1229 
1230 	while (iter->ops->next_entry(iter, al)) {
1231 		err = iter->ops->add_next_entry(iter, al);
1232 		if (err)
1233 			break;
1234 
1235 		if (iter->he && iter->add_entry_cb) {
1236 			err = iter->add_entry_cb(iter, al, false, arg);
1237 			if (err)
1238 				goto out;
1239 		}
1240 	}
1241 
1242 out:
1243 	err2 = iter->ops->finish_entry(iter, al);
1244 	if (!err)
1245 		err = err2;
1246 
1247 	map__put(alm);
1248 
1249 	return err;
1250 }
1251 
1252 int64_t
1253 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1254 {
1255 	struct hists *hists = left->hists;
1256 	struct perf_hpp_fmt *fmt;
1257 	int64_t cmp = 0;
1258 
1259 	hists__for_each_sort_list(hists, fmt) {
1260 		if (perf_hpp__is_dynamic_entry(fmt) &&
1261 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1262 			continue;
1263 
1264 		cmp = fmt->cmp(fmt, left, right);
1265 		if (cmp)
1266 			break;
1267 	}
1268 
1269 	return cmp;
1270 }
1271 
1272 int64_t
1273 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1274 {
1275 	struct hists *hists = left->hists;
1276 	struct perf_hpp_fmt *fmt;
1277 	int64_t cmp = 0;
1278 
1279 	hists__for_each_sort_list(hists, fmt) {
1280 		if (perf_hpp__is_dynamic_entry(fmt) &&
1281 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1282 			continue;
1283 
1284 		cmp = fmt->collapse(fmt, left, right);
1285 		if (cmp)
1286 			break;
1287 	}
1288 
1289 	return cmp;
1290 }
1291 
1292 void hist_entry__delete(struct hist_entry *he)
1293 {
1294 	struct hist_entry_ops *ops = he->ops;
1295 
1296 	thread__zput(he->thread);
1297 	map__zput(he->ms.map);
1298 
1299 	if (he->branch_info) {
1300 		map__zput(he->branch_info->from.ms.map);
1301 		map__zput(he->branch_info->to.ms.map);
1302 		free_srcline(he->branch_info->srcline_from);
1303 		free_srcline(he->branch_info->srcline_to);
1304 		zfree(&he->branch_info);
1305 	}
1306 
1307 	if (he->mem_info) {
1308 		map__zput(he->mem_info->iaddr.ms.map);
1309 		map__zput(he->mem_info->daddr.ms.map);
1310 		mem_info__zput(he->mem_info);
1311 	}
1312 
1313 	if (he->block_info)
1314 		block_info__zput(he->block_info);
1315 
1316 	zfree(&he->res_samples);
1317 	zfree(&he->stat_acc);
1318 	free_srcline(he->srcline);
1319 	if (he->srcfile && he->srcfile[0])
1320 		zfree(&he->srcfile);
1321 	free_callchain(he->callchain);
1322 	zfree(&he->trace_output);
1323 	zfree(&he->raw_data);
1324 	ops->free(he);
1325 }
1326 
1327 /*
1328  * If this is not the last column, then we need to pad it according to the
1329  * pre-calculated max length for this column; otherwise don't bother adding
1330  * spaces, because that would break viewing this with, for instance, 'less':
1331  * it would show tons of trailing spaces when a long C++ demangled method
1332  * name is sampled.
1333  */
1334 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1335 				   struct perf_hpp_fmt *fmt, int printed)
1336 {
1337 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1338 		const int width = fmt->width(fmt, hpp, he->hists);
1339 		if (printed < width) {
1340 			advance_hpp(hpp, printed);
1341 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1342 		}
1343 	}
1344 
1345 	return printed;
1346 }
1347 
1348 /*
1349  * collapse the histogram
1350  */
1351 
1352 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1353 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1354 				       enum hist_filter type);
1355 
1356 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1357 
1358 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1359 {
1360 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1361 }
1362 
1363 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1364 						enum hist_filter type,
1365 						fmt_chk_fn check)
1366 {
1367 	struct perf_hpp_fmt *fmt;
1368 	bool type_match = false;
1369 	struct hist_entry *parent = he->parent_he;
1370 
1371 	switch (type) {
1372 	case HIST_FILTER__THREAD:
1373 		if (symbol_conf.comm_list == NULL &&
1374 		    symbol_conf.pid_list == NULL &&
1375 		    symbol_conf.tid_list == NULL)
1376 			return;
1377 		break;
1378 	case HIST_FILTER__DSO:
1379 		if (symbol_conf.dso_list == NULL)
1380 			return;
1381 		break;
1382 	case HIST_FILTER__SYMBOL:
1383 		if (symbol_conf.sym_list == NULL)
1384 			return;
1385 		break;
1386 	case HIST_FILTER__PARENT:
1387 	case HIST_FILTER__GUEST:
1388 	case HIST_FILTER__HOST:
1389 	case HIST_FILTER__SOCKET:
1390 	case HIST_FILTER__C2C:
1391 	default:
1392 		return;
1393 	}
1394 
1395 	/* if it's filtered by its own fmt, it has to have filter bits */
1396 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1397 		if (check(fmt)) {
1398 			type_match = true;
1399 			break;
1400 		}
1401 	}
1402 
1403 	if (type_match) {
1404 		/*
1405 		 * If the filter applies to the current-level entry, propagate
1406 		 * the filter marker to its parents.  The marker bit was
1407 		 * already set by default, so it only needs to be cleared
1408 		 * for non-filtered entries.
1409 		 */
1410 		if (!(he->filtered & (1 << type))) {
1411 			while (parent) {
1412 				parent->filtered &= ~(1 << type);
1413 				parent = parent->parent_he;
1414 			}
1415 		}
1416 	} else {
1417 		/*
1418 		 * If the current entry doesn't have a matching format, set
1419 		 * the filter marker for upper-level entries.  It will be
1420 		 * cleared if any of its lower-level entries is not filtered.
1421 		 *
1422 		 * A lower-level entry inherits the parent's filter bit, so
1423 		 * that lower-level entries of a non-filtered entry won't
1424 		 * set the filter marker.
1425 		 */
1426 		if (parent == NULL)
1427 			he->filtered |= (1 << type);
1428 		else
1429 			he->filtered |= (parent->filtered & (1 << type));
1430 	}
1431 }
1432 
1433 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1434 {
1435 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1436 					    check_thread_entry);
1437 
1438 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1439 					    perf_hpp__is_dso_entry);
1440 
1441 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1442 					    perf_hpp__is_sym_entry);
1443 
1444 	hists__apply_filters(he->hists, he);
1445 }
1446 
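/*
 * Insert one hierarchy level of 'he' under 'parent_he', merging the
 * stats into a matching sibling when one already exists.
 */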
1447 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1448 						 struct rb_root_cached *root,
1449 						 struct hist_entry *he,
1450 						 struct hist_entry *parent_he,
1451 						 struct perf_hpp_list *hpp_list)
1452 {
1453 	struct rb_node **p = &root->rb_root.rb_node;
1454 	struct rb_node *parent = NULL;
1455 	struct hist_entry *iter, *new;
1456 	struct perf_hpp_fmt *fmt;
1457 	int64_t cmp;
1458 	bool leftmost = true;
1459 
1460 	while (*p != NULL) {
1461 		parent = *p;
1462 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1463 
1464 		cmp = 0;
1465 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1466 			cmp = fmt->collapse(fmt, iter, he);
1467 			if (cmp)
1468 				break;
1469 		}
1470 
1471 		if (!cmp) {
1472 			he_stat__add_stat(&iter->stat, &he->stat);
1473 			return iter;
1474 		}
1475 
1476 		if (cmp < 0)
1477 			p = &parent->rb_left;
1478 		else {
1479 			p = &parent->rb_right;
1480 			leftmost = false;
1481 		}
1482 	}
1483 
1484 	new = hist_entry__new(he, true);
1485 	if (new == NULL)
1486 		return NULL;
1487 
1488 	hists->nr_entries++;
1489 
1490 	/* save related format list for output */
1491 	new->hpp_list = hpp_list;
1492 	new->parent_he = parent_he;
1493 
1494 	hist_entry__apply_hierarchy_filters(new);
1495 
1496 	/* some fields are now passed to 'new' */
1497 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1498 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1499 			he->trace_output = NULL;
1500 		else
1501 			new->trace_output = NULL;
1502 
1503 		if (perf_hpp__is_srcline_entry(fmt))
1504 			he->srcline = NULL;
1505 		else
1506 			new->srcline = NULL;
1507 
1508 		if (perf_hpp__is_srcfile_entry(fmt))
1509 			he->srcfile = NULL;
1510 		else
1511 			new->srcfile = NULL;
1512 	}
1513 
1514 	rb_link_node(&new->rb_node_in, parent, p);
1515 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1516 	return new;
1517 }
1518 
1519 static int hists__hierarchy_insert_entry(struct hists *hists,
1520 					 struct rb_root_cached *root,
1521 					 struct hist_entry *he)
1522 {
1523 	struct perf_hpp_list_node *node;
1524 	struct hist_entry *new_he = NULL;
1525 	struct hist_entry *parent = NULL;
1526 	int depth = 0;
1527 	int ret = 0;
1528 
1529 	list_for_each_entry(node, &hists->hpp_formats, list) {
1530 		/* skip period (overhead) and elided columns */
1531 		if (node->level == 0 || node->skip)
1532 			continue;
1533 
1534 		/* insert copy of 'he' for each fmt into the hierarchy */
1535 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1536 		if (new_he == NULL) {
1537 			ret = -1;
1538 			break;
1539 		}
1540 
1541 		root = &new_he->hroot_in;
1542 		new_he->depth = depth++;
1543 		parent = new_he;
1544 	}
1545 
1546 	if (new_he) {
1547 		new_he->leaf = true;
1548 
1549 		if (hist_entry__has_callchains(new_he) &&
1550 		    symbol_conf.use_callchain) {
1551 			callchain_cursor_reset(&callchain_cursor);
1552 			if (callchain_merge(&callchain_cursor,
1553 					    new_he->callchain,
1554 					    he->callchain) < 0)
1555 				ret = -1;
1556 		}
1557 	}
1558 
1559 	/* 'he' is no longer used */
1560 	hist_entry__delete(he);
1561 
1562 	/* return 0 (or -1) since it already applied filters */
1563 	return ret;
1564 }
1565 
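/*
 * Merge 'he' into the collapsed tree: when an equal entry exists, combine
 * stats and callchains and free 'he', otherwise link it as a new node.
 * Returns 1 when a new node was linked, 0 otherwise, or -1 on callchain
 * merge failure.
 */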
1566 static int hists__collapse_insert_entry(struct hists *hists,
1567 					struct rb_root_cached *root,
1568 					struct hist_entry *he)
1569 {
1570 	struct rb_node **p = &root->rb_root.rb_node;
1571 	struct rb_node *parent = NULL;
1572 	struct hist_entry *iter;
1573 	int64_t cmp;
1574 	bool leftmost = true;
1575 
1576 	if (symbol_conf.report_hierarchy)
1577 		return hists__hierarchy_insert_entry(hists, root, he);
1578 
1579 	while (*p != NULL) {
1580 		parent = *p;
1581 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1582 
1583 		cmp = hist_entry__collapse(iter, he);
1584 
1585 		if (!cmp) {
1586 			int ret = 0;
1587 
1588 			he_stat__add_stat(&iter->stat, &he->stat);
1589 			if (symbol_conf.cumulate_callchain)
1590 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1591 
1592 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1593 				callchain_cursor_reset(&callchain_cursor);
1594 				if (callchain_merge(&callchain_cursor,
1595 						    iter->callchain,
1596 						    he->callchain) < 0)
1597 					ret = -1;
1598 			}
1599 			hist_entry__delete(he);
1600 			return ret;
1601 		}
1602 
1603 		if (cmp < 0)
1604 			p = &(*p)->rb_left;
1605 		else {
1606 			p = &(*p)->rb_right;
1607 			leftmost = false;
1608 		}
1609 	}
1610 	hists->nr_entries++;
1611 
1612 	rb_link_node(&he->rb_node_in, parent, p);
1613 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1614 	return 1;
1615 }
1616 
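/*
 * Hand out the current input tree and flip hists->entries_in to the
 * other element of entries_in_array, so new entries can keep arriving
 * while the returned tree is collapsed.
 */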
1617 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1618 {
1619 	struct rb_root_cached *root;
1620 
1621 	pthread_mutex_lock(&hists->lock);
1622 
1623 	root = hists->entries_in;
1624 	if (++hists->entries_in > &hists->entries_in_array[1])
1625 		hists->entries_in = &hists->entries_in_array[0];
1626 
1627 	pthread_mutex_unlock(&hists->lock);
1628 
1629 	return root;
1630 }
1631 
1632 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1633 {
1634 	hists__filter_entry_by_dso(hists, he);
1635 	hists__filter_entry_by_thread(hists, he);
1636 	hists__filter_entry_by_symbol(hists, he);
1637 	hists__filter_entry_by_socket(hists, he);
1638 }
1639 
1640 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1641 {
1642 	struct rb_root_cached *root;
1643 	struct rb_node *next;
1644 	struct hist_entry *n;
1645 	int ret;
1646 
1647 	if (!hists__has(hists, need_collapse))
1648 		return 0;
1649 
1650 	hists->nr_entries = 0;
1651 
1652 	root = hists__get_rotate_entries_in(hists);
1653 
1654 	next = rb_first_cached(root);
1655 
1656 	while (next) {
1657 		if (session_done())
1658 			break;
1659 		n = rb_entry(next, struct hist_entry, rb_node_in);
1660 		next = rb_next(&n->rb_node_in);
1661 
1662 		rb_erase_cached(&n->rb_node_in, root);
1663 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1664 		if (ret < 0)
1665 			return -1;
1666 
1667 		if (ret) {
1668 			/*
1669 			 * If it wasn't combined with one of the entries already
1670 			 * collapsed, we need to apply the filters that may have
1671 			 * been set by, say, the hist_browser.
1672 			 */
1673 			hists__apply_filters(hists, n);
1674 		}
1675 		if (prog)
1676 			ui_progress__update(prog, 1);
1677 	}
1678 	return 0;
1679 }
1680 
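/* Compare two entries using the output sort keys, skipping columns that should not be shown. */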
1681 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1682 {
1683 	struct hists *hists = a->hists;
1684 	struct perf_hpp_fmt *fmt;
1685 	int64_t cmp = 0;
1686 
1687 	hists__for_each_sort_list(hists, fmt) {
1688 		if (perf_hpp__should_skip(fmt, a->hists))
1689 			continue;
1690 
1691 		cmp = fmt->sort(fmt, a, b);
1692 		if (cmp)
1693 			break;
1694 	}
1695 
1696 	return cmp;
1697 }
1698 
1699 static void hists__reset_filter_stats(struct hists *hists)
1700 {
1701 	hists->nr_non_filtered_entries = 0;
1702 	hists->stats.total_non_filtered_period = 0;
1703 }
1704 
1705 void hists__reset_stats(struct hists *hists)
1706 {
1707 	hists->nr_entries = 0;
1708 	hists->stats.total_period = 0;
1709 
1710 	hists__reset_filter_stats(hists);
1711 }
1712 
1713 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1714 {
1715 	hists->nr_non_filtered_entries++;
1716 	hists->stats.total_non_filtered_period += h->stat.period;
1717 }
1718 
1719 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1720 {
1721 	if (!h->filtered)
1722 		hists__inc_filter_stats(hists, h);
1723 
1724 	hists->nr_entries++;
1725 	hists->stats.total_period += h->stat.period;
1726 }
1727 
1728 static void hierarchy_recalc_total_periods(struct hists *hists)
1729 {
1730 	struct rb_node *node;
1731 	struct hist_entry *he;
1732 
1733 	node = rb_first_cached(&hists->entries);
1734 
1735 	hists->stats.total_period = 0;
1736 	hists->stats.total_non_filtered_period = 0;
1737 
1738 	/*
1739 	 * Recalculate the total period using top-level entries only,
1740 	 * since lower-level entries see only non-filtered entries
1741 	 * while upper-level entries hold the sum of both.
1742 	 */
1743 	while (node) {
1744 		he = rb_entry(node, struct hist_entry, rb_node);
1745 		node = rb_next(node);
1746 
1747 		hists->stats.total_period += he->stat.period;
1748 		if (!he->filtered)
1749 			hists->stats.total_non_filtered_period += he->stat.period;
1750 	}
1751 }
1752 
1753 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1754 					  struct hist_entry *he)
1755 {
1756 	struct rb_node **p = &root->rb_root.rb_node;
1757 	struct rb_node *parent = NULL;
1758 	struct hist_entry *iter;
1759 	struct perf_hpp_fmt *fmt;
1760 	bool leftmost = true;
1761 
1762 	while (*p != NULL) {
1763 		parent = *p;
1764 		iter = rb_entry(parent, struct hist_entry, rb_node);
1765 
1766 		if (hist_entry__sort(he, iter) > 0)
1767 			p = &parent->rb_left;
1768 		else {
1769 			p = &parent->rb_right;
1770 			leftmost = false;
1771 		}
1772 	}
1773 
1774 	rb_link_node(&he->rb_node, parent, p);
1775 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1776 
1777 	/* update column width of dynamic entry */
1778 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1779 		if (perf_hpp__is_dynamic_entry(fmt))
1780 			fmt->sort(fmt, he, NULL);
1781 	}
1782 }
1783 
1784 static void hists__hierarchy_output_resort(struct hists *hists,
1785 					   struct ui_progress *prog,
1786 					   struct rb_root_cached *root_in,
1787 					   struct rb_root_cached *root_out,
1788 					   u64 min_callchain_hits,
1789 					   bool use_callchain)
1790 {
1791 	struct rb_node *node;
1792 	struct hist_entry *he;
1793 
1794 	*root_out = RB_ROOT_CACHED;
1795 	node = rb_first_cached(root_in);
1796 
1797 	while (node) {
1798 		he = rb_entry(node, struct hist_entry, rb_node_in);
1799 		node = rb_next(node);
1800 
1801 		hierarchy_insert_output_entry(root_out, he);
1802 
1803 		if (prog)
1804 			ui_progress__update(prog, 1);
1805 
1806 		hists->nr_entries++;
1807 		if (!he->filtered) {
1808 			hists->nr_non_filtered_entries++;
1809 			hists__calc_col_len(hists, he);
1810 		}
1811 
1812 		if (!he->leaf) {
1813 			hists__hierarchy_output_resort(hists, prog,
1814 						       &he->hroot_in,
1815 						       &he->hroot_out,
1816 						       min_callchain_hits,
1817 						       use_callchain);
1818 			continue;
1819 		}
1820 
1821 		if (!use_callchain)
1822 			continue;
1823 
1824 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1825 			u64 total = he->stat.period;
1826 
1827 			if (symbol_conf.cumulate_callchain)
1828 				total = he->stat_acc->period;
1829 
1830 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1831 		}
1832 
1833 		callchain_param.sort(&he->sorted_chain, he->callchain,
1834 				     min_callchain_hits, &callchain_param);
1835 	}
1836 }
1837 
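/*
 * Sort the entry's callchain, if any, and insert the entry at its sorted
 * position in the output rbtree.
 */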
1838 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1839 					 struct hist_entry *he,
1840 					 u64 min_callchain_hits,
1841 					 bool use_callchain)
1842 {
1843 	struct rb_node **p = &entries->rb_root.rb_node;
1844 	struct rb_node *parent = NULL;
1845 	struct hist_entry *iter;
1846 	struct perf_hpp_fmt *fmt;
1847 	bool leftmost = true;
1848 
1849 	if (use_callchain) {
1850 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1851 			u64 total = he->stat.period;
1852 
1853 			if (symbol_conf.cumulate_callchain)
1854 				total = he->stat_acc->period;
1855 
1856 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1857 		}
1858 		callchain_param.sort(&he->sorted_chain, he->callchain,
1859 				      min_callchain_hits, &callchain_param);
1860 	}
1861 
1862 	while (*p != NULL) {
1863 		parent = *p;
1864 		iter = rb_entry(parent, struct hist_entry, rb_node);
1865 
1866 		if (hist_entry__sort(he, iter) > 0)
1867 			p = &(*p)->rb_left;
1868 		else {
1869 			p = &(*p)->rb_right;
1870 			leftmost = false;
1871 		}
1872 	}
1873 
1874 	rb_link_node(&he->rb_node, parent, p);
1875 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
1876 
1877 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1878 		if (perf_hpp__is_dynamic_entry(fmt) &&
1879 		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1880 			fmt->sort(fmt, he, NULL);  /* update column width */
1881 	}
1882 }
1883 
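/*
 * Rebuild hists->entries in output order from the collapsed (or input)
 * tree, recomputing stats and column widths.  Entries for which 'cb'
 * returns true are left out of the output tree.
 */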
1884 static void output_resort(struct hists *hists, struct ui_progress *prog,
1885 			  bool use_callchain, hists__resort_cb_t cb,
1886 			  void *cb_arg)
1887 {
1888 	struct rb_root_cached *root;
1889 	struct rb_node *next;
1890 	struct hist_entry *n;
1891 	u64 callchain_total;
1892 	u64 min_callchain_hits;
1893 
1894 	callchain_total = hists->callchain_period;
1895 	if (symbol_conf.filter_relative)
1896 		callchain_total = hists->callchain_non_filtered_period;
1897 
1898 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1899 
1900 	hists__reset_stats(hists);
1901 	hists__reset_col_len(hists);
1902 
1903 	if (symbol_conf.report_hierarchy) {
1904 		hists__hierarchy_output_resort(hists, prog,
1905 					       &hists->entries_collapsed,
1906 					       &hists->entries,
1907 					       min_callchain_hits,
1908 					       use_callchain);
1909 		hierarchy_recalc_total_periods(hists);
1910 		return;
1911 	}
1912 
1913 	if (hists__has(hists, need_collapse))
1914 		root = &hists->entries_collapsed;
1915 	else
1916 		root = hists->entries_in;
1917 
1918 	next = rb_first_cached(root);
1919 	hists->entries = RB_ROOT_CACHED;
1920 
1921 	while (next) {
1922 		n = rb_entry(next, struct hist_entry, rb_node_in);
1923 		next = rb_next(&n->rb_node_in);
1924 
1925 		if (cb && cb(n, cb_arg))
1926 			continue;
1927 
1928 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1929 		hists__inc_stats(hists, n);
1930 
1931 		if (!n->filtered)
1932 			hists__calc_col_len(hists, n);
1933 
1934 		if (prog)
1935 			ui_progress__update(prog, 1);
1936 	}
1937 }
1938 
1939 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1940 			     hists__resort_cb_t cb, void *cb_arg)
1941 {
1942 	bool use_callchain;
1943 
1944 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1945 		use_callchain = evsel__has_callchain(evsel);
1946 	else
1947 		use_callchain = symbol_conf.use_callchain;
1948 
1949 	use_callchain |= symbol_conf.show_branchflag_count;
1950 
1951 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1952 }
1953 
1954 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1955 {
1956 	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1957 }
1958 
1959 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1960 {
1961 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1962 }
1963 
1964 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1965 			     hists__resort_cb_t cb)
1966 {
1967 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1968 }
1969 
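/*
 * Hierarchy browsing may only descend into an entry's children when it
 * is a non-leaf that is unfolded (or when descent is forced).
 */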
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

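/* Find the last visible descendant of @node in display order. */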
struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

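/*
 * Advance to the next entry in display order: descend into the first
 * child when allowed, otherwise move to the next sibling, climbing back
 * up through the parents when a subtree is exhausted.
 */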
struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

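/*
 * Step to the previous entry in display order: the last visible
 * descendant of the previous sibling, or the parent when @node is the
 * first child.
 */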
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

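/*
 * Check whether @he has at least one unfiltered child whose percentage
 * is at or above @limit, i.e. whether expanding it would show anything.
 */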
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

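/*
 * Clear @filter from @h and, once the entry is completely unfiltered,
 * fold it and account it in the non-filtered stats and column widths.
 * In hierarchy mode the stats are also propagated to the parents.
 */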
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

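/*
 * Apply @filter to every output entry: reset the filter stats and
 * column widths, then re-account each entry that the predicate does not
 * filter out.
 */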
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

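/*
 * Re-insert @he into @root according to the current sort order and
 * recursively rebuild its (unfiltered) subtree the same way.
 */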
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

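/*
 * Hierarchy-mode filtering: walk the whole hierarchy, classify each
 * entry via hist_entry__filter() and then resort the output tree, since
 * filtering can change the periods of the parent entries.
 */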
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set the filter marker and move to a child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set the filter marker and move to the next sibling
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add the period to the hists and parents, erase the filter
		 * marker and move to the next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter since a filter in a
	 * lower hierarchy can change the periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

2270 
2271 void hists__filter_by_thread(struct hists *hists)
2272 {
2273 	if (symbol_conf.report_hierarchy)
2274 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2275 					hists->thread_filter);
2276 	else
2277 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2278 				      hists__filter_entry_by_thread);
2279 }
2280 
2281 void hists__filter_by_dso(struct hists *hists)
2282 {
2283 	if (symbol_conf.report_hierarchy)
2284 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2285 					hists->dso_filter);
2286 	else
2287 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2288 				      hists__filter_entry_by_dso);
2289 }
2290 
2291 void hists__filter_by_symbol(struct hists *hists)
2292 {
2293 	if (symbol_conf.report_hierarchy)
2294 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2295 					hists->symbol_filter_str);
2296 	else
2297 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2298 				      hists__filter_entry_by_symbol);
2299 }
2300 
2301 void hists__filter_by_socket(struct hists *hists)
2302 {
2303 	if (symbol_conf.report_hierarchy)
2304 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2305 					&hists->socket_filter);
2306 	else
2307 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2308 				      hists__filter_entry_by_socket);
2309 }
2310 
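/* nr_events[0] doubles as the total count across all event types. */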
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

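/*
 * Insert a zero-period clone of @pair into @hists (used by hists__link())
 * so that compared hists line up entry by entry; reuse an existing entry
 * if one collapses to the same key.
 */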
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(*he->stat_acc));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

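/*
 * Hierarchy flavour of hists__add_dummy_entry(): match @pair against one
 * level of the hierarchy using the per-node sort keys (he->hpp_list) and
 * insert a zero-period clone when no entry collapses to the same key.
 */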
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

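/* Look up the entry in @hists that collapses to the same key as @he. */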
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

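/*
 * Recursively pair up matching entries of two hierarchies, level by
 * level.
 */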
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

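/*
 * Hierarchy flavour of hists__link(): for every entry of @other_root
 * that has no pair in the leader hierarchy yet, add a dummy entry under
 * @parent and recurse into the children.
 */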
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not take the parent pointer from pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

2572 
2573 /*
2574  * Look for entries in the other hists that are not present in the leader, if
2575  * we find them, just add a dummy entry on the leader hists, with period=0,
2576  * nr_events=0, to serve as the list header.
2577  */
2578 int hists__link(struct hists *leader, struct hists *other)
2579 {
2580 	struct rb_root_cached *root;
2581 	struct rb_node *nd;
2582 	struct hist_entry *pos, *pair;
2583 
2584 	if (symbol_conf.report_hierarchy) {
2585 		/* hierarchy report always collapses entries */
2586 		return hists__link_hierarchy(leader, NULL,
2587 					     &leader->entries_collapsed,
2588 					     &other->entries_collapsed);
2589 	}
2590 
2591 	if (hists__has(other, need_collapse))
2592 		root = &other->entries_collapsed;
2593 	else
2594 		root = other->entries_in;
2595 
2596 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2597 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2598 
2599 		if (!hist_entry__has_pairs(pos)) {
2600 			pair = hists__add_dummy_entry(leader, pos);
2601 			if (pair == NULL)
2602 				return -1;
2603 			hist_entry__add_pair(pos, pair);
2604 		}
2605 	}
2606 
2607 	return 0;
2608 }
2609 
2610 int hists__unlink(struct hists *hists)
2611 {
2612 	struct rb_root_cached *root;
2613 	struct rb_node *nd;
2614 	struct hist_entry *pos;
2615 
2616 	if (hists__has(hists, need_collapse))
2617 		root = &hists->entries_collapsed;
2618 	else
2619 		root = hists->entries_in;
2620 
2621 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2622 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2623 		list_del_init(&pos->pairs.node);
2624 	}
2625 
2626 	return 0;
2627 }
2628 
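/*
 * Accumulate per-branch cycle counts from the sample's branch stack
 * into the annotation histograms (and into @total_cycles when asked).
 */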
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			free(bi);
		}
	}
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
				 bool skip_empty)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (skip_empty && !hists->stats.nr_samples)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		ret += fprintf(fp, "%16s events: %10u\n",
			       "SAMPLE", hists->stats.nr_samples);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

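/*
 * Format the report title line: sample/event counts (aggregated over
 * group members), the sampling frequency when @show_freq is set, and
 * any active UID/thread/DSO/socket filters.
 */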
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
				    ", Thread: %s(%d)",
				     (thread->comm_set ? thread__comm_str(thread) : ""),
				    thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
				    ", Thread: %s",
				     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				    ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				    ", Processor Socket: %d", socket_id);

	return printed;
}

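/*
 * Parse the --percentage option: "relative" computes percentages against
 * the filtered total, "absolute" against the whole session.
 */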
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

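/*
 * Handle "hist.*" variables from the perf config file.  For example,
 * this snippet in ~/.perfconfig selects relative percentages (a sketch
 * of the config syntax, not taken from this file):
 *
 *	[hist]
 *		percentage = relative
 */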
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}