/* xref: /linux/tools/perf/util/hist.c (revision 3ce095c16263630dde46d6051854073edaacf3d7) */
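/*
 * hist.c - histogram handling for perf tools: builds hist_entries from
 * samples, collapses duplicates across sort keys, decays entries for
 * live updates, resorts them for output, and applies the DSO, thread
 * and symbol filters.
 */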
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "evlist.h"
7 #include "evsel.h"
8 #include "annotate.h"
9 #include "ui/progress.h"
10 #include <math.h>
11 
12 static bool hists__filter_entry_by_dso(struct hists *hists,
13 				       struct hist_entry *he);
14 static bool hists__filter_entry_by_thread(struct hists *hists,
15 					  struct hist_entry *he);
16 static bool hists__filter_entry_by_symbol(struct hists *hists,
17 					  struct hist_entry *he);
18 
19 u16 hists__col_len(struct hists *hists, enum hist_column col)
20 {
21 	return hists->col_len[col];
22 }
23 
24 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
25 {
26 	hists->col_len[col] = len;
27 }
28 
29 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
30 {
31 	if (len > hists__col_len(hists, col)) {
32 		hists__set_col_len(hists, col, len);
33 		return true;
34 	}
35 	return false;
36 }
37 
38 void hists__reset_col_len(struct hists *hists)
39 {
40 	enum hist_column col;
41 
42 	for (col = 0; col < HISTC_NR_COLS; ++col)
43 		hists__set_col_len(hists, col, 0);
44 }
45 
46 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
47 {
48 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
49 
50 	if (hists__col_len(hists, dso) < unresolved_col_width &&
51 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
52 	    !symbol_conf.dso_list)
53 		hists__set_col_len(hists, dso, unresolved_col_width);
54 }
55 
56 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
57 {
58 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
59 	int symlen;
60 	u16 len;
61 
62 	/*
63 	 * +4 accounts for '[x] ' priv level info
64 	 * +2 accounts for 0x prefix on raw addresses
65 	 * +3 accounts for ' y ' symtab origin info
66 	 */
67 	if (h->ms.sym) {
68 		symlen = h->ms.sym->namelen + 4;
69 		if (verbose)
70 			symlen += BITS_PER_LONG / 4 + 2 + 3;
71 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
72 	} else {
73 		symlen = unresolved_col_width + 4 + 2;
74 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
75 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
76 	}
77 
78 	len = thread__comm_len(h->thread);
79 	if (hists__new_col_len(hists, HISTC_COMM, len))
80 		hists__set_col_len(hists, HISTC_THREAD, len + 6);
81 
82 	if (h->ms.map) {
83 		len = dso__name_len(h->ms.map->dso);
84 		hists__new_col_len(hists, HISTC_DSO, len);
85 	}
86 
87 	if (h->parent)
88 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
89 
90 	if (h->branch_info) {
91 		if (h->branch_info->from.sym) {
92 			symlen = (int)h->branch_info->from.sym->namelen + 4;
93 			if (verbose)
94 				symlen += BITS_PER_LONG / 4 + 2 + 3;
95 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
96 
97 			symlen = dso__name_len(h->branch_info->from.map->dso);
98 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
99 		} else {
100 			symlen = unresolved_col_width + 4 + 2;
101 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
102 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
103 		}
104 
105 		if (h->branch_info->to.sym) {
106 			symlen = (int)h->branch_info->to.sym->namelen + 4;
107 			if (verbose)
108 				symlen += BITS_PER_LONG / 4 + 2 + 3;
109 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
110 
111 			symlen = dso__name_len(h->branch_info->to.map->dso);
112 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
113 		} else {
114 			symlen = unresolved_col_width + 4 + 2;
115 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
116 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
117 		}
118 	}
119 
120 	if (h->mem_info) {
121 		if (h->mem_info->daddr.sym) {
122 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
123 			       + unresolved_col_width + 2;
124 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
125 					   symlen);
126 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
127 					   symlen + 1);
128 		} else {
129 			symlen = unresolved_col_width + 4 + 2;
130 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
131 					   symlen);
132 		}
133 		if (h->mem_info->daddr.map) {
134 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
135 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
136 					   symlen);
137 		} else {
138 			symlen = unresolved_col_width + 4 + 2;
139 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
140 		}
141 	} else {
142 		symlen = unresolved_col_width + 4 + 2;
143 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
144 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 	}
146 
147 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
148 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
149 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
150 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
151 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
152 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
153 
154 	if (h->transaction)
155 		hists__new_col_len(hists, HISTC_TRANSACTION,
156 				   hist_entry__transaction_len());
157 }
158 
159 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
160 {
161 	struct rb_node *next = rb_first(&hists->entries);
162 	struct hist_entry *n;
163 	int row = 0;
164 
165 	hists__reset_col_len(hists);
166 
167 	while (next && row++ < max_rows) {
168 		n = rb_entry(next, struct hist_entry, rb_node);
169 		if (!n->filtered)
170 			hists__calc_col_len(hists, n);
171 		next = rb_next(&n->rb_node);
172 	}
173 }
174 
175 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
176 					unsigned int cpumode, u64 period)
177 {
178 	switch (cpumode) {
179 	case PERF_RECORD_MISC_KERNEL:
180 		he_stat->period_sys += period;
181 		break;
182 	case PERF_RECORD_MISC_USER:
183 		he_stat->period_us += period;
184 		break;
185 	case PERF_RECORD_MISC_GUEST_KERNEL:
186 		he_stat->period_guest_sys += period;
187 		break;
188 	case PERF_RECORD_MISC_GUEST_USER:
189 		he_stat->period_guest_us += period;
190 		break;
191 	default:
192 		break;
193 	}
194 }
195 
196 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
197 				u64 weight)
198 {
199 
200 	he_stat->period		+= period;
201 	he_stat->weight		+= weight;
202 	he_stat->nr_events	+= 1;
203 }
204 
205 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
206 {
207 	dest->period		+= src->period;
208 	dest->period_sys	+= src->period_sys;
209 	dest->period_us		+= src->period_us;
210 	dest->period_guest_sys	+= src->period_guest_sys;
211 	dest->period_guest_us	+= src->period_guest_us;
212 	dest->nr_events		+= src->nr_events;
213 	dest->weight		+= src->weight;
214 }
215 
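/*
 * Exponential decay, used by the live 'perf top' style of updating:
 * each round an entry keeps 7/8 of its period, so entries that stop
 * getting samples gradually age out of the histogram.
 */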
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of) an array allocated
			 * by sample__resolve_bstack() and will be freed
			 * after the new entries are added, so we need to
			 * save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching for an entry regardless of which
		 * sort keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is based solely
	 * on periods.  We want the sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and summing it up in
	 * he_stat__add_period().  E.g. two loads of cost 10 each end
	 * up with period = 20, so the entry sorts by total cost rather
	 * than by sample count.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling the callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

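/*
 * The cumulative ("children") mode adds an entry for every node of a
 * sample's callchain so that callers accumulate the periods of their
 * callees.  The he_cache array allocated below remembers which entries
 * were already credited for the current sample, to cope with cycles
 * and recursion.
 */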
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they are
	 * accumulated only once, preventing any entry from showing
	 * more than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check whether there are duplicate entries in the callchain.
	 * The chain may contain cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

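/*
 * A sketch of how callers are expected to drive the iterator (loosely
 * modeled on builtin-report.c; the variable names and surrounding
 * setup here are illustrative, not verbatim):
 *
 *	struct hist_entry_iter iter = {
 *		.add_entry_cb = hist_iter__report_callback,
 *	};
 *
 *	if (sort__mode == SORT_MODE__BRANCH)
 *		iter.ops = &hist_iter_branch;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *	else
 *		iter.ops = &hist_iter_normal;
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   max_stack, rep);
 */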
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

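/*
 * hists->entries_in points at one of the two input trees in
 * entries_in_array.  Rotating them under hists->lock lets the
 * collapse/resort path drain one tree while new samples keep being
 * inserted into the other, e.g. while 'perf top' refreshes its
 * display.
 */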
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

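/*
 * Note the insertion below: hist_entry__sort() returning > 0 sends the
 * new entry to the left, so an in-order walk of hists->entries visits
 * entries in descending sort order, i.e. hottest first.
 */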
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}


static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

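/*
 * The entry pairing below is what multi-hists consumers such as
 * 'perf diff' and event group viewing use to relate corresponding
 * entries from different sessions or events.
 */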
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}


size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}


u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

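/*
 * These two helpers back the '--percentage {relative|absolute}'
 * option of the report/top/diff tools and the matching config file
 * entry, e.g. in ~/.perfconfig:
 *
 *	[hist]
 *		percentage = relative
 */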
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}