xref: /linux/tools/perf/util/hist.c (revision 9e906a9dead17d81d6c2687f65e159231d0e3286)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "mem-info.h"
13 #include "session.h"
14 #include "namespaces.h"
15 #include "cgroup.h"
16 #include "sort.h"
17 #include "units.h"
18 #include "evlist.h"
19 #include "evsel.h"
20 #include "annotate.h"
21 #include "srcline.h"
22 #include "symbol.h"
23 #include "thread.h"
24 #include "block-info.h"
25 #include "ui/progress.h"
26 #include <errno.h>
27 #include <math.h>
28 #include <inttypes.h>
29 #include <sys/param.h>
30 #include <linux/rbtree.h>
31 #include <linux/string.h>
32 #include <linux/time64.h>
33 #include <linux/zalloc.h>
34 
35 static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
36 static int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
37 
38 static bool hists__filter_entry_by_dso(struct hists *hists,
39 				       struct hist_entry *he);
40 static bool hists__filter_entry_by_thread(struct hists *hists,
41 					  struct hist_entry *he);
42 static bool hists__filter_entry_by_symbol(struct hists *hists,
43 					  struct hist_entry *he);
44 static bool hists__filter_entry_by_socket(struct hists *hists,
45 					  struct hist_entry *he);
46 static bool hists__filter_entry_by_parallelism(struct hists *hists,
47 					       struct hist_entry *he);
48 
49 u16 hists__col_len(struct hists *hists, enum hist_column col)
50 {
51 	return hists->col_len[col];
52 }
53 
54 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
55 {
56 	hists->col_len[col] = len;
57 }
58 
59 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
60 {
61 	if (len > hists__col_len(hists, col)) {
62 		hists__set_col_len(hists, col, len);
63 		return true;
64 	}
65 	return false;
66 }
67 
68 void hists__reset_col_len(struct hists *hists)
69 {
70 	enum hist_column col;
71 
72 	for (col = 0; col < HISTC_NR_COLS; ++col)
73 		hists__set_col_len(hists, col, 0);
74 }
75 
76 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
77 {
78 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
79 
80 	if (hists__col_len(hists, dso) < unresolved_col_width &&
81 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
82 	    !symbol_conf.dso_list)
83 		hists__set_col_len(hists, dso, unresolved_col_width);
84 }
85 
86 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
87 {
88 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
89 	int symlen;
90 	u16 len;
91 
92 	if (h->block_info)
93 		return;
94 	/*
95 	 * +4 accounts for '[x] ' priv level info
96 	 * +2 accounts for 0x prefix on raw addresses
97 	 * +3 accounts for ' y ' symtab origin info
98 	 */
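	/*
	 * Example of the arithmetic above: on a 64-bit build with verbose
	 * output the symbol column grows to namelen + 4 + 16 + 2 + 3,
	 * since BITS_PER_LONG / 4 is 16 hex digits for the raw address.
	 */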
99 	if (h->ms.sym) {
100 		symlen = h->ms.sym->namelen + 4;
101 		if (verbose > 0)
102 			symlen += BITS_PER_LONG / 4 + 2 + 3;
103 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
104 	} else {
105 		symlen = unresolved_col_width + 4 + 2;
106 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
107 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
108 	}
109 
110 	len = thread__comm_len(h->thread);
111 	if (hists__new_col_len(hists, HISTC_COMM, len))
112 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
113 
114 	if (h->ms.map) {
115 		len = dso__name_len(map__dso(h->ms.map));
116 		hists__new_col_len(hists, HISTC_DSO, len);
117 	}
118 
119 	if (h->parent)
120 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
121 
122 	if (h->branch_info) {
123 		if (h->branch_info->from.ms.sym) {
124 			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
125 			if (verbose > 0)
126 				symlen += BITS_PER_LONG / 4 + 2 + 3;
127 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
128 
129 			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
130 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
131 		} else {
132 			symlen = unresolved_col_width + 4 + 2;
133 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
134 			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
135 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
136 		}
137 
138 		if (h->branch_info->to.ms.sym) {
139 			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
140 			if (verbose > 0)
141 				symlen += BITS_PER_LONG / 4 + 2 + 3;
142 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
143 
144 			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
145 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
146 		} else {
147 			symlen = unresolved_col_width + 4 + 2;
148 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
149 			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
150 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
151 		}
152 
153 		if (h->branch_info->srcline_from)
154 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
155 					strlen(h->branch_info->srcline_from));
156 		if (h->branch_info->srcline_to)
157 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
158 					strlen(h->branch_info->srcline_to));
159 	}
160 
161 	if (h->mem_info) {
162 		if (mem_info__daddr(h->mem_info)->ms.sym) {
163 			symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
164 			       + unresolved_col_width + 2;
165 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
166 					   symlen);
167 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
168 					   symlen + 1);
169 		} else {
170 			symlen = unresolved_col_width + 4 + 2;
171 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
172 					   symlen);
173 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
174 					   symlen);
175 		}
176 
177 		if (mem_info__iaddr(h->mem_info)->ms.sym) {
178 			symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
179 			       + unresolved_col_width + 2;
180 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
181 					   symlen);
182 		} else {
183 			symlen = unresolved_col_width + 4 + 2;
184 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
185 					   symlen);
186 		}
187 
188 		if (mem_info__daddr(h->mem_info)->ms.map) {
189 			symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
190 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
191 					   symlen);
192 		} else {
193 			symlen = unresolved_col_width + 4 + 2;
194 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
195 		}
196 
197 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
198 				   unresolved_col_width + 4 + 2);
199 
200 		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
201 				   unresolved_col_width + 4 + 2);
202 
203 	} else {
204 		symlen = unresolved_col_width + 4 + 2;
205 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
206 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
207 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
208 	}
209 
210 	hists__new_col_len(hists, HISTC_CGROUP, 6);
211 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
212 	hists__new_col_len(hists, HISTC_PARALLELISM, 11);
213 	hists__new_col_len(hists, HISTC_CPU, 3);
214 	hists__new_col_len(hists, HISTC_SOCKET, 6);
215 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
216 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
217 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
218 	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
219 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
220 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
221 	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
222 	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
223 	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
224 	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
225 	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
226 	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
227 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_PREDICTED, 9);
228 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_ABORT, 5);
229 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_CYCLES, 6);
230 
231 	if (symbol_conf.nanosecs)
232 		hists__new_col_len(hists, HISTC_TIME, 16);
233 	else
234 		hists__new_col_len(hists, HISTC_TIME, 12);
235 	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
236 
237 	if (h->srcline) {
238 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
239 		hists__new_col_len(hists, HISTC_SRCLINE, len);
240 	}
241 
242 	if (h->srcfile)
243 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
244 
245 	if (h->transaction)
246 		hists__new_col_len(hists, HISTC_TRANSACTION,
247 				   hist_entry__transaction_len());
248 
249 	if (h->trace_output)
250 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
251 
252 	if (h->cgroup) {
253 		const char *cgrp_name = "unknown";
254 		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
255 						   h->cgroup);
256 		if (cgrp != NULL)
257 			cgrp_name = cgrp->name;
258 
259 		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
260 	}
261 }
262 
263 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
264 {
265 	struct rb_node *next = rb_first_cached(&hists->entries);
266 	struct hist_entry *n;
267 	int row = 0;
268 
269 	hists__reset_col_len(hists);
270 
271 	while (next && row++ < max_rows) {
272 		n = rb_entry(next, struct hist_entry, rb_node);
273 		if (!n->filtered)
274 			hists__calc_col_len(hists, n);
275 		next = rb_next(&n->rb_node);
276 	}
277 }
278 
279 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
280 					unsigned int cpumode, u64 period)
281 {
282 	switch (cpumode) {
283 	case PERF_RECORD_MISC_KERNEL:
284 		he_stat->period_sys += period;
285 		break;
286 	case PERF_RECORD_MISC_USER:
287 		he_stat->period_us += period;
288 		break;
289 	case PERF_RECORD_MISC_GUEST_KERNEL:
290 		he_stat->period_guest_sys += period;
291 		break;
292 	case PERF_RECORD_MISC_GUEST_USER:
293 		he_stat->period_guest_us += period;
294 		break;
295 	default:
296 		break;
297 	}
298 }
299 
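/*
 * Quantize a sample timestamp: when symbol_conf.time_quantum is set, each
 * timestamp is rounded down to a multiple of the quantum so samples within
 * the same quantum share one time value (e.g. quantum 1000000: 12345678
 * becomes 12000000).
 */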
300 static long hist_time(unsigned long htime)
301 {
302 	unsigned long time_quantum = symbol_conf.time_quantum;
303 	if (time_quantum)
304 		return (htime / time_quantum) * time_quantum;
305 	return htime;
306 }
307 
308 static void he_stat__add_period(struct he_stat *he_stat, u64 period, u64 latency)
309 {
310 	he_stat->period		+= period;
311 	he_stat->latency	+= latency;
312 	he_stat->nr_events	+= 1;
313 }
314 
315 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
316 {
317 	dest->period		+= src->period;
318 	dest->period_sys	+= src->period_sys;
319 	dest->period_us		+= src->period_us;
320 	dest->period_guest_sys	+= src->period_guest_sys;
321 	dest->period_guest_us	+= src->period_guest_us;
322 	dest->weight1		+= src->weight1;
323 	dest->weight2		+= src->weight2;
324 	dest->weight3		+= src->weight3;
325 	dest->nr_events		+= src->nr_events;
326 	dest->latency		+= src->latency;
327 }
328 
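/*
 * Exponential aging: each decay pass keeps 7/8 of the accumulated values,
 * so an entry that stops receiving samples fades out geometrically over
 * successive passes (e.g. 1000 -> 875 -> 765 -> 669 -> ...).
 */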
329 static void he_stat__decay(struct he_stat *he_stat)
330 {
331 	he_stat->period = (he_stat->period * 7) / 8;
332 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
333 	he_stat->weight1 = (he_stat->weight1 * 7) / 8;
334 	he_stat->weight2 = (he_stat->weight2 * 7) / 8;
335 	he_stat->weight3 = (he_stat->weight3 * 7) / 8;
336 	he_stat->latency = (he_stat->latency * 7) / 8;
337 }
338 
339 static int hists__update_mem_stat(struct hists *hists, struct hist_entry *he,
340 				  struct mem_info *mi, u64 period)
341 {
342 	if (hists->nr_mem_stats == 0)
343 		return 0;
344 
345 	if (he->mem_stat == NULL) {
346 		he->mem_stat = calloc(hists->nr_mem_stats, sizeof(*he->mem_stat));
347 		if (he->mem_stat == NULL)
348 			return -1;
349 	}
350 
351 	for (int i = 0; i < hists->nr_mem_stats; i++) {
352 		int idx = mem_stat_index(hists->mem_stat_types[i],
353 					 mem_info__const_data_src(mi)->val);
354 
355 		assert(0 <= idx && idx < MEM_STAT_LEN);
356 		he->mem_stat[i].entries[idx] += period;
357 		hists->mem_stat_total[i].entries[idx] += period;
358 	}
359 	return 0;
360 }
361 
362 static void hists__add_mem_stat(struct hists *hists, struct hist_entry *dst,
363 				struct hist_entry *src)
364 {
365 	if (hists->nr_mem_stats == 0)
366 		return;
367 
368 	for (int i = 0; i < hists->nr_mem_stats; i++) {
369 		for (int k = 0; k < MEM_STAT_LEN; k++)
370 			dst->mem_stat[i].entries[k] += src->mem_stat[i].entries[k];
371 	}
372 }
373 
374 static int hists__clone_mem_stat(struct hists *hists, struct hist_entry *dst,
375 				  struct hist_entry *src)
376 {
377 	if (hists->nr_mem_stats == 0)
378 		return 0;
379 
380 	dst->mem_stat = calloc(hists->nr_mem_stats, sizeof(*dst->mem_stat));
381 	if (dst->mem_stat == NULL)
382 		return -1;
383 
384 	for (int i = 0; i < hists->nr_mem_stats; i++) {
385 		for (int k = 0; k < MEM_STAT_LEN; k++)
386 			dst->mem_stat[i].entries[k] = src->mem_stat[i].entries[k];
387 	}
388 	return 0;
389 }
390 
391 static void hists__decay_mem_stat(struct hists *hists, struct hist_entry *he)
392 {
393 	if (hists->nr_mem_stats == 0)
394 		return;
395 
396 	for (int i = 0; i < hists->nr_mem_stats; i++) {
397 		for (int k = 0; k < MEM_STAT_LEN; k++)
398 			he->mem_stat[i].entries[k] = (he->mem_stat[i].entries[k] * 7) / 8;
399 	}
400 }
401 
402 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
403 
404 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
405 {
406 	u64 prev_period = he->stat.period;
407 	u64 prev_latency = he->stat.latency;
408 
409 	if (prev_period == 0)
410 		return true;
411 
412 	he_stat__decay(&he->stat);
413 	if (symbol_conf.cumulate_callchain)
414 		he_stat__decay(he->stat_acc);
415 	decay_callchain(he->callchain);
416 	hists__decay_mem_stat(hists, he);
417 
418 	if (!he->depth) {
419 		u64 period_diff = prev_period - he->stat.period;
420 		u64 latency_diff = prev_latency - he->stat.latency;
421 
422 		hists->stats.total_period -= period_diff;
423 		hists->stats.total_latency -= latency_diff;
424 		if (!he->filtered) {
425 			hists->stats.total_non_filtered_period -= period_diff;
426 			hists->stats.total_non_filtered_latency -= latency_diff;
427 		}
428 	}
429 
430 	if (!he->leaf) {
431 		struct hist_entry *child;
432 		struct rb_node *node = rb_first_cached(&he->hroot_out);
433 		while (node) {
434 			child = rb_entry(node, struct hist_entry, rb_node);
435 			node = rb_next(node);
436 
437 			if (hists__decay_entry(hists, child))
438 				hists__delete_entry(hists, child);
439 		}
440 	}
441 
442 	return he->stat.period == 0 && he->stat.latency == 0;
443 }
444 
445 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
446 {
447 	struct rb_root_cached *root_in;
448 	struct rb_root_cached *root_out;
449 
450 	if (he->parent_he) {
451 		root_in  = &he->parent_he->hroot_in;
452 		root_out = &he->parent_he->hroot_out;
453 	} else {
454 		if (hists__has(hists, need_collapse))
455 			root_in = &hists->entries_collapsed;
456 		else
457 			root_in = hists->entries_in;
458 		root_out = &hists->entries;
459 	}
460 
461 	rb_erase_cached(&he->rb_node_in, root_in);
462 	rb_erase_cached(&he->rb_node, root_out);
463 
464 	--hists->nr_entries;
465 	if (!he->filtered)
466 		--hists->nr_non_filtered_entries;
467 
468 	hist_entry__delete(he);
469 }
470 
471 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
472 {
473 	struct rb_node *next = rb_first_cached(&hists->entries);
474 	struct hist_entry *n;
475 
476 	while (next) {
477 		n = rb_entry(next, struct hist_entry, rb_node);
478 		next = rb_next(&n->rb_node);
479 		if (((zap_user && n->level == '.') ||
480 		     (zap_kernel && n->level != '.') ||
481 		     hists__decay_entry(hists, n))) {
482 			hists__delete_entry(hists, n);
483 		}
484 	}
485 }
486 
487 void hists__delete_entries(struct hists *hists)
488 {
489 	struct rb_node *next = rb_first_cached(&hists->entries);
490 	struct hist_entry *n;
491 
492 	while (next) {
493 		n = rb_entry(next, struct hist_entry, rb_node);
494 		next = rb_next(&n->rb_node);
495 
496 		hists__delete_entry(hists, n);
497 	}
498 }
499 
500 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
501 {
502 	struct rb_node *next = rb_first_cached(&hists->entries);
503 	struct hist_entry *n;
504 	int i = 0;
505 
506 	while (next) {
507 		n = rb_entry(next, struct hist_entry, rb_node);
508 		if (i == idx)
509 			return n;
510 
511 		next = rb_next(&n->rb_node);
512 		i++;
513 	}
514 
515 	return NULL;
516 }
517 
518 /*
519  * histogram, sorted on item, collects periods
520  */
521 
522 static int hist_entry__init(struct hist_entry *he,
523 			    struct hist_entry *template,
524 			    bool sample_self,
525 			    size_t callchain_size)
526 {
527 	*he = *template;
528 	he->callchain_size = callchain_size;
529 
530 	if (symbol_conf.cumulate_callchain) {
531 		he->stat_acc = malloc(sizeof(he->stat));
532 		if (he->stat_acc == NULL)
533 			return -ENOMEM;
534 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
535 		if (!sample_self)
536 			memset(&he->stat, 0, sizeof(he->stat));
537 	}
538 
539 	he->ms.maps = maps__get(he->ms.maps);
540 	he->ms.map = map__get(he->ms.map);
541 
542 	if (he->branch_info) {
543 		/*
544 		 * This branch info is (part of) an allocation from
545 		 * sample__resolve_bstack() and will be freed after
546 		 * adding new entries.  So we need to save a copy.
547 		 */
548 		he->branch_info = malloc(sizeof(*he->branch_info));
549 		if (he->branch_info == NULL)
550 			goto err;
551 
552 		memcpy(he->branch_info, template->branch_info,
553 		       sizeof(*he->branch_info));
554 
555 		he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
556 		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
557 		he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
558 		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
559 	}
560 
561 	if (he->mem_info) {
562 		he->mem_info = mem_info__clone(template->mem_info);
563 		if (he->mem_info == NULL)
564 			goto err_infos;
565 	}
566 
567 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
568 		callchain_init(he->callchain);
569 
570 	if (he->raw_data) {
571 		he->raw_data = memdup(he->raw_data, he->raw_size);
572 		if (he->raw_data == NULL)
573 			goto err_infos;
574 	}
575 
576 	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
577 		he->srcline = strdup(he->srcline);
578 		if (he->srcline == NULL)
579 			goto err_rawdata;
580 	}
581 
582 	if (symbol_conf.res_sample) {
583 		he->res_samples = calloc(symbol_conf.res_sample,
584 					sizeof(struct res_sample));
585 		if (!he->res_samples)
586 			goto err_srcline;
587 	}
588 
589 	INIT_LIST_HEAD(&he->pairs.node);
590 	he->thread = thread__get(he->thread);
591 	he->hroot_in  = RB_ROOT_CACHED;
592 	he->hroot_out = RB_ROOT_CACHED;
593 
594 	if (!symbol_conf.report_hierarchy)
595 		he->leaf = true;
596 
597 	return 0;
598 
599 err_srcline:
600 	zfree(&he->srcline);
601 
602 err_rawdata:
603 	zfree(&he->raw_data);
604 
605 err_infos:
606 	if (he->branch_info) {
607 		map_symbol__exit(&he->branch_info->from.ms);
608 		map_symbol__exit(&he->branch_info->to.ms);
609 		zfree(&he->branch_info);
610 	}
611 	if (he->mem_info)
612 		mem_info__zput(he->mem_info);
613 err:
614 	map_symbol__exit(&he->ms);
615 	zfree(&he->stat_acc);
616 	return -ENOMEM;
617 }
618 
619 static void *hist_entry__zalloc(size_t size)
620 {
621 	return zalloc(size + sizeof(struct hist_entry));
622 }
623 
624 static void hist_entry__free(void *ptr)
625 {
626 	free(ptr);
627 }
628 
629 static struct hist_entry_ops default_ops = {
630 	.new	= hist_entry__zalloc,
631 	.free	= hist_entry__free,
632 };
633 
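/*
 * Note: hist_entry__zalloc() above allocates callchain_size extra bytes
 * past the hist_entry itself, so when callchains are enabled the
 * callchain_root can live in that trailing storage.
 */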
634 static struct hist_entry *hist_entry__new(struct hist_entry *template,
635 					  bool sample_self)
636 {
637 	struct hist_entry_ops *ops = template->ops;
638 	size_t callchain_size = 0;
639 	struct hist_entry *he;
640 	int err = 0;
641 
642 	if (!ops)
643 		ops = template->ops = &default_ops;
644 
645 	if (symbol_conf.use_callchain)
646 		callchain_size = sizeof(struct callchain_root);
647 
648 	he = ops->new(callchain_size);
649 	if (he) {
650 		err = hist_entry__init(he, template, sample_self, callchain_size);
651 		if (err) {
652 			ops->free(he);
653 			he = NULL;
654 		}
655 	}
656 	return he;
657 }
658 
659 static filter_mask_t symbol__parent_filter(const struct symbol *parent)
660 {
661 	if (symbol_conf.exclude_other && parent == NULL)
662 		return 1 << HIST_FILTER__PARENT;
663 	return 0;
664 }
665 
666 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period, u64 latency)
667 {
668 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
669 		return;
670 
671 	he->hists->callchain_period += period;
672 	he->hists->callchain_latency += latency;
673 	if (!he->filtered) {
674 		he->hists->callchain_non_filtered_period += period;
675 		he->hists->callchain_non_filtered_latency += latency;
676 	}
677 }
678 
679 static struct hist_entry *hists__findnew_entry(struct hists *hists,
680 					       struct hist_entry *entry,
681 					       const struct addr_location *al,
682 					       bool sample_self)
683 {
684 	struct rb_node **p;
685 	struct rb_node *parent = NULL;
686 	struct hist_entry *he;
687 	int64_t cmp;
688 	u64 period = entry->stat.period;
689 	u64 latency = entry->stat.latency;
690 	bool leftmost = true;
691 
692 	p = &hists->entries_in->rb_root.rb_node;
693 
694 	while (*p != NULL) {
695 		parent = *p;
696 		he = rb_entry(parent, struct hist_entry, rb_node_in);
697 
698 		/*
699 		 * Make sure that it receives arguments in the same order as
700 		 * hist_entry__collapse() so that we can use an appropriate
701 		 * function when searching an entry regardless which sort
702 		 * keys were used.
703 		 */
704 		cmp = hist_entry__cmp(he, entry);
705 		if (!cmp) {
706 			if (sample_self) {
707 				he_stat__add_stat(&he->stat, &entry->stat);
708 				hist_entry__add_callchain_period(he, period, latency);
709 			}
710 			if (symbol_conf.cumulate_callchain)
711 				he_stat__add_period(he->stat_acc, period, latency);
712 
713 			block_info__delete(entry->block_info);
714 
715 			kvm_info__zput(entry->kvm_info);
716 
717 			/* If the map of an existing hist_entry has
718 			 * become out-of-date due to an exec() or
719 			 * similar, update it.  Otherwise we will
720 			 * mis-adjust symbol addresses when computing
721 			 * the history counter to increment.
722 			 */
723 			if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
724 				if (he->ms.sym) {
725 					u64 addr = he->ms.sym->start;
726 					he->ms.sym = map__find_symbol(entry->ms.map, addr);
727 				}
728 
729 				map__put(he->ms.map);
730 				he->ms.map = map__get(entry->ms.map);
731 			}
732 			goto out;
733 		}
734 
735 		if (cmp < 0)
736 			p = &(*p)->rb_left;
737 		else {
738 			p = &(*p)->rb_right;
739 			leftmost = false;
740 		}
741 	}
742 
743 	he = hist_entry__new(entry, sample_self);
744 	if (!he)
745 		return NULL;
746 
747 	if (sample_self)
748 		hist_entry__add_callchain_period(he, period, latency);
749 	hists->nr_entries++;
750 
751 	rb_link_node(&he->rb_node_in, parent, p);
752 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
753 out:
754 	if (sample_self)
755 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
756 	if (symbol_conf.cumulate_callchain)
757 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
758 	if (hists__update_mem_stat(hists, he, entry->mem_info, period) < 0) {
759 		hist_entry__delete(he);
760 		return NULL;
761 	}
762 	return he;
763 }
764 
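/*
 * Pick a value in [0, high): values of random() below the computed
 * threshold are rejected before the final modulo, the usual trick to
 * avoid modulo bias toward the smaller remainders.
 */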
765 static unsigned random_max(unsigned high)
766 {
767 	unsigned thresh = -high % high;
768 	for (;;) {
769 		unsigned r = random();
770 		if (r >= thresh)
771 			return r % high;
772 	}
773 }
774 
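/*
 * Keep up to symbol_conf.res_sample representative samples per entry: the
 * first N samples fill the array, after which each new sample overwrites a
 * randomly chosen slot, so retention is biased toward recent samples.
 */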
775 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
776 {
777 	struct res_sample *r;
778 	int j;
779 
780 	if (he->num_res < symbol_conf.res_sample) {
781 		j = he->num_res++;
782 	} else {
783 		j = random_max(symbol_conf.res_sample);
784 	}
785 	r = &he->res_samples[j];
786 	r->time = sample->time;
787 	r->cpu = sample->cpu;
788 	r->tid = sample->tid;
789 }
790 
791 static struct hist_entry*
792 __hists__add_entry(struct hists *hists,
793 		   struct addr_location *al,
794 		   struct symbol *sym_parent,
795 		   struct branch_info *bi,
796 		   struct mem_info *mi,
797 		   struct kvm_info *ki,
798 		   struct block_info *block_info,
799 		   struct perf_sample *sample,
800 		   bool sample_self,
801 		   struct hist_entry_ops *ops)
802 {
803 	struct namespaces *ns = thread__namespaces(al->thread);
804 	struct hist_entry entry = {
805 		.thread	= al->thread,
806 		.comm = thread__comm(al->thread),
807 		.cgroup_id = {
808 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
809 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
810 		},
811 		.cgroup = sample->cgroup,
812 		.ms = {
813 			.maps	= al->maps,
814 			.map	= al->map,
815 			.sym	= al->sym,
816 		},
817 		.srcline = (char *) al->srcline,
818 		.socket	 = al->socket,
819 		.cpu	 = al->cpu,
820 		.cpumode = al->cpumode,
821 		.ip	 = al->addr,
822 		.level	 = al->level,
823 		.code_page_size = sample->code_page_size,
824 		.parallelism	= al->parallelism,
825 		.stat = {
826 			.nr_events = 1,
827 			.period	= sample->period,
828 			.weight1 = sample->weight,
829 			.weight2 = sample->ins_lat,
830 			.weight3 = sample->weight3,
831 			.latency = al->latency,
832 		},
833 		.parent = sym_parent,
834 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
835 		.hists	= hists,
836 		.branch_info = bi,
837 		.mem_info = mi,
838 		.kvm_info = ki,
839 		.block_info = block_info,
840 		.transaction = sample->transaction,
841 		.raw_data = sample->raw_data,
842 		.raw_size = sample->raw_size,
843 		.ops = ops,
844 		.time = hist_time(sample->time),
845 		.weight = sample->weight,
846 		.ins_lat = sample->ins_lat,
847 		.weight3 = sample->weight3,
848 		.simd_flags = sample->simd_flags,
849 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
850 
851 	if (!hists->has_callchains && he && he->callchain_size != 0)
852 		hists->has_callchains = true;
853 	if (he && symbol_conf.res_sample)
854 		hists__res_sample(he, sample);
855 	return he;
856 }
857 
858 struct hist_entry *hists__add_entry(struct hists *hists,
859 				    struct addr_location *al,
860 				    struct symbol *sym_parent,
861 				    struct branch_info *bi,
862 				    struct mem_info *mi,
863 				    struct kvm_info *ki,
864 				    struct perf_sample *sample,
865 				    bool sample_self)
866 {
867 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
868 				  sample, sample_self, NULL);
869 }
870 
871 struct hist_entry *hists__add_entry_ops(struct hists *hists,
872 					struct hist_entry_ops *ops,
873 					struct addr_location *al,
874 					struct symbol *sym_parent,
875 					struct branch_info *bi,
876 					struct mem_info *mi,
877 					struct kvm_info *ki,
878 					struct perf_sample *sample,
879 					bool sample_self)
880 {
881 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
882 				  sample, sample_self, ops);
883 }
884 
885 struct hist_entry *hists__add_entry_block(struct hists *hists,
886 					  struct addr_location *al,
887 					  struct block_info *block_info)
888 {
889 	struct hist_entry entry = {
890 		.block_info = block_info,
891 		.hists = hists,
892 		.ms = {
893 			.maps = al->maps,
894 			.map = al->map,
895 			.sym = al->sym,
896 		},
897 	}, *he = hists__findnew_entry(hists, &entry, al, false);
898 
899 	return he;
900 }
901 
902 static int
903 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
904 		    struct addr_location *al __maybe_unused)
905 {
906 	return 0;
907 }
908 
909 static int
910 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
911 			struct addr_location *al __maybe_unused)
912 {
913 	return 0;
914 }
915 
916 static int
917 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
918 {
919 	struct perf_sample *sample = iter->sample;
920 	struct mem_info *mi;
921 
922 	mi = sample__resolve_mem(sample, al);
923 	if (mi == NULL)
924 		return -ENOMEM;
925 
926 	iter->mi = mi;
927 	return 0;
928 }
929 
930 static int
931 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
932 {
933 	u64 cost;
934 	struct mem_info *mi = iter->mi;
935 	struct hists *hists = evsel__hists(iter->evsel);
936 	struct perf_sample *sample = iter->sample;
937 	struct hist_entry *he;
938 
939 	if (mi == NULL)
940 		return -EINVAL;
941 
942 	cost = sample->weight;
943 	if (!cost)
944 		cost = 1;
945 
946 	/*
947 	 * must pass period=weight in order to get the correct
948 	 * sorting from hists__collapse_resort() which is solely
949 	 * based on periods. We want sorting to be done on nr_events * weight
950 	 * and this is indirectly achieved by passing period=weight here
951 	 * and the he_stat__add_period() function.
952 	 */
953 	sample->period = cost;
954 
955 	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
956 			      sample, true);
957 	if (!he)
958 		return -ENOMEM;
959 
960 	iter->he = he;
961 	return 0;
962 }
963 
964 static int
965 iter_finish_mem_entry(struct hist_entry_iter *iter,
966 		      struct addr_location *al __maybe_unused)
967 {
968 	struct evsel *evsel = iter->evsel;
969 	struct hists *hists = evsel__hists(evsel);
970 	struct hist_entry *he = iter->he;
971 	int err = -EINVAL;
972 
973 	if (he == NULL)
974 		goto out;
975 
976 	hists__inc_nr_samples(hists, he->filtered);
977 
978 	err = hist_entry__append_callchain(he, iter->sample);
979 
980 out:
981 	mem_info__zput(iter->mi);
982 
983 	iter->he = NULL;
984 	return err;
985 }
986 
987 static int
988 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
989 {
990 	struct branch_info *bi;
991 	struct perf_sample *sample = iter->sample;
992 
993 	bi = sample__resolve_bstack(sample, al);
994 	if (!bi)
995 		return -ENOMEM;
996 
997 	iter->curr = 0;
998 	iter->total = sample->branch_stack->nr;
999 
1000 	iter->bi = bi;
1001 	return 0;
1002 }
1003 
1004 static int
1005 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
1006 			     struct addr_location *al __maybe_unused)
1007 {
1008 	return 0;
1009 }
1010 
1011 static int
1012 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1013 {
1014 	struct branch_info *bi = iter->bi;
1015 	int i = iter->curr;
1016 
1017 	if (bi == NULL)
1018 		return 0;
1019 
1020 	if (iter->curr >= iter->total)
1021 		return 0;
1022 
1023 	maps__put(al->maps);
1024 	al->maps = maps__get(bi[i].to.ms.maps);
1025 	map__put(al->map);
1026 	al->map = map__get(bi[i].to.ms.map);
1027 	al->sym = bi[i].to.ms.sym;
1028 	al->addr = bi[i].to.addr;
1029 	return 1;
1030 }
1031 
1032 static int
1033 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1034 {
1035 	struct branch_info *bi;
1036 	struct evsel *evsel = iter->evsel;
1037 	struct hists *hists = evsel__hists(evsel);
1038 	struct perf_sample *sample = iter->sample;
1039 	struct hist_entry *he = NULL;
1040 	int i = iter->curr;
1041 	int err = 0;
1042 
1043 	bi = iter->bi;
1044 
1045 	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
1046 		goto out;
1047 
1048 	/*
1049 	 * The report shows the percentage of total branches captured
1050 	 * and not events sampled. Thus we use a pseudo period of 1.
1051 	 */
1052 	sample->period = 1;
1053 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
1054 
1055 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
1056 			      sample, true);
1057 	if (he == NULL)
1058 		return -ENOMEM;
1059 
1060 out:
1061 	iter->he = he;
1062 	iter->curr++;
1063 	return err;
1064 }
1065 
1066 static void branch_info__exit(struct branch_info *bi)
1067 {
1068 	map_symbol__exit(&bi->from.ms);
1069 	map_symbol__exit(&bi->to.ms);
1070 	zfree_srcline(&bi->srcline_from);
1071 	zfree_srcline(&bi->srcline_to);
1072 }
1073 
1074 static int
1075 iter_finish_branch_entry(struct hist_entry_iter *iter,
1076 			 struct addr_location *al __maybe_unused)
1077 {
1078 	struct evsel *evsel = iter->evsel;
1079 	struct hists *hists = evsel__hists(evsel);
1080 
1081 	for (int i = 0; i < iter->total; i++)
1082 		branch_info__exit(&iter->bi[i]);
1083 
1084 	if (iter->he)
1085 		hists__inc_nr_samples(hists, iter->he->filtered);
1086 
1087 	zfree(&iter->bi);
1088 	iter->he = NULL;
1089 
1090 	return iter->curr >= iter->total ? 0 : -1;
1091 }
1092 
1093 static int
1094 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
1095 			  struct addr_location *al __maybe_unused)
1096 {
1097 	return 0;
1098 }
1099 
1100 static int
1101 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
1102 {
1103 	struct evsel *evsel = iter->evsel;
1104 	struct perf_sample *sample = iter->sample;
1105 	struct hist_entry *he;
1106 
1107 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1108 			      NULL, sample, true);
1109 	if (he == NULL)
1110 		return -ENOMEM;
1111 
1112 	iter->he = he;
1113 	return 0;
1114 }
1115 
1116 static int
1117 iter_finish_normal_entry(struct hist_entry_iter *iter,
1118 			 struct addr_location *al __maybe_unused)
1119 {
1120 	struct hist_entry *he = iter->he;
1121 	struct evsel *evsel = iter->evsel;
1122 	struct perf_sample *sample = iter->sample;
1123 
1124 	if (he == NULL)
1125 		return 0;
1126 
1127 	iter->he = NULL;
1128 
1129 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1130 
1131 	return hist_entry__append_callchain(he, sample);
1132 }
1133 
1134 static int
1135 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1136 			      struct addr_location *al __maybe_unused)
1137 {
1138 	struct hist_entry **he_cache;
1139 	struct callchain_cursor *cursor = get_tls_callchain_cursor();
1140 
1141 	if (cursor == NULL)
1142 		return -ENOMEM;
1143 
1144 	callchain_cursor_commit(cursor);
1145 
1146 	/*
1147 	 * This is for detecting cycles or recursion so that they're
1148 	 * cumulated only once to prevent entries from exceeding 100%
1149 	 * overhead.
1150 	 */
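	/*
	 * The cache holds at most cursor->nr + 1 entries: one added by
	 * iter_add_single_cumulative_entry() plus one per callchain node
	 * added by iter_add_next_cumulative_entry().
	 */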
1151 	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
1152 	if (he_cache == NULL)
1153 		return -ENOMEM;
1154 
1155 	iter->he_cache = he_cache;
1156 	iter->curr = 0;
1157 
1158 	return 0;
1159 }
1160 
1161 static int
1162 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1163 				 struct addr_location *al)
1164 {
1165 	struct evsel *evsel = iter->evsel;
1166 	struct hists *hists = evsel__hists(evsel);
1167 	struct perf_sample *sample = iter->sample;
1168 	struct hist_entry **he_cache = iter->he_cache;
1169 	struct hist_entry *he;
1170 	int err = 0;
1171 
1172 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1173 			      sample, true);
1174 	if (he == NULL)
1175 		return -ENOMEM;
1176 
1177 	iter->he = he;
1178 	he_cache[iter->curr++] = he;
1179 
1180 	hist_entry__append_callchain(he, sample);
1181 
1182 	/*
1183 	 * We need to re-initialize the cursor since callchain_append()
1184 	 * advanced the cursor to the end.
1185 	 */
1186 	callchain_cursor_commit(get_tls_callchain_cursor());
1187 
1188 	hists__inc_nr_samples(hists, he->filtered);
1189 
1190 	return err;
1191 }
1192 
1193 static int
1194 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1195 			   struct addr_location *al)
1196 {
1197 	struct callchain_cursor_node *node;
1198 
1199 	node = callchain_cursor_current(get_tls_callchain_cursor());
1200 	if (node == NULL)
1201 		return 0;
1202 
1203 	return fill_callchain_info(al, node, iter->hide_unresolved);
1204 }
1205 
1206 static bool
1207 hist_entry__fast__sym_diff(struct hist_entry *left,
1208 			   struct hist_entry *right)
1209 {
1210 	struct symbol *sym_l = left->ms.sym;
1211 	struct symbol *sym_r = right->ms.sym;
1212 
1213 	if (!sym_l && !sym_r)
1214 		return left->ip != right->ip;
1215 
1216 	return !!_sort__sym_cmp(sym_l, sym_r);
1217 }
1218 
1219 
1220 static int
1221 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1222 			       struct addr_location *al)
1223 {
1224 	struct evsel *evsel = iter->evsel;
1225 	struct perf_sample *sample = iter->sample;
1226 	struct hist_entry **he_cache = iter->he_cache;
1227 	struct hist_entry *he;
1228 	struct hist_entry he_tmp = {
1229 		.hists = evsel__hists(evsel),
1230 		.cpu = al->cpu,
1231 		.thread = al->thread,
1232 		.comm = thread__comm(al->thread),
1233 		.ip = al->addr,
1234 		.ms = {
1235 			.maps = al->maps,
1236 			.map = al->map,
1237 			.sym = al->sym,
1238 		},
1239 		.srcline = (char *) al->srcline,
1240 		.parent = iter->parent,
1241 		.raw_data = sample->raw_data,
1242 		.raw_size = sample->raw_size,
1243 	};
1244 	int i;
1245 	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
1246 	bool fast = hists__has(he_tmp.hists, sym);
1247 
1248 	if (tls_cursor == NULL)
1249 		return -ENOMEM;
1250 
1251 	callchain_cursor_snapshot(&cursor, tls_cursor);
1252 
1253 	callchain_cursor_advance(tls_cursor);
1254 
1255 	/*
1256 	 * Check if there are duplicate entries in the callchain.
1257 	 * It's possible that it has cycles or recursive calls.
1258 	 */
1259 	for (i = 0; i < iter->curr; i++) {
1260 		/*
1261 		 * For most cases, there are no duplicate entries in callchain.
1262 		 * The symbols are usually different. Do a quick check for
1263 		 * symbols first.
1264 		 */
1265 		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1266 			continue;
1267 
1268 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1269 			/* to avoid calling callback function */
1270 			iter->he = NULL;
1271 			return 0;
1272 		}
1273 	}
1274 
1275 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1276 			      NULL, sample, false);
1277 	if (he == NULL)
1278 		return -ENOMEM;
1279 
1280 	iter->he = he;
1281 	he_cache[iter->curr++] = he;
1282 
1283 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1284 		callchain_append(he->callchain, &cursor, sample->period);
1285 	return 0;
1286 }
1287 
1288 static int
1289 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1290 			     struct addr_location *al __maybe_unused)
1291 {
1292 	mem_info__zput(iter->mi);
1293 	zfree(&iter->bi);
1294 	zfree(&iter->he_cache);
1295 	iter->he = NULL;
1296 
1297 	return 0;
1298 }
1299 
1300 const struct hist_iter_ops hist_iter_mem = {
1301 	.prepare_entry 		= iter_prepare_mem_entry,
1302 	.add_single_entry 	= iter_add_single_mem_entry,
1303 	.next_entry 		= iter_next_nop_entry,
1304 	.add_next_entry 	= iter_add_next_nop_entry,
1305 	.finish_entry 		= iter_finish_mem_entry,
1306 };
1307 
1308 const struct hist_iter_ops hist_iter_branch = {
1309 	.prepare_entry 		= iter_prepare_branch_entry,
1310 	.add_single_entry 	= iter_add_single_branch_entry,
1311 	.next_entry 		= iter_next_branch_entry,
1312 	.add_next_entry 	= iter_add_next_branch_entry,
1313 	.finish_entry 		= iter_finish_branch_entry,
1314 };
1315 
1316 const struct hist_iter_ops hist_iter_normal = {
1317 	.prepare_entry 		= iter_prepare_normal_entry,
1318 	.add_single_entry 	= iter_add_single_normal_entry,
1319 	.next_entry 		= iter_next_nop_entry,
1320 	.add_next_entry 	= iter_add_next_nop_entry,
1321 	.finish_entry 		= iter_finish_normal_entry,
1322 };
1323 
1324 const struct hist_iter_ops hist_iter_cumulative = {
1325 	.prepare_entry 		= iter_prepare_cumulative_entry,
1326 	.add_single_entry 	= iter_add_single_cumulative_entry,
1327 	.next_entry 		= iter_next_cumulative_entry,
1328 	.add_next_entry 	= iter_add_next_cumulative_entry,
1329 	.finish_entry 		= iter_finish_cumulative_entry,
1330 };
1331 
1332 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1333 			 int max_stack_depth, void *arg)
1334 {
1335 	int err, err2;
1336 	struct map *alm = NULL;
1337 
1338 	if (al)
1339 		alm = map__get(al->map);
1340 
1341 	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
1342 					iter->evsel, al, max_stack_depth);
1343 	if (err) {
1344 		map__put(alm);
1345 		return err;
1346 	}
1347 
1348 	err = iter->ops->prepare_entry(iter, al);
1349 	if (err)
1350 		goto out;
1351 
1352 	err = iter->ops->add_single_entry(iter, al);
1353 	if (err)
1354 		goto out;
1355 
1356 	if (iter->he && iter->add_entry_cb) {
1357 		err = iter->add_entry_cb(iter, al, true, arg);
1358 		if (err)
1359 			goto out;
1360 	}
1361 
1362 	while (iter->ops->next_entry(iter, al)) {
1363 		err = iter->ops->add_next_entry(iter, al);
1364 		if (err)
1365 			break;
1366 
1367 		if (iter->he && iter->add_entry_cb) {
1368 			err = iter->add_entry_cb(iter, al, false, arg);
1369 			if (err)
1370 				goto out;
1371 		}
1372 	}
1373 
1374 out:
1375 	err2 = iter->ops->finish_entry(iter, al);
1376 	if (!err)
1377 		err = err2;
1378 
1379 	map__put(alm);
1380 
1381 	return err;
1382 }
1383 
1384 static int64_t
1385 hist_entry__cmp_impl(struct perf_hpp_list *hpp_list, struct hist_entry *left,
1386 		     struct hist_entry *right, unsigned long fn_offset,
1387 		     bool ignore_dynamic, bool ignore_skipped)
1388 {
1389 	struct hists *hists = left->hists;
1390 	struct perf_hpp_fmt *fmt;
1391 	perf_hpp_fmt_cmp_t *fn;
1392 	int64_t cmp;
1393 
1394 	/*
1395 	 * Never collapse filtered and non-filtered entries.
1396 	 * Note this is not the same as having an extra (invisible) fmt
1397 	 * that corresponds to the filtered status.
1398 	 */
1399 	cmp = (int64_t)!!left->filtered - (int64_t)!!right->filtered;
1400 	if (cmp)
1401 		return cmp;
1402 
1403 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1404 		if (ignore_dynamic && perf_hpp__is_dynamic_entry(fmt) &&
1405 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1406 			continue;
1407 
1408 		if (ignore_skipped && perf_hpp__should_skip(fmt, hists))
1409 			continue;
1410 
1411 		fn = (void *)fmt + fn_offset;
1412 		cmp = (*fn)(fmt, left, right);
1413 		if (cmp)
1414 			break;
1415 	}
1416 
1417 	return cmp;
1418 }
1419 
1420 int64_t
1421 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1422 {
1423 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1424 		offsetof(struct perf_hpp_fmt, cmp), true, false);
1425 }
1426 
1427 static int64_t
1428 hist_entry__sort(struct hist_entry *left, struct hist_entry *right)
1429 {
1430 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1431 		offsetof(struct perf_hpp_fmt, sort), false, true);
1432 }
1433 
1434 int64_t
1435 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1436 {
1437 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1438 		offsetof(struct perf_hpp_fmt, collapse), true, false);
1439 }
1440 
1441 static int64_t
1442 hist_entry__collapse_hierarchy(struct perf_hpp_list *hpp_list,
1443 			       struct hist_entry *left,
1444 			       struct hist_entry *right)
1445 {
1446 	return hist_entry__cmp_impl(hpp_list, left, right,
1447 		offsetof(struct perf_hpp_fmt, collapse), false, false);
1448 }
1449 
1450 void hist_entry__delete(struct hist_entry *he)
1451 {
1452 	struct hist_entry_ops *ops = he->ops;
1453 
1454 	if (symbol_conf.report_hierarchy) {
1455 		struct rb_root *root = &he->hroot_out.rb_root;
1456 		struct hist_entry *child, *tmp;
1457 
1458 		rbtree_postorder_for_each_entry_safe(child, tmp, root, rb_node)
1459 			hist_entry__delete(child);
1460 
1461 		*root = RB_ROOT;
1462 	}
1463 
1464 	thread__zput(he->thread);
1465 	map_symbol__exit(&he->ms);
1466 
1467 	if (he->branch_info) {
1468 		branch_info__exit(he->branch_info);
1469 		zfree(&he->branch_info);
1470 	}
1471 
1472 	if (he->mem_info) {
1473 		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
1474 		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
1475 		mem_info__zput(he->mem_info);
1476 	}
1477 
1478 	if (he->block_info)
1479 		block_info__delete(he->block_info);
1480 
1481 	if (he->kvm_info)
1482 		kvm_info__zput(he->kvm_info);
1483 
1484 	zfree(&he->res_samples);
1485 	zfree(&he->stat_acc);
1486 	zfree_srcline(&he->srcline);
1487 	if (he->srcfile && he->srcfile[0])
1488 		zfree(&he->srcfile);
1489 	free_callchain(he->callchain);
1490 	zfree(&he->trace_output);
1491 	zfree(&he->raw_data);
1492 	zfree(&he->mem_stat);
1493 	ops->free(he);
1494 }
1495 
1496 /*
1497  * If this is not the last column, then we need to pad it according to the
1498  * pre-calculated max length for this column, otherwise don't bother adding
1499  * spaces because that would break viewing this with, for instance, 'less',
1500  * which would show tons of trailing spaces when a long C++ demangled method
1501  * name is sampled.
1502 */
1503 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1504 				   struct perf_hpp_fmt *fmt, int printed)
1505 {
1506 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1507 		const int width = fmt->width(fmt, hpp, he->hists);
1508 		if (printed < width) {
1509 			advance_hpp(hpp, printed);
1510 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1511 		}
1512 	}
1513 
1514 	return printed;
1515 }
1516 
1517 /*
1518  * collapse the histogram
1519  */
1520 
1521 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1522 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1523 				       enum hist_filter type);
1524 
1525 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1526 
1527 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1528 {
1529 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1530 }
1531 
1532 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1533 						enum hist_filter type,
1534 						fmt_chk_fn check)
1535 {
1536 	struct perf_hpp_fmt *fmt;
1537 	bool type_match = false;
1538 	struct hist_entry *parent = he->parent_he;
1539 
1540 	switch (type) {
1541 	case HIST_FILTER__THREAD:
1542 		if (symbol_conf.comm_list == NULL &&
1543 		    symbol_conf.pid_list == NULL &&
1544 		    symbol_conf.tid_list == NULL)
1545 			return;
1546 		break;
1547 	case HIST_FILTER__DSO:
1548 		if (symbol_conf.dso_list == NULL)
1549 			return;
1550 		break;
1551 	case HIST_FILTER__SYMBOL:
1552 		if (symbol_conf.sym_list == NULL)
1553 			return;
1554 		break;
1555 	case HIST_FILTER__PARALLELISM:
1556 		if (__bitmap_weight(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1) == 0)
1557 			return;
1558 		break;
1559 	case HIST_FILTER__PARENT:
1560 	case HIST_FILTER__GUEST:
1561 	case HIST_FILTER__HOST:
1562 	case HIST_FILTER__SOCKET:
1563 	case HIST_FILTER__C2C:
1564 	default:
1565 		return;
1566 	}
1567 
1568 	/* if it's filtered by own fmt, it has to have filter bits */
1569 	/* if it's filtered by its own fmt, it has to have filter bits */
1570 		if (check(fmt)) {
1571 			type_match = true;
1572 			break;
1573 		}
1574 	}
1575 
1576 	if (type_match) {
1577 		/*
1578 		 * If the filter is for the current level entry, propagate
1579 		 * the filter marker to parents.  The marker bit was
1580 		 * already set by default so it only needs to clear
1581 		 * non-filtered entries.
1582 		 */
1583 		if (!(he->filtered & (1 << type))) {
1584 			while (parent) {
1585 				parent->filtered &= ~(1 << type);
1586 				parent = parent->parent_he;
1587 			}
1588 		}
1589 	} else {
1590 		/*
1591 		 * If the current entry doesn't have matching formats, set
1592 		 * the filter marker for upper level entries.  It will be
1593 		 * cleared if its lower level entries are not filtered.
1594 		 *
1595 		 * For lower-level entries, it inherits parent's
1596 		 * filter bit so that lower level entries of a
1597 		 * non-filtered entry won't set the filter marker.
1598 		 */
1599 		if (parent == NULL)
1600 			he->filtered |= (1 << type);
1601 		else
1602 			he->filtered |= (parent->filtered & (1 << type));
1603 	}
1604 }
1605 
1606 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1607 {
1608 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1609 					    check_thread_entry);
1610 
1611 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1612 					    perf_hpp__is_dso_entry);
1613 
1614 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1615 					    perf_hpp__is_sym_entry);
1616 
1617 	hist_entry__check_and_remove_filter(he, HIST_FILTER__PARALLELISM,
1618 					    perf_hpp__is_parallelism_entry);
1619 
1620 	hists__apply_filters(he->hists, he);
1621 }
1622 
1623 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1624 						 struct rb_root_cached *root,
1625 						 struct hist_entry *he,
1626 						 struct hist_entry *parent_he,
1627 						 struct perf_hpp_list *hpp_list)
1628 {
1629 	struct rb_node **p = &root->rb_root.rb_node;
1630 	struct rb_node *parent = NULL;
1631 	struct hist_entry *iter, *new;
1632 	struct perf_hpp_fmt *fmt;
1633 	int64_t cmp;
1634 	bool leftmost = true;
1635 
1636 	while (*p != NULL) {
1637 		parent = *p;
1638 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1639 		cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
1640 		if (!cmp) {
1641 			he_stat__add_stat(&iter->stat, &he->stat);
1642 			hists__add_mem_stat(hists, iter, he);
1643 			return iter;
1644 		}
1645 
1646 		if (cmp < 0)
1647 			p = &parent->rb_left;
1648 		else {
1649 			p = &parent->rb_right;
1650 			leftmost = false;
1651 		}
1652 	}
1653 
1654 	new = hist_entry__new(he, true);
1655 	if (new == NULL)
1656 		return NULL;
1657 
1658 	hists->nr_entries++;
1659 
1660 	/* save related format list for output */
1661 	new->hpp_list = hpp_list;
1662 	new->parent_he = parent_he;
1663 
1664 	hist_entry__apply_hierarchy_filters(new);
1665 
1666 	/* some fields are now passed to 'new' */
1667 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1668 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1669 			he->trace_output = NULL;
1670 		else
1671 			new->trace_output = NULL;
1672 
1673 		if (perf_hpp__is_srcline_entry(fmt))
1674 			he->srcline = NULL;
1675 		else
1676 			new->srcline = NULL;
1677 
1678 		if (perf_hpp__is_srcfile_entry(fmt))
1679 			he->srcfile = NULL;
1680 		else
1681 			new->srcfile = NULL;
1682 	}
1683 
1684 	if (hists__clone_mem_stat(hists, new, he) < 0) {
1685 		hist_entry__delete(new);
1686 		return NULL;
1687 	}
1688 
1689 	rb_link_node(&new->rb_node_in, parent, p);
1690 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1691 	return new;
1692 }
1693 
1694 static int hists__hierarchy_insert_entry(struct hists *hists,
1695 					 struct rb_root_cached *root,
1696 					 struct hist_entry *he)
1697 {
1698 	struct perf_hpp_list_node *node;
1699 	struct hist_entry *new_he = NULL;
1700 	struct hist_entry *parent = NULL;
1701 	int depth = 0;
1702 	int ret = 0;
1703 
1704 	list_for_each_entry(node, &hists->hpp_formats, list) {
1705 		/* skip period (overhead) and elided columns */
1706 		if (node->level == 0 || node->skip)
1707 			continue;
1708 
1709 		/* insert copy of 'he' for each fmt into the hierarchy */
1710 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1711 		if (new_he == NULL) {
1712 			ret = -1;
1713 			break;
1714 		}
1715 
1716 		root = &new_he->hroot_in;
1717 		new_he->depth = depth++;
1718 		parent = new_he;
1719 	}
1720 
1721 	if (new_he) {
1722 		new_he->leaf = true;
1723 
1724 		if (hist_entry__has_callchains(new_he) &&
1725 		    symbol_conf.use_callchain) {
1726 			struct callchain_cursor *cursor = get_tls_callchain_cursor();
1727 
1728 			if (cursor == NULL)
1729 				return -1;
1730 
1731 			callchain_cursor_reset(cursor);
1732 			if (callchain_merge(cursor,
1733 					    new_he->callchain,
1734 					    he->callchain) < 0)
1735 				ret = -1;
1736 		}
1737 	}
1738 
1739 	/* 'he' is no longer used */
1740 	hist_entry__delete(he);
1741 
1742 	/* return 0 (or -1) since it already applied filters */
1743 	return ret;
1744 }
1745 
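/*
 * Merge 'he' into the collapsed tree under the collapse comparison of the
 * sort fields.  Returns 1 if a new node was linked, 0 if 'he' was merged
 * into an existing entry (and freed), and -1 on callchain merge failure.
 * Hierarchy mode is routed to hists__hierarchy_insert_entry() instead.
 */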
1746 static int hists__collapse_insert_entry(struct hists *hists,
1747 					struct rb_root_cached *root,
1748 					struct hist_entry *he)
1749 {
1750 	struct rb_node **p = &root->rb_root.rb_node;
1751 	struct rb_node *parent = NULL;
1752 	struct hist_entry *iter;
1753 	int64_t cmp;
1754 	bool leftmost = true;
1755 
1756 	if (symbol_conf.report_hierarchy)
1757 		return hists__hierarchy_insert_entry(hists, root, he);
1758 
1759 	while (*p != NULL) {
1760 		parent = *p;
1761 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1762 
1763 		cmp = hist_entry__collapse(iter, he);
1764 
1765 		if (!cmp) {
1766 			int ret = 0;
1767 
1768 			he_stat__add_stat(&iter->stat, &he->stat);
1769 			if (symbol_conf.cumulate_callchain)
1770 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1771 			hists__add_mem_stat(hists, iter, he);
1772 
1773 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1774 				struct callchain_cursor *cursor = get_tls_callchain_cursor();
1775 
1776 				if (cursor != NULL) {
1777 					callchain_cursor_reset(cursor);
1778 					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
1779 						ret = -1;
1780 				} else {
1781 					ret = 0;
1782 				}
1783 			}
1784 			hist_entry__delete(he);
1785 			return ret;
1786 		}
1787 
1788 		if (cmp < 0)
1789 			p = &(*p)->rb_left;
1790 		else {
1791 			p = &(*p)->rb_right;
1792 			leftmost = false;
1793 		}
1794 	}
1795 	hists->nr_entries++;
1796 
1797 	rb_link_node(&he->rb_node_in, parent, p);
1798 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1799 	return 1;
1800 }
1801 
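/*
 * The input entries are double buffered: new samples are added to the tree
 * that hists->entries_in currently points at while the previous tree is
 * being collapsed.  Rotating returns the tree to drain and, under
 * hists->lock, flips entries_in to the other slot of entries_in_array.
 */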
1802 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1803 {
1804 	struct rb_root_cached *root;
1805 
1806 	mutex_lock(&hists->lock);
1807 
1808 	root = hists->entries_in;
1809 	if (++hists->entries_in > &hists->entries_in_array[1])
1810 		hists->entries_in = &hists->entries_in_array[0];
1811 
1812 	mutex_unlock(&hists->lock);
1813 
1814 	return root;
1815 }
1816 
1817 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1818 {
1819 	hists__filter_entry_by_dso(hists, he);
1820 	hists__filter_entry_by_thread(hists, he);
1821 	hists__filter_entry_by_symbol(hists, he);
1822 	hists__filter_entry_by_socket(hists, he);
1823 	hists__filter_entry_by_parallelism(hists, he);
1824 }
1825 
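/*
 * Collapse pass: drain the current input tree and merge entries that are
 * equal under the collapse keys into hists->entries_collapsed (or into the
 * per-level hierarchy trees), applying any active filters to entries that
 * were not merged away.
 */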
1826 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1827 {
1828 	struct rb_root_cached *root;
1829 	struct rb_node *next;
1830 	struct hist_entry *n;
1831 	int ret;
1832 
1833 	if (!hists__has(hists, need_collapse))
1834 		return 0;
1835 
1836 	hists->nr_entries = 0;
1837 
1838 	root = hists__get_rotate_entries_in(hists);
1839 
1840 	next = rb_first_cached(root);
1841 
1842 	while (next) {
1843 		if (session_done())
1844 			break;
1845 		n = rb_entry(next, struct hist_entry, rb_node_in);
1846 		next = rb_next(&n->rb_node_in);
1847 
1848 		rb_erase_cached(&n->rb_node_in, root);
1849 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1850 		if (ret < 0)
1851 			return -1;
1852 
1853 		if (ret) {
1854 			/*
1855 			 * If it wasn't combined with one of the entries already
1856 			 * collapsed, we need to apply the filters that may have
1857 			 * been set by, say, the hist_browser.
1858 			 */
1859 			hists__apply_filters(hists, n);
1860 		}
1861 		if (prog)
1862 			ui_progress__update(prog, 1);
1863 	}
1864 	return 0;
1865 }
1866 
1867 static void hists__reset_filter_stats(struct hists *hists)
1868 {
1869 	hists->nr_non_filtered_entries = 0;
1870 	hists->stats.total_non_filtered_period = 0;
1871 	hists->stats.total_non_filtered_latency = 0;
1872 }
1873 
1874 void hists__reset_stats(struct hists *hists)
1875 {
1876 	hists->nr_entries = 0;
1877 	hists->stats.total_period = 0;
1878 	hists->stats.total_latency = 0;
1879 
1880 	hists__reset_filter_stats(hists);
1881 }
1882 
1883 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1884 {
1885 	hists->nr_non_filtered_entries++;
1886 	hists->stats.total_non_filtered_period += h->stat.period;
1887 	hists->stats.total_non_filtered_latency += h->stat.latency;
1888 }
1889 
1890 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1891 {
1892 	if (!h->filtered)
1893 		hists__inc_filter_stats(hists, h);
1894 
1895 	hists->nr_entries++;
1896 	hists->stats.total_period += h->stat.period;
1897 	hists->stats.total_latency += h->stat.latency;
1898 }
1899 
1900 static void hierarchy_recalc_total_periods(struct hists *hists)
1901 {
1902 	struct rb_node *node;
1903 	struct hist_entry *he;
1904 
1905 	node = rb_first_cached(&hists->entries);
1906 
1907 	hists->stats.total_period = 0;
1908 	hists->stats.total_non_filtered_period = 0;
1909 	hists->stats.total_latency = 0;
1910 	hists->stats.total_non_filtered_latency = 0;
1911 
1912 	/*
1913 	 * recalculate total period using top-level entries only
1914 	 * since lower level entries only see non-filtered entries
1915 	 * but upper level entries have sum of both entries.
1916 	 */
1917 	while (node) {
1918 		he = rb_entry(node, struct hist_entry, rb_node);
1919 		node = rb_next(node);
1920 
1921 		hists->stats.total_period += he->stat.period;
1922 		hists->stats.total_latency += he->stat.latency;
1923 		if (!he->filtered) {
1924 			hists->stats.total_non_filtered_period += he->stat.period;
1925 			hists->stats.total_non_filtered_latency += he->stat.latency;
1926 		}
1927 	}
1928 }
1929 
1930 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1931 					  struct hist_entry *he)
1932 {
1933 	struct rb_node **p = &root->rb_root.rb_node;
1934 	struct rb_node *parent = NULL;
1935 	struct hist_entry *iter;
1936 	struct perf_hpp_fmt *fmt;
1937 	bool leftmost = true;
1938 
1939 	while (*p != NULL) {
1940 		parent = *p;
1941 		iter = rb_entry(parent, struct hist_entry, rb_node);
1942 
1943 		if (hist_entry__sort(he, iter) > 0)
1944 			p = &parent->rb_left;
1945 		else {
1946 			p = &parent->rb_right;
1947 			leftmost = false;
1948 		}
1949 	}
1950 
1951 	rb_link_node(&he->rb_node, parent, p);
1952 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1953 
1954 	/* update column width of dynamic entry */
1955 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1956 		if (fmt->init)
1957 			fmt->init(fmt, he);
1958 	}
1959 }
1960 
1961 static void hists__hierarchy_output_resort(struct hists *hists,
1962 					   struct ui_progress *prog,
1963 					   struct rb_root_cached *root_in,
1964 					   struct rb_root_cached *root_out,
1965 					   u64 min_callchain_hits,
1966 					   bool use_callchain)
1967 {
1968 	struct rb_node *node;
1969 	struct hist_entry *he;
1970 
1971 	*root_out = RB_ROOT_CACHED;
1972 	node = rb_first_cached(root_in);
1973 
1974 	while (node) {
1975 		he = rb_entry(node, struct hist_entry, rb_node_in);
1976 		node = rb_next(node);
1977 
1978 		hierarchy_insert_output_entry(root_out, he);
1979 
1980 		if (prog)
1981 			ui_progress__update(prog, 1);
1982 
1983 		hists->nr_entries++;
1984 		if (!he->filtered) {
1985 			hists->nr_non_filtered_entries++;
1986 			hists__calc_col_len(hists, he);
1987 		}
1988 
1989 		if (!he->leaf) {
1990 			hists__hierarchy_output_resort(hists, prog,
1991 						       &he->hroot_in,
1992 						       &he->hroot_out,
1993 						       min_callchain_hits,
1994 						       use_callchain);
1995 			continue;
1996 		}
1997 
1998 		if (!use_callchain)
1999 			continue;
2000 
2001 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
2002 			u64 total = he->stat.period;
2003 
2004 			if (symbol_conf.cumulate_callchain)
2005 				total = he->stat_acc->period;
2006 
2007 			min_callchain_hits = total * (callchain_param.min_percent / 100);
2008 		}
2009 
2010 		callchain_param.sort(&he->sorted_chain, he->callchain,
2011 				     min_callchain_hits, &callchain_param);
2012 	}
2013 }
2014 
2015 static void __hists__insert_output_entry(struct rb_root_cached *entries,
2016 					 struct hist_entry *he,
2017 					 u64 min_callchain_hits,
2018 					 bool use_callchain)
2019 {
2020 	struct rb_node **p = &entries->rb_root.rb_node;
2021 	struct rb_node *parent = NULL;
2022 	struct hist_entry *iter;
2023 	struct perf_hpp_fmt *fmt;
2024 	bool leftmost = true;
2025 
2026 	if (use_callchain) {
2027 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
2028 			u64 total = he->stat.period;
2029 
2030 			if (symbol_conf.cumulate_callchain)
2031 				total = he->stat_acc->period;
2032 
2033 			min_callchain_hits = total * (callchain_param.min_percent / 100);
2034 		}
2035 		callchain_param.sort(&he->sorted_chain, he->callchain,
2036 				      min_callchain_hits, &callchain_param);
2037 	}
2038 
2039 	while (*p != NULL) {
2040 		parent = *p;
2041 		iter = rb_entry(parent, struct hist_entry, rb_node);
2042 
2043 		if (hist_entry__sort(he, iter) > 0)
2044 			p = &(*p)->rb_left;
2045 		else {
2046 			p = &(*p)->rb_right;
2047 			leftmost = false;
2048 		}
2049 	}
2050 
2051 	rb_link_node(&he->rb_node, parent, p);
2052 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
2053 
2054 	/* update column width of dynamic entries */
2055 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
2056 		if (fmt->init)
2057 			fmt->init(fmt, he);
2058 	}
2059 }
2060 
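/*
 * Output pass: take the collapsed entries and re-insert them into
 * hists->entries in output sort order, resetting and recomputing the
 * per-hists stats and column widths, and sorting callchains against the
 * min_percent threshold when callchains are in use.
 */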
2061 static void output_resort(struct hists *hists, struct ui_progress *prog,
2062 			  bool use_callchain, hists__resort_cb_t cb,
2063 			  void *cb_arg)
2064 {
2065 	struct rb_root_cached *root;
2066 	struct rb_node *next;
2067 	struct hist_entry *n;
2068 	u64 callchain_total;
2069 	u64 min_callchain_hits;
2070 
2071 	callchain_total = hists->callchain_period;
2072 	if (symbol_conf.filter_relative)
2073 		callchain_total = hists->callchain_non_filtered_period;
2074 
2075 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
2076 
2077 	hists__reset_stats(hists);
2078 	hists__reset_col_len(hists);
2079 
2080 	if (symbol_conf.report_hierarchy) {
2081 		hists__hierarchy_output_resort(hists, prog,
2082 					       &hists->entries_collapsed,
2083 					       &hists->entries,
2084 					       min_callchain_hits,
2085 					       use_callchain);
2086 		hierarchy_recalc_total_periods(hists);
2087 		return;
2088 	}
2089 
2090 	if (hists__has(hists, need_collapse))
2091 		root = &hists->entries_collapsed;
2092 	else
2093 		root = hists->entries_in;
2094 
2095 	next = rb_first_cached(root);
2096 	hists->entries = RB_ROOT_CACHED;
2097 
2098 	while (next) {
2099 		n = rb_entry(next, struct hist_entry, rb_node_in);
2100 		next = rb_next(&n->rb_node_in);
2101 
2102 		if (cb && cb(n, cb_arg))
2103 			continue;
2104 
2105 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
2106 		hists__inc_stats(hists, n);
2107 
2108 		if (!n->filtered)
2109 			hists__calc_col_len(hists, n);
2110 
2111 		if (prog)
2112 			ui_progress__update(prog, 1);
2113 	}
2114 }
2115 
2116 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
2117 			     hists__resort_cb_t cb, void *cb_arg)
2118 {
2119 	bool use_callchain;
2120 
2121 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
2122 		use_callchain = evsel__has_callchain(evsel);
2123 	else
2124 		use_callchain = symbol_conf.use_callchain;
2125 
2126 	use_callchain |= symbol_conf.show_branchflag_count;
2127 
2128 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
2129 }
2130 
2131 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
2132 {
2133 	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
2134 }
2135 
2136 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
2137 {
2138 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
2139 }
2140 
2141 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
2142 			     hists__resort_cb_t cb)
2143 {
2144 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
2145 }
2146 
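/*
 * Helpers for walking the output hierarchy in display order: descend into a
 * child tree only when the entry is unfolded (or the caller forces it),
 * otherwise move to the next sibling, falling back to the parent's next
 * sibling when a subtree is exhausted.
 */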
2147 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
2148 {
2149 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
2150 		return false;
2151 
2152 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
2153 		return true;
2154 
2155 	return false;
2156 }
2157 
2158 struct rb_node *rb_hierarchy_last(struct rb_node *node)
2159 {
2160 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2161 
2162 	while (can_goto_child(he, HMD_NORMAL)) {
2163 		node = rb_last(&he->hroot_out.rb_root);
2164 		he = rb_entry(node, struct hist_entry, rb_node);
2165 	}
2166 	return node;
2167 }
2168 
2169 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2170 {
2171 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2172 
2173 	if (can_goto_child(he, hmd))
2174 		node = rb_first_cached(&he->hroot_out);
2175 	else
2176 		node = rb_next(node);
2177 
2178 	while (node == NULL) {
2179 		he = he->parent_he;
2180 		if (he == NULL)
2181 			break;
2182 
2183 		node = rb_next(&he->rb_node);
2184 	}
2185 	return node;
2186 }
2187 
2188 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2189 {
2190 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2191 
2192 	node = rb_prev(node);
2193 	if (node)
2194 		return rb_hierarchy_last(node);
2195 
2196 	he = he->parent_he;
2197 	if (he == NULL)
2198 		return NULL;
2199 
2200 	return &he->rb_node;
2201 }
2202 
2203 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2204 {
2205 	struct rb_node *node;
2206 	struct hist_entry *child;
2207 	float percent;
2208 
2209 	if (he->leaf)
2210 		return false;
2211 
2212 	node = rb_first_cached(&he->hroot_out);
2213 	child = rb_entry(node, struct hist_entry, rb_node);
2214 
2215 	while (node && child->filtered) {
2216 		node = rb_next(node);
2217 		child = rb_entry(node, struct hist_entry, rb_node);
2218 	}
2219 
2220 	if (node)
2221 		percent = hist_entry__get_percent_limit(child);
2222 	else
2223 		percent = 0;
2224 
2225 	return node && percent >= limit;
2226 }
2227 
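/*
 * Clear one filter bit on 'h' (and, in hierarchy mode, add its stats to and
 * clear the bit on its parents).  Once no filter bits remain, the entry is
 * folded and counted in the non-filtered stats and column width
 * calculation again.
 */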
2228 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2229 				       enum hist_filter filter)
2230 {
2231 	h->filtered &= ~(1 << filter);
2232 
2233 	if (symbol_conf.report_hierarchy) {
2234 		struct hist_entry *parent = h->parent_he;
2235 
2236 		while (parent) {
2237 			he_stat__add_stat(&parent->stat, &h->stat);
2238 
2239 			parent->filtered &= ~(1 << filter);
2240 
2241 			if (parent->filtered)
2242 				goto next;
2243 
2244 			/* force fold unfiltered entry for simplicity */
2245 			parent->unfolded = false;
2246 			parent->has_no_entry = false;
2247 			parent->row_offset = 0;
2248 			parent->nr_rows = 0;
2249 next:
2250 			parent = parent->parent_he;
2251 		}
2252 	}
2253 
2254 	if (h->filtered)
2255 		return;
2256 
2257 	/* force fold unfiltered entry for simplicity */
2258 	h->unfolded = false;
2259 	h->has_no_entry = false;
2260 	h->row_offset = 0;
2261 	h->nr_rows = 0;
2262 
2263 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2264 
2265 	hists__inc_filter_stats(hists, h);
2266 	hists__calc_col_len(hists, h);
2267 }
2268 
2269 
2270 static bool hists__filter_entry_by_dso(struct hists *hists,
2271 				       struct hist_entry *he)
2272 {
2273 	if (hists->dso_filter != NULL &&
2274 	    (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
2275 		he->filtered |= (1 << HIST_FILTER__DSO);
2276 		return true;
2277 	}
2278 
2279 	return false;
2280 }
2281 
2282 static bool hists__filter_entry_by_thread(struct hists *hists,
2283 					  struct hist_entry *he)
2284 {
2285 	if (hists->thread_filter != NULL &&
2286 	    !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
2287 		he->filtered |= (1 << HIST_FILTER__THREAD);
2288 		return true;
2289 	}
2290 
2291 	return false;
2292 }
2293 
2294 static bool hists__filter_entry_by_symbol(struct hists *hists,
2295 					  struct hist_entry *he)
2296 {
2297 	if (hists->symbol_filter_str != NULL &&
2298 	    (!he->ms.sym || strstr(he->ms.sym->name,
2299 				   hists->symbol_filter_str) == NULL)) {
2300 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2301 		return true;
2302 	}
2303 
2304 	return false;
2305 }
2306 
2307 static bool hists__filter_entry_by_socket(struct hists *hists,
2308 					  struct hist_entry *he)
2309 {
2310 	if ((hists->socket_filter > -1) &&
2311 	    (he->socket != hists->socket_filter)) {
2312 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2313 		return true;
2314 	}
2315 
2316 	return false;
2317 }
2318 
2319 static bool hists__filter_entry_by_parallelism(struct hists *hists,
2320 					       struct hist_entry *he)
2321 {
2322 	if (test_bit(he->parallelism, hists->parallelism_filter)) {
2323 		he->filtered |= (1 << HIST_FILTER__PARALLELISM);
2324 		return true;
2325 	}
2326 	return false;
2327 }
2328 
2329 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2330 
2331 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2332 {
2333 	struct rb_node *nd;
2334 
2335 	hists->stats.nr_non_filtered_samples = 0;
2336 
2337 	hists__reset_filter_stats(hists);
2338 	hists__reset_col_len(hists);
2339 
2340 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2341 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2342 
2343 		if (filter(hists, h))
2344 			continue;
2345 
2346 		hists__remove_entry_filter(hists, h, type);
2347 	}
2348 }
2349 
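/*
 * Re-insert an entry into a freshly sorted output tree after filtering and,
 * when the entry itself is neither a leaf nor filtered, recursively rebuild
 * its child tree so the ordering reflects the updated periods.
 */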
2350 static void resort_filtered_entry(struct rb_root_cached *root,
2351 				  struct hist_entry *he)
2352 {
2353 	struct rb_node **p = &root->rb_root.rb_node;
2354 	struct rb_node *parent = NULL;
2355 	struct hist_entry *iter;
2356 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2357 	struct rb_node *nd;
2358 	bool leftmost = true;
2359 
2360 	while (*p != NULL) {
2361 		parent = *p;
2362 		iter = rb_entry(parent, struct hist_entry, rb_node);
2363 
2364 		if (hist_entry__sort(he, iter) > 0)
2365 			p = &(*p)->rb_left;
2366 		else {
2367 			p = &(*p)->rb_right;
2368 			leftmost = false;
2369 		}
2370 	}
2371 
2372 	rb_link_node(&he->rb_node, parent, p);
2373 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2374 
2375 	if (he->leaf || he->filtered)
2376 		return;
2377 
2378 	nd = rb_first_cached(&he->hroot_out);
2379 	while (nd) {
2380 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2381 
2382 		nd = rb_next(nd);
2383 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2384 
2385 		resort_filtered_entry(&new_root, h);
2386 	}
2387 
2388 	he->hroot_out = new_root;
2389 }
2390 
2391 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2392 {
2393 	struct rb_node *nd;
2394 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2395 
2396 	hists->stats.nr_non_filtered_samples = 0;
2397 
2398 	hists__reset_filter_stats(hists);
2399 	hists__reset_col_len(hists);
2400 
2401 	nd = rb_first_cached(&hists->entries);
2402 	while (nd) {
2403 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2404 		int ret;
2405 
2406 		ret = hist_entry__filter(h, type, arg);
2407 
2408 		/*
2409 		 * case 1. non-matching type
2410 		 * zero out the period, set filter marker and move to child
2411 		 */
2412 		if (ret < 0) {
2413 			memset(&h->stat, 0, sizeof(h->stat));
2414 			h->filtered |= (1 << type);
2415 
2416 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2417 		}
2418 		/*
2419 		 * case 2. matched type (filter out)
2420 		 * set filter marker and move to next
2421 		 */
2422 		else if (ret == 1) {
2423 			h->filtered |= (1 << type);
2424 
2425 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2426 		}
2427 		/*
2428 		 * case 3. ok (not filtered)
2429 		 * add period to hists and parents, erase the filter marker
2430 		 * and move to next sibling
2431 		 */
2432 		else {
2433 			hists__remove_entry_filter(hists, h, type);
2434 
2435 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2436 		}
2437 	}
2438 
2439 	hierarchy_recalc_total_periods(hists);
2440 
2441 	/*
2442 	 * resort output after applying a new filter since a filter in a lower
2443 	 * hierarchy can change periods in an upper hierarchy.
2444 	 */
2445 	nd = rb_first_cached(&hists->entries);
2446 	while (nd) {
2447 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2448 
2449 		nd = rb_next(nd);
2450 		rb_erase_cached(&h->rb_node, &hists->entries);
2451 
2452 		resort_filtered_entry(&new_root, h);
2453 	}
2454 
2455 	hists->entries = new_root;
2456 }
2457 
2458 void hists__filter_by_thread(struct hists *hists)
2459 {
2460 	if (symbol_conf.report_hierarchy)
2461 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2462 					hists->thread_filter);
2463 	else
2464 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2465 				      hists__filter_entry_by_thread);
2466 }
2467 
2468 void hists__filter_by_dso(struct hists *hists)
2469 {
2470 	if (symbol_conf.report_hierarchy)
2471 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2472 					hists->dso_filter);
2473 	else
2474 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2475 				      hists__filter_entry_by_dso);
2476 }
2477 
2478 void hists__filter_by_symbol(struct hists *hists)
2479 {
2480 	if (symbol_conf.report_hierarchy)
2481 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2482 					hists->symbol_filter_str);
2483 	else
2484 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2485 				      hists__filter_entry_by_symbol);
2486 }
2487 
2488 void hists__filter_by_socket(struct hists *hists)
2489 {
2490 	if (symbol_conf.report_hierarchy)
2491 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2492 					&hists->socket_filter);
2493 	else
2494 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2495 				      hists__filter_entry_by_socket);
2496 }
2497 
2498 void hists__filter_by_parallelism(struct hists *hists)
2499 {
2500 	if (symbol_conf.report_hierarchy)
2501 		hists__filter_hierarchy(hists, HIST_FILTER__PARALLELISM,
2502 					hists->parallelism_filter);
2503 	else
2504 		hists__filter_by_type(hists, HIST_FILTER__PARALLELISM,
2505 				      hists__filter_entry_by_parallelism);
2506 }
2507 
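/* Index 0 keeps a running total across all event types. */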
2508 void events_stats__inc(struct events_stats *stats, u32 type)
2509 {
2510 	++stats->nr_events[0];
2511 	++stats->nr_events[type];
2512 }
2513 
2514 static void hists_stats__inc(struct hists_stats *stats)
2515 {
2516 	++stats->nr_samples;
2517 }
2518 
2519 void hists__inc_nr_events(struct hists *hists)
2520 {
2521 	hists_stats__inc(&hists->stats);
2522 }
2523 
2524 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2525 {
2526 	hists_stats__inc(&hists->stats);
2527 	if (!filtered)
2528 		hists->stats.nr_non_filtered_samples++;
2529 }
2530 
2531 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2532 {
2533 	hists->stats.nr_lost_samples += lost;
2534 }
2535 
2536 void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
2537 {
2538 	hists->stats.nr_dropped_samples += lost;
2539 }
2540 
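/*
 * Add a zero-stat placeholder entry matching 'pair' to the collapsed (or
 * input) tree, so that every entry of the paired hists has a counterpart in
 * this one; returns the existing entry if one already collapses equal.
 */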
2541 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2542 						 struct hist_entry *pair)
2543 {
2544 	struct rb_root_cached *root;
2545 	struct rb_node **p;
2546 	struct rb_node *parent = NULL;
2547 	struct hist_entry *he;
2548 	int64_t cmp;
2549 	bool leftmost = true;
2550 
2551 	if (hists__has(hists, need_collapse))
2552 		root = &hists->entries_collapsed;
2553 	else
2554 		root = hists->entries_in;
2555 
2556 	p = &root->rb_root.rb_node;
2557 
2558 	while (*p != NULL) {
2559 		parent = *p;
2560 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2561 
2562 		cmp = hist_entry__collapse(he, pair);
2563 
2564 		if (!cmp)
2565 			goto out;
2566 
2567 		if (cmp < 0)
2568 			p = &(*p)->rb_left;
2569 		else {
2570 			p = &(*p)->rb_right;
2571 			leftmost = false;
2572 		}
2573 	}
2574 
2575 	he = hist_entry__new(pair, true);
2576 	if (he) {
2577 		memset(&he->stat, 0, sizeof(he->stat));
2578 		he->hists = hists;
2579 		if (symbol_conf.cumulate_callchain)
2580 			memset(he->stat_acc, 0, sizeof(he->stat));
2581 		rb_link_node(&he->rb_node_in, parent, p);
2582 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2583 		hists__inc_stats(hists, he);
2584 		he->dummy = true;
2585 	}
2586 out:
2587 	return he;
2588 }
2589 
2590 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2591 						    struct rb_root_cached *root,
2592 						    struct hist_entry *pair)
2593 {
2594 	struct rb_node **p;
2595 	struct rb_node *parent = NULL;
2596 	struct hist_entry *he;
2597 	bool leftmost = true;
2598 
2599 	p = &root->rb_root.rb_node;
2600 	while (*p != NULL) {
2601 		int64_t cmp;
2602 
2603 		parent = *p;
2604 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2605 		cmp = hist_entry__collapse_hierarchy(he->hpp_list, he, pair);
2606 		if (!cmp)
2607 			goto out;
2608 
2609 		if (cmp < 0)
2610 			p = &parent->rb_left;
2611 		else {
2612 			p = &parent->rb_right;
2613 			leftmost = false;
2614 		}
2615 	}
2616 
2617 	he = hist_entry__new(pair, true);
2618 	if (he) {
2619 		rb_link_node(&he->rb_node_in, parent, p);
2620 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2621 
2622 		he->dummy = true;
2623 		he->hists = hists;
2624 		memset(&he->stat, 0, sizeof(he->stat));
2625 		hists__inc_stats(hists, he);
2626 	}
2627 out:
2628 	return he;
2629 }
2630 
2631 static struct hist_entry *hists__find_entry(struct hists *hists,
2632 					    struct hist_entry *he)
2633 {
2634 	struct rb_node *n;
2635 
2636 	if (hists__has(hists, need_collapse))
2637 		n = hists->entries_collapsed.rb_root.rb_node;
2638 	else
2639 		n = hists->entries_in->rb_root.rb_node;
2640 
2641 	while (n) {
2642 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2643 		int64_t cmp = hist_entry__collapse(iter, he);
2644 
2645 		if (cmp < 0)
2646 			n = n->rb_left;
2647 		else if (cmp > 0)
2648 			n = n->rb_right;
2649 		else
2650 			return iter;
2651 	}
2652 
2653 	return NULL;
2654 }
2655 
2656 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2657 						      struct hist_entry *he)
2658 {
2659 	struct rb_node *n = root->rb_root.rb_node;
2660 
2661 	while (n) {
2662 		struct hist_entry *iter;
2663 		int64_t cmp;
2664 
2665 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2666 		cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
2667 		if (cmp < 0)
2668 			n = n->rb_left;
2669 		else if (cmp > 0)
2670 			n = n->rb_right;
2671 		else
2672 			return iter;
2673 	}
2674 
2675 	return NULL;
2676 }
2677 
2678 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2679 				   struct rb_root_cached *other_root)
2680 {
2681 	struct rb_node *nd;
2682 	struct hist_entry *pos, *pair;
2683 
2684 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2685 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2686 		pair = hists__find_hierarchy_entry(other_root, pos);
2687 
2688 		if (pair) {
2689 			hist_entry__add_pair(pair, pos);
2690 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2691 		}
2692 	}
2693 }
2694 
2695 /*
2696  * Look for pairs to link to the leader buckets (hist_entries):
2697  */
2698 void hists__match(struct hists *leader, struct hists *other)
2699 {
2700 	struct rb_root_cached *root;
2701 	struct rb_node *nd;
2702 	struct hist_entry *pos, *pair;
2703 
2704 	if (symbol_conf.report_hierarchy) {
2705 		/* hierarchy report always collapses entries */
2706 		return hists__match_hierarchy(&leader->entries_collapsed,
2707 					      &other->entries_collapsed);
2708 	}
2709 
2710 	if (hists__has(leader, need_collapse))
2711 		root = &leader->entries_collapsed;
2712 	else
2713 		root = leader->entries_in;
2714 
2715 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2716 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2717 		pair = hists__find_entry(other, pos);
2718 
2719 		if (pair)
2720 			hist_entry__add_pair(pair, pos);
2721 	}
2722 }
2723 
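/*
 * Hierarchy counterpart of hists__link(): walk the other hists level by
 * level and, for entries that have no pair in the leader yet, insert dummy
 * leader entries so both hierarchies end up with the same shape.
 */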
2724 static int hists__link_hierarchy(struct hists *leader_hists,
2725 				 struct hist_entry *parent,
2726 				 struct rb_root_cached *leader_root,
2727 				 struct rb_root_cached *other_root)
2728 {
2729 	struct rb_node *nd;
2730 	struct hist_entry *pos, *leader;
2731 
2732 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2733 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2734 
2735 		if (hist_entry__has_pairs(pos)) {
2736 			bool found = false;
2737 
2738 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2739 				if (leader->hists == leader_hists) {
2740 					found = true;
2741 					break;
2742 				}
2743 			}
2744 			if (!found)
2745 				return -1;
2746 		} else {
2747 			leader = add_dummy_hierarchy_entry(leader_hists,
2748 							   leader_root, pos);
2749 			if (leader == NULL)
2750 				return -1;
2751 
2752 			/* do not point the parent at 'pos'; use the leader-side parent */
2753 			leader->parent_he = parent;
2754 
2755 			hist_entry__add_pair(pos, leader);
2756 		}
2757 
2758 		if (!pos->leaf) {
2759 			if (hists__link_hierarchy(leader_hists, leader,
2760 						  &leader->hroot_in,
2761 						  &pos->hroot_in) < 0)
2762 				return -1;
2763 		}
2764 	}
2765 	return 0;
2766 }
2767 
2768 /*
2769  * Look for entries in the other hists that are not present in the leader, if
2770  * we find them, just add a dummy entry on the leader hists, with period=0,
2771  * nr_events=0, to serve as the list header.
2772  */
2773 int hists__link(struct hists *leader, struct hists *other)
2774 {
2775 	struct rb_root_cached *root;
2776 	struct rb_node *nd;
2777 	struct hist_entry *pos, *pair;
2778 
2779 	if (symbol_conf.report_hierarchy) {
2780 		/* hierarchy report always collapses entries */
2781 		return hists__link_hierarchy(leader, NULL,
2782 					     &leader->entries_collapsed,
2783 					     &other->entries_collapsed);
2784 	}
2785 
2786 	if (hists__has(other, need_collapse))
2787 		root = &other->entries_collapsed;
2788 	else
2789 		root = other->entries_in;
2790 
2791 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2792 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2793 
2794 		if (!hist_entry__has_pairs(pos)) {
2795 			pair = hists__add_dummy_entry(leader, pos);
2796 			if (pair == NULL)
2797 				return -1;
2798 			hist_entry__add_pair(pos, pair);
2799 		}
2800 	}
2801 
2802 	return 0;
2803 }
2804 
2805 int hists__unlink(struct hists *hists)
2806 {
2807 	struct rb_root_cached *root;
2808 	struct rb_node *nd;
2809 	struct hist_entry *pos;
2810 
2811 	if (hists__has(hists, need_collapse))
2812 		root = &hists->entries_collapsed;
2813 	else
2814 		root = hists->entries_in;
2815 
2816 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2817 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2818 		list_del_init(&pos->pairs.node);
2819 	}
2820 
2821 	return 0;
2822 }
2823 
2824 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2825 			  struct perf_sample *sample, bool nonany_branch_mode,
2826 			  u64 *total_cycles, struct evsel *evsel)
2827 {
2828 	struct branch_info *bi;
2829 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2830 
2831 	/* If we have branch cycles always annotate them. */
2832 	if (bs && bs->nr && entries[0].flags.cycles) {
2833 		bi = sample__resolve_bstack(sample, al);
2834 		if (bi) {
2835 			struct addr_map_symbol *prev = NULL;
2836 
2837 			/*
2838 			 * Ignore errors, still want to process the
2839 			 * other entries.
2840 			 *
2841 			 * For non-standard branch modes always
2842 			 * force no IPC (prev == NULL)
2843 			 *
2844 			 * Note that perf stores branches reversed from
2845 			 * program order!
2846 			 */
2847 			for (int i = bs->nr - 1; i >= 0; i--) {
2848 				addr_map_symbol__account_cycles(&bi[i].from,
2849 					nonany_branch_mode ? NULL : prev,
2850 					bi[i].flags.cycles, evsel,
2851 					bi[i].branch_stack_cntr);
2852 				prev = &bi[i].to;
2853 
2854 				if (total_cycles)
2855 					*total_cycles += bi[i].flags.cycles;
2856 			}
2857 			for (unsigned int i = 0; i < bs->nr; i++) {
2858 				map_symbol__exit(&bi[i].to.ms);
2859 				map_symbol__exit(&bi[i].from.ms);
2860 			}
2861 			free(bi);
2862 		}
2863 	}
2864 }
2865 
2866 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2867 {
2868 	struct evsel *pos;
2869 	size_t ret = 0;
2870 
2871 	evlist__for_each_entry(evlist, pos) {
2872 		struct hists *hists = evsel__hists(pos);
2873 		u64 total_samples = hists->stats.nr_samples;
2874 
2875 		total_samples += hists->stats.nr_lost_samples;
2876 		total_samples += hists->stats.nr_dropped_samples;
2877 
2878 		if (symbol_conf.skip_empty && total_samples == 0)
2879 			continue;
2880 
2881 		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2882 		if (hists->stats.nr_samples)
2883 			ret += fprintf(fp, "%20s events: %10d\n",
2884 				       "SAMPLE", hists->stats.nr_samples);
2885 		if (hists->stats.nr_lost_samples)
2886 			ret += fprintf(fp, "%20s events: %10d\n",
2887 				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
2888 		if (hists->stats.nr_dropped_samples)
2889 			ret += fprintf(fp, "%20s events: %10d\n",
2890 				       "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
2891 	}
2892 
2893 	return ret;
2894 }
2895 
2896 
2897 u64 hists__total_period(struct hists *hists)
2898 {
2899 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2900 		hists->stats.total_period;
2901 }
2902 
2903 u64 hists__total_latency(struct hists *hists)
2904 {
2905 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_latency :
2906 		hists->stats.total_latency;
2907 }
2908 
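/*
 * Compose the "Samples: ... Event count (approx.): ..." headline, summing
 * group members when the evsel is a group event and appending any active
 * UID/thread/DSO/socket filters.
 */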
2909 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2910 {
2911 	char unit;
2912 	int printed;
2913 	const struct dso *dso = hists->dso_filter;
2914 	struct thread *thread = hists->thread_filter;
2915 	int socket_id = hists->socket_filter;
2916 	unsigned long nr_samples = hists->stats.nr_samples;
2917 	u64 nr_events = hists->stats.total_period;
2918 	struct evsel *evsel = hists_to_evsel(hists);
2919 	const char *ev_name = evsel__name(evsel);
2920 	char buf[512], sample_freq_str[64] = "";
2921 	size_t buflen = sizeof(buf);
2922 	char ref[30] = " show reference callgraph, ";
2923 	bool enable_ref = false;
2924 
2925 	if (symbol_conf.filter_relative) {
2926 		nr_samples = hists->stats.nr_non_filtered_samples;
2927 		nr_events = hists->stats.total_non_filtered_period;
2928 	}
2929 
2930 	if (evsel__is_group_event(evsel)) {
2931 		struct evsel *pos;
2932 
2933 		evsel__group_desc(evsel, buf, buflen);
2934 		ev_name = buf;
2935 
2936 		for_each_group_member(pos, evsel) {
2937 			struct hists *pos_hists = evsel__hists(pos);
2938 
2939 			if (symbol_conf.filter_relative) {
2940 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2941 				nr_events += pos_hists->stats.total_non_filtered_period;
2942 			} else {
2943 				nr_samples += pos_hists->stats.nr_samples;
2944 				nr_events += pos_hists->stats.total_period;
2945 			}
2946 		}
2947 	}
2948 
2949 	if (symbol_conf.show_ref_callgraph &&
2950 	    strstr(ev_name, "call-graph=no"))
2951 		enable_ref = true;
2952 
2953 	if (show_freq)
2954 		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2955 
2956 	nr_samples = convert_unit(nr_samples, &unit);
2957 	printed = scnprintf(bf, size,
2958 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2959 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2960 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2961 
2962 
2963 	if (hists->uid_filter_str)
2964 		printed += snprintf(bf + printed, size - printed,
2965 				    ", UID: %s", hists->uid_filter_str);
2966 	if (thread) {
2967 		if (hists__has(hists, thread)) {
2968 			printed += scnprintf(bf + printed, size - printed,
2969 				    ", Thread: %s(%d)",
2970 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2971 					thread__tid(thread));
2972 		} else {
2973 			printed += scnprintf(bf + printed, size - printed,
2974 				    ", Thread: %s",
2975 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
2976 		}
2977 	}
2978 	if (dso)
2979 		printed += scnprintf(bf + printed, size - printed,
2980 				     ", DSO: %s", dso__short_name(dso));
2981 	if (socket_id > -1)
2982 		printed += scnprintf(bf + printed, size - printed,
2983 				    ", Processor Socket: %d", socket_id);
2984 
2985 	return printed;
2986 }
2987 
2988 int parse_filter_percentage(const struct option *opt __maybe_unused,
2989 			    const char *arg, int unset __maybe_unused)
2990 {
2991 	if (!strcmp(arg, "relative"))
2992 		symbol_conf.filter_relative = true;
2993 	else if (!strcmp(arg, "absolute"))
2994 		symbol_conf.filter_relative = false;
2995 	else {
2996 		pr_debug("Invalid percentage: %s\n", arg);
2997 		return -1;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 int perf_hist_config(const char *var, const char *value)
3004 {
3005 	if (!strcmp(var, "hist.percentage"))
3006 		return parse_filter_percentage(NULL, value, 0);
3007 
3008 	return 0;
3009 }
3010 
3011 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
3012 {
3013 	memset(hists, 0, sizeof(*hists));
3014 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
3015 	hists->entries_in = &hists->entries_in_array[0];
3016 	hists->entries_collapsed = RB_ROOT_CACHED;
3017 	hists->entries = RB_ROOT_CACHED;
3018 	mutex_init(&hists->lock);
3019 	hists->socket_filter = -1;
3020 	hists->parallelism_filter = symbol_conf.parallelism_filter;
3021 	hists->hpp_list = hpp_list;
3022 	INIT_LIST_HEAD(&hists->hpp_formats);
3023 	return 0;
3024 }
3025 
3026 static void hists__delete_remaining_entries(struct rb_root_cached *root)
3027 {
3028 	struct rb_node *node;
3029 	struct hist_entry *he;
3030 
3031 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
3032 		node = rb_first_cached(root);
3033 		rb_erase_cached(node, root);
3034 
3035 		he = rb_entry(node, struct hist_entry, rb_node_in);
3036 		hist_entry__delete(he);
3037 	}
3038 }
3039 
3040 static void hists__delete_all_entries(struct hists *hists)
3041 {
3042 	hists__delete_entries(hists);
3043 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
3044 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
3045 	hists__delete_remaining_entries(&hists->entries_collapsed);
3046 }
3047 
3048 static void hists_evsel__exit(struct evsel *evsel)
3049 {
3050 	struct hists *hists = evsel__hists(evsel);
3051 	struct perf_hpp_fmt *fmt, *pos;
3052 	struct perf_hpp_list_node *node, *tmp;
3053 
3054 	hists__delete_all_entries(hists);
3055 	zfree(&hists->mem_stat_types);
3056 	zfree(&hists->mem_stat_total);
3057 
3058 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
3059 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
3060 			list_del_init(&fmt->list);
3061 			free(fmt);
3062 		}
3063 		list_del_init(&node->list);
3064 		free(node);
3065 	}
3066 }
3067 
3068 static int hists_evsel__init(struct evsel *evsel)
3069 {
3070 	struct hists *hists = evsel__hists(evsel);
3071 
3072 	__hists__init(hists, &perf_hpp_list);
3073 	return 0;
3074 }
3075 
3076 /*
3077  * XXX We probably need a hists_evsel__exit() to free the hist_entries
3078  * stored in the rbtree...
3079  */
3080 
3081 int hists__init(void)
3082 {
3083 	int err = evsel__object_config(sizeof(struct hists_evsel),
3084 				       hists_evsel__init, hists_evsel__exit);
3085 	if (err)
3086 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
3087 
3088 	return err;
3089 }
3090 
3091 void perf_hpp_list__init(struct perf_hpp_list *list)
3092 {
3093 	INIT_LIST_HEAD(&list->fields);
3094 	INIT_LIST_HEAD(&list->sorts);
3095 }
3096