xref: /linux/tools/perf/util/hist.c (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "mem-info.h"
13 #include "session.h"
14 #include "namespaces.h"
15 #include "cgroup.h"
16 #include "sort.h"
17 #include "units.h"
18 #include "evlist.h"
19 #include "evsel.h"
20 #include "annotate.h"
21 #include "srcline.h"
22 #include "symbol.h"
23 #include "thread.h"
24 #include "block-info.h"
25 #include "ui/progress.h"
26 #include <errno.h>
27 #include <math.h>
28 #include <inttypes.h>
29 #include <sys/param.h>
30 #include <linux/rbtree.h>
31 #include <linux/string.h>
32 #include <linux/time64.h>
33 #include <linux/zalloc.h>
34 
35 static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
36 static int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
37 
38 static bool hists__filter_entry_by_dso(struct hists *hists,
39 				       struct hist_entry *he);
40 static bool hists__filter_entry_by_thread(struct hists *hists,
41 					  struct hist_entry *he);
42 static bool hists__filter_entry_by_symbol(struct hists *hists,
43 					  struct hist_entry *he);
44 static bool hists__filter_entry_by_socket(struct hists *hists,
45 					  struct hist_entry *he);
46 static bool hists__filter_entry_by_parallelism(struct hists *hists,
47 					       struct hist_entry *he);
48 
49 u16 hists__col_len(struct hists *hists, enum hist_column col)
50 {
51 	return hists->col_len[col];
52 }
53 
54 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
55 {
56 	hists->col_len[col] = len;
57 }
58 
59 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
60 {
61 	if (len > hists__col_len(hists, col)) {
62 		hists__set_col_len(hists, col, len);
63 		return true;
64 	}
65 	return false;
66 }
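/*
 * Illustrative note (added commentary, not in the original source): these
 * helpers track a running maximum width per output column.
 * hists__new_col_len() only ever widens a column; e.g. after
 * hists__reset_col_len(), a call with len = 10 records 10 and returns true,
 * while a later call with len = 8 returns false and leaves the width at 10.
 */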
67 
68 void hists__reset_col_len(struct hists *hists)
69 {
70 	enum hist_column col;
71 
72 	for (col = 0; col < HISTC_NR_COLS; ++col)
73 		hists__set_col_len(hists, col, 0);
74 }
75 
76 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
77 {
78 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
79 
80 	if (hists__col_len(hists, dso) < unresolved_col_width &&
81 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
82 	    !symbol_conf.dso_list)
83 		hists__set_col_len(hists, dso, unresolved_col_width);
84 }
85 
86 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
87 {
88 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
89 	int symlen;
90 	u16 len;
91 
92 	if (h->block_info)
93 		return;
94 	/*
95 	 * +4 accounts for '[x] ' priv level info
96 	 * +2 accounts for 0x prefix on raw addresses
97 	 * +3 accounts for ' y ' symtab origin info
98 	 */
99 	if (h->ms.sym) {
100 		symlen = h->ms.sym->namelen + 4;
101 		if (verbose > 0)
102 			symlen += BITS_PER_LONG / 4 + 2 + 3;
103 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
104 	} else {
105 		symlen = unresolved_col_width + 4 + 2;
106 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
107 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
108 	}
109 
110 	len = thread__comm_len(h->thread);
111 	if (hists__new_col_len(hists, HISTC_COMM, len))
112 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
113 
114 	if (h->ms.map) {
115 		len = dso__name_len(map__dso(h->ms.map));
116 		hists__new_col_len(hists, HISTC_DSO, len);
117 	}
118 
119 	if (h->parent)
120 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
121 
122 	if (h->branch_info) {
123 		if (h->branch_info->from.ms.sym) {
124 			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
125 			if (verbose > 0)
126 				symlen += BITS_PER_LONG / 4 + 2 + 3;
127 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
128 
129 			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
130 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
131 		} else {
132 			symlen = unresolved_col_width + 4 + 2;
133 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
134 			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
135 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
136 		}
137 
138 		if (h->branch_info->to.ms.sym) {
139 			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
140 			if (verbose > 0)
141 				symlen += BITS_PER_LONG / 4 + 2 + 3;
142 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
143 
144 			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
145 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
146 		} else {
147 			symlen = unresolved_col_width + 4 + 2;
148 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
149 			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
150 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
151 		}
152 
153 		if (h->branch_info->srcline_from)
154 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
155 					strlen(h->branch_info->srcline_from));
156 		if (h->branch_info->srcline_to)
157 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
158 					strlen(h->branch_info->srcline_to));
159 	}
160 
161 	if (h->mem_info) {
162 		if (mem_info__daddr(h->mem_info)->ms.sym) {
163 			symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
164 			       + unresolved_col_width + 2;
165 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
166 					   symlen);
167 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
168 					   symlen + 1);
169 		} else {
170 			symlen = unresolved_col_width + 4 + 2;
171 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
172 					   symlen);
173 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
174 					   symlen);
175 		}
176 
177 		if (mem_info__iaddr(h->mem_info)->ms.sym) {
178 			symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
179 			       + unresolved_col_width + 2;
180 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
181 					   symlen);
182 		} else {
183 			symlen = unresolved_col_width + 4 + 2;
184 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
185 					   symlen);
186 		}
187 
188 		if (mem_info__daddr(h->mem_info)->ms.map) {
189 			symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
190 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
191 					   symlen);
192 		} else {
193 			symlen = unresolved_col_width + 4 + 2;
194 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
195 		}
196 
197 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
198 				   unresolved_col_width + 4 + 2);
199 
200 		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
201 				   unresolved_col_width + 4 + 2);
202 
203 	} else {
204 		symlen = unresolved_col_width + 4 + 2;
205 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
206 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
207 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
208 	}
209 
210 	hists__new_col_len(hists, HISTC_CGROUP, 6);
211 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
212 	hists__new_col_len(hists, HISTC_PARALLELISM, 11);
213 	hists__new_col_len(hists, HISTC_CPU, 3);
214 	hists__new_col_len(hists, HISTC_SOCKET, 6);
215 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
216 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
217 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
218 	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
219 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
220 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
221 	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
222 	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
223 	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
224 	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
225 	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
226 	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
227 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_PREDICTED, 9);
228 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_ABORT, 5);
229 	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_CYCLES, 6);
230 
231 	if (symbol_conf.nanosecs)
232 		hists__new_col_len(hists, HISTC_TIME, 16);
233 	else
234 		hists__new_col_len(hists, HISTC_TIME, 12);
235 	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
236 
237 	if (h->srcline) {
238 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
239 		hists__new_col_len(hists, HISTC_SRCLINE, len);
240 	}
241 
242 	if (h->srcfile)
243 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
244 
245 	if (h->transaction)
246 		hists__new_col_len(hists, HISTC_TRANSACTION,
247 				   hist_entry__transaction_len());
248 
249 	if (h->trace_output)
250 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
251 
252 	if (h->cgroup) {
253 		const char *cgrp_name = "unknown";
254 		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
255 						   h->cgroup);
256 		if (cgrp != NULL)
257 			cgrp_name = cgrp->name;
258 
259 		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
260 	}
261 }
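/*
 * Worked example (added for illustration, assuming a 64-bit build where
 * BITS_PER_LONG / 4 == 16): a resolved symbol named "main" (namelen == 4)
 * with verbose output sizes the symbol column to
 * 4 + 4 ('[x] ') + 16 + 2 ('0x') + 3 (' y ') = 29 characters, while an
 * unresolved address falls back to unresolved_col_width + 4 + 2 = 22.
 */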
262 
263 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
264 {
265 	struct rb_node *next = rb_first_cached(&hists->entries);
266 	struct hist_entry *n;
267 	int row = 0;
268 
269 	hists__reset_col_len(hists);
270 
271 	while (next && row++ < max_rows) {
272 		n = rb_entry(next, struct hist_entry, rb_node);
273 		if (!n->filtered)
274 			hists__calc_col_len(hists, n);
275 		next = rb_next(&n->rb_node);
276 	}
277 }
278 
279 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
280 					unsigned int cpumode, u64 period)
281 {
282 	switch (cpumode) {
283 	case PERF_RECORD_MISC_KERNEL:
284 		he_stat->period_sys += period;
285 		break;
286 	case PERF_RECORD_MISC_USER:
287 		he_stat->period_us += period;
288 		break;
289 	case PERF_RECORD_MISC_GUEST_KERNEL:
290 		he_stat->period_guest_sys += period;
291 		break;
292 	case PERF_RECORD_MISC_GUEST_USER:
293 		he_stat->period_guest_us += period;
294 		break;
295 	default:
296 		break;
297 	}
298 }
299 
300 static long hist_time(unsigned long htime)
301 {
302 	unsigned long time_quantum = symbol_conf.time_quantum;
303 	if (time_quantum)
304 		return (htime / time_quantum) * time_quantum;
305 	return htime;
306 }
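/*
 * Example (added for illustration): with symbol_conf.time_quantum set to
 * 100ms in nanoseconds (100000000), a sample time of 1234567890 ns is
 * truncated to 1200000000 ns, so all samples that fall into the same 100ms
 * bucket end up with the same 'time' sort key.
 */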
307 
308 static void he_stat__add_period(struct he_stat *he_stat, u64 period, u64 latency)
309 {
310 	he_stat->period		+= period;
311 	he_stat->latency	+= latency;
312 	he_stat->nr_events	+= 1;
313 }
314 
315 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
316 {
317 	dest->period		+= src->period;
318 	dest->period_sys	+= src->period_sys;
319 	dest->period_us		+= src->period_us;
320 	dest->period_guest_sys	+= src->period_guest_sys;
321 	dest->period_guest_us	+= src->period_guest_us;
322 	dest->weight1		+= src->weight1;
323 	dest->weight2		+= src->weight2;
324 	dest->weight3		+= src->weight3;
325 	dest->nr_events		+= src->nr_events;
326 	dest->latency		+= src->latency;
327 }
328 
329 static void he_stat__decay(struct he_stat *he_stat)
330 {
331 	he_stat->period = (he_stat->period * 7) / 8;
332 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
333 	he_stat->weight1 = (he_stat->weight1 * 7) / 8;
334 	he_stat->weight2 = (he_stat->weight2 * 7) / 8;
335 	he_stat->weight3 = (he_stat->weight3 * 7) / 8;
336 	he_stat->latency = (he_stat->latency * 7) / 8;
337 }
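/*
 * Illustrative note (added): each decay pass multiplies the statistics by
 * 7/8 with integer division, e.g. a period of 800 becomes 700, then 612,
 * 535, ... and eventually 0, at which point hists__decay_entry() reports the
 * entry as dead and it is removed by hists__decay_entries().
 */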
338 
339 static int hists__update_mem_stat(struct hists *hists, struct hist_entry *he,
340 				  struct mem_info *mi, u64 period)
341 {
342 	if (hists->nr_mem_stats == 0)
343 		return 0;
344 
345 	if (he->mem_stat == NULL) {
346 		he->mem_stat = calloc(hists->nr_mem_stats, sizeof(*he->mem_stat));
347 		if (he->mem_stat == NULL)
348 			return -1;
349 	}
350 
351 	for (int i = 0; i < hists->nr_mem_stats; i++) {
352 		int idx = mem_stat_index(hists->mem_stat_types[i],
353 					 mem_info__const_data_src(mi)->val);
354 
355 		assert(0 <= idx && idx < MEM_STAT_LEN);
356 		he->mem_stat[i].entries[idx] += period;
357 		hists->mem_stat_total[i].entries[idx] += period;
358 	}
359 	return 0;
360 }
361 
362 static void hists__add_mem_stat(struct hists *hists, struct hist_entry *dst,
363 				struct hist_entry *src)
364 {
365 	if (hists->nr_mem_stats == 0)
366 		return;
367 
368 	for (int i = 0; i < hists->nr_mem_stats; i++) {
369 		for (int k = 0; k < MEM_STAT_LEN; k++)
370 			dst->mem_stat[i].entries[k] += src->mem_stat[i].entries[k];
371 	}
372 }
373 
374 static int hists__clone_mem_stat(struct hists *hists, struct hist_entry *dst,
375 				  struct hist_entry *src)
376 {
377 	if (hists->nr_mem_stats == 0)
378 		return 0;
379 
380 	dst->mem_stat = calloc(hists->nr_mem_stats, sizeof(*dst->mem_stat));
381 	if (dst->mem_stat == NULL)
382 		return -1;
383 
384 	for (int i = 0; i < hists->nr_mem_stats; i++) {
385 		for (int k = 0; k < MEM_STAT_LEN; k++)
386 			dst->mem_stat[i].entries[k] = src->mem_stat[i].entries[k];
387 	}
388 	return 0;
389 }
390 
391 static void hists__decay_mem_stat(struct hists *hists, struct hist_entry *he)
392 {
393 	if (hists->nr_mem_stats == 0)
394 		return;
395 
396 	for (int i = 0; i < hists->nr_mem_stats; i++) {
397 		for (int k = 0; k < MEM_STAT_LEN; k++)
398 			he->mem_stat[i].entries[k] = (he->mem_stat[i].entries[k] * 7) / 8;
399 	}
400 }
401 
402 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
403 
404 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
405 {
406 	u64 prev_period = he->stat.period;
407 	u64 prev_latency = he->stat.latency;
408 
409 	if (prev_period == 0)
410 		return true;
411 
412 	he_stat__decay(&he->stat);
413 	if (symbol_conf.cumulate_callchain)
414 		he_stat__decay(he->stat_acc);
415 	decay_callchain(he->callchain);
416 	hists__decay_mem_stat(hists, he);
417 
418 	if (!he->depth) {
419 		u64 period_diff = prev_period - he->stat.period;
420 		u64 latency_diff = prev_latency - he->stat.latency;
421 
422 		hists->stats.total_period -= period_diff;
423 		hists->stats.total_latency -= latency_diff;
424 		if (!he->filtered) {
425 			hists->stats.total_non_filtered_period -= period_diff;
426 			hists->stats.total_non_filtered_latency -= latency_diff;
427 		}
428 	}
429 
430 	if (!he->leaf) {
431 		struct hist_entry *child;
432 		struct rb_node *node = rb_first_cached(&he->hroot_out);
433 		while (node) {
434 			child = rb_entry(node, struct hist_entry, rb_node);
435 			node = rb_next(node);
436 
437 			if (hists__decay_entry(hists, child))
438 				hists__delete_entry(hists, child);
439 		}
440 	}
441 
442 	return he->stat.period == 0 && he->stat.latency == 0;
443 }
444 
445 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
446 {
447 	struct rb_root_cached *root_in;
448 	struct rb_root_cached *root_out;
449 
450 	if (he->parent_he) {
451 		root_in  = &he->parent_he->hroot_in;
452 		root_out = &he->parent_he->hroot_out;
453 	} else {
454 		if (hists__has(hists, need_collapse))
455 			root_in = &hists->entries_collapsed;
456 		else
457 			root_in = hists->entries_in;
458 		root_out = &hists->entries;
459 	}
460 
461 	rb_erase_cached(&he->rb_node_in, root_in);
462 	rb_erase_cached(&he->rb_node, root_out);
463 
464 	--hists->nr_entries;
465 	if (!he->filtered)
466 		--hists->nr_non_filtered_entries;
467 
468 	hist_entry__delete(he);
469 }
470 
471 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
472 {
473 	struct rb_node *next = rb_first_cached(&hists->entries);
474 	struct hist_entry *n;
475 
476 	while (next) {
477 		n = rb_entry(next, struct hist_entry, rb_node);
478 		next = rb_next(&n->rb_node);
479 		if (((zap_user && n->level == '.') ||
480 		     (zap_kernel && n->level != '.') ||
481 		     hists__decay_entry(hists, n))) {
482 			hists__delete_entry(hists, n);
483 		}
484 	}
485 }
486 
487 void hists__delete_entries(struct hists *hists)
488 {
489 	struct rb_node *next = rb_first_cached(&hists->entries);
490 	struct hist_entry *n;
491 
492 	while (next) {
493 		n = rb_entry(next, struct hist_entry, rb_node);
494 		next = rb_next(&n->rb_node);
495 
496 		hists__delete_entry(hists, n);
497 	}
498 }
499 
500 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
501 {
502 	struct rb_node *next = rb_first_cached(&hists->entries);
503 	struct hist_entry *n;
504 	int i = 0;
505 
506 	while (next) {
507 		n = rb_entry(next, struct hist_entry, rb_node);
508 		if (i == idx)
509 			return n;
510 
511 		next = rb_next(&n->rb_node);
512 		i++;
513 	}
514 
515 	return NULL;
516 }
517 
518 /*
519  * histogram, sorted on item, collects periods
520  */
521 
522 static int hist_entry__init(struct hist_entry *he,
523 			    struct hist_entry *template,
524 			    bool sample_self,
525 			    size_t callchain_size)
526 {
527 	*he = *template;
528 	he->callchain_size = callchain_size;
529 
530 	if (symbol_conf.cumulate_callchain) {
531 		he->stat_acc = malloc(sizeof(he->stat));
532 		if (he->stat_acc == NULL)
533 			return -ENOMEM;
534 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
535 		if (!sample_self)
536 			memset(&he->stat, 0, sizeof(he->stat));
537 	}
538 
539 	he->ms.maps = maps__get(he->ms.maps);
540 	he->ms.map = map__get(he->ms.map);
541 
542 	if (he->branch_info) {
543 		/*
544 		 * This branch info is (part of) an array allocated by
545 		 * sample__resolve_bstack() and will be freed after
546 		 * adding new entries.  So we need to save a copy.
547 		 */
548 		he->branch_info = malloc(sizeof(*he->branch_info));
549 		if (he->branch_info == NULL)
550 			goto err;
551 
552 		memcpy(he->branch_info, template->branch_info,
553 		       sizeof(*he->branch_info));
554 
555 		he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
556 		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
557 		he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
558 		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
559 	}
560 
561 	if (he->mem_info) {
562 		he->mem_info = mem_info__clone(template->mem_info);
563 		if (he->mem_info == NULL)
564 			goto err_infos;
565 	}
566 
567 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
568 		callchain_init(he->callchain);
569 
570 	if (he->raw_data) {
571 		he->raw_data = memdup(he->raw_data, he->raw_size);
572 		if (he->raw_data == NULL)
573 			goto err_infos;
574 	}
575 
576 	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
577 		he->srcline = strdup(he->srcline);
578 		if (he->srcline == NULL)
579 			goto err_rawdata;
580 	}
581 
582 	if (symbol_conf.res_sample) {
583 		he->res_samples = calloc(symbol_conf.res_sample,
584 					sizeof(struct res_sample));
585 		if (!he->res_samples)
586 			goto err_srcline;
587 	}
588 
589 	INIT_LIST_HEAD(&he->pairs.node);
590 	he->thread = thread__get(he->thread);
591 	he->hroot_in  = RB_ROOT_CACHED;
592 	he->hroot_out = RB_ROOT_CACHED;
593 
594 	if (!symbol_conf.report_hierarchy)
595 		he->leaf = true;
596 
597 	return 0;
598 
599 err_srcline:
600 	zfree(&he->srcline);
601 
602 err_rawdata:
603 	zfree(&he->raw_data);
604 
605 err_infos:
606 	if (he->branch_info) {
607 		map_symbol__exit(&he->branch_info->from.ms);
608 		map_symbol__exit(&he->branch_info->to.ms);
609 		zfree(&he->branch_info);
610 	}
611 	if (he->mem_info) {
612 		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
613 		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
614 	}
615 err:
616 	map_symbol__exit(&he->ms);
617 	zfree(&he->stat_acc);
618 	return -ENOMEM;
619 }
620 
621 static void *hist_entry__zalloc(size_t size)
622 {
623 	return zalloc(size + sizeof(struct hist_entry));
624 }
625 
626 static void hist_entry__free(void *ptr)
627 {
628 	free(ptr);
629 }
630 
631 static struct hist_entry_ops default_ops = {
632 	.new	= hist_entry__zalloc,
633 	.free	= hist_entry__free,
634 };
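/*
 * Note (added commentary): hist_entry__zalloc() reserves the requested
 * callchain_size bytes in the same allocation as the hist_entry, so when
 * callchains are enabled the callchain_root lives directly behind the entry
 * (assumed here to be the trailing flexible member declared in sort.h) and
 * only needs callchain_init() in hist_entry__init().
 */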
635 
636 static struct hist_entry *hist_entry__new(struct hist_entry *template,
637 					  bool sample_self)
638 {
639 	struct hist_entry_ops *ops = template->ops;
640 	size_t callchain_size = 0;
641 	struct hist_entry *he;
642 	int err = 0;
643 
644 	if (!ops)
645 		ops = template->ops = &default_ops;
646 
647 	if (symbol_conf.use_callchain)
648 		callchain_size = sizeof(struct callchain_root);
649 
650 	he = ops->new(callchain_size);
651 	if (he) {
652 		err = hist_entry__init(he, template, sample_self, callchain_size);
653 		if (err) {
654 			ops->free(he);
655 			he = NULL;
656 		}
657 	}
658 	return he;
659 }
660 
661 static filter_mask_t symbol__parent_filter(const struct symbol *parent)
662 {
663 	if (symbol_conf.exclude_other && parent == NULL)
664 		return 1 << HIST_FILTER__PARENT;
665 	return 0;
666 }
667 
668 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period, u64 latency)
669 {
670 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
671 		return;
672 
673 	he->hists->callchain_period += period;
674 	he->hists->callchain_latency += latency;
675 	if (!he->filtered) {
676 		he->hists->callchain_non_filtered_period += period;
677 		he->hists->callchain_non_filtered_latency += latency;
678 	}
679 }
680 
681 static struct hist_entry *hists__findnew_entry(struct hists *hists,
682 					       struct hist_entry *entry,
683 					       const struct addr_location *al,
684 					       bool sample_self)
685 {
686 	struct rb_node **p;
687 	struct rb_node *parent = NULL;
688 	struct hist_entry *he;
689 	int64_t cmp;
690 	u64 period = entry->stat.period;
691 	u64 latency = entry->stat.latency;
692 	bool leftmost = true;
693 
694 	p = &hists->entries_in->rb_root.rb_node;
695 
696 	while (*p != NULL) {
697 		parent = *p;
698 		he = rb_entry(parent, struct hist_entry, rb_node_in);
699 
700 		/*
701 		 * Make sure that it receives arguments in the same order as
702 		 * hist_entry__collapse() so that we can use an appropriate
703 		 * function when searching an entry regardless which sort
704 		 * keys were used.
705 		 */
706 		cmp = hist_entry__cmp(he, entry);
707 		if (!cmp) {
708 			if (sample_self) {
709 				he_stat__add_stat(&he->stat, &entry->stat);
710 				hist_entry__add_callchain_period(he, period, latency);
711 			}
712 			if (symbol_conf.cumulate_callchain)
713 				he_stat__add_period(he->stat_acc, period, latency);
714 
715 			block_info__delete(entry->block_info);
716 
717 			kvm_info__zput(entry->kvm_info);
718 
719 			/* If the map of an existing hist_entry has
720 			 * become out-of-date due to an exec() or
721 			 * similar, update it.  Otherwise we will
722 			 * mis-adjust symbol addresses when computing
723 			 * the history counter to increment.
724 			 */
725 			if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
726 				if (he->ms.sym) {
727 					u64 addr = he->ms.sym->start;
728 					he->ms.sym = map__find_symbol(entry->ms.map, addr);
729 				}
730 
731 				map__put(he->ms.map);
732 				he->ms.map = map__get(entry->ms.map);
733 			}
734 			goto out;
735 		}
736 
737 		if (cmp < 0)
738 			p = &(*p)->rb_left;
739 		else {
740 			p = &(*p)->rb_right;
741 			leftmost = false;
742 		}
743 	}
744 
745 	he = hist_entry__new(entry, sample_self);
746 	if (!he)
747 		return NULL;
748 
749 	if (sample_self)
750 		hist_entry__add_callchain_period(he, period, latency);
751 	hists->nr_entries++;
752 
753 	rb_link_node(&he->rb_node_in, parent, p);
754 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
755 out:
756 	if (sample_self)
757 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
758 	if (symbol_conf.cumulate_callchain)
759 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
760 	if (hists__update_mem_stat(hists, he, entry->mem_info, period) < 0) {
761 		hist_entry__delete(he);
762 		return NULL;
763 	}
764 	return he;
765 }
766 
767 static unsigned random_max(unsigned high)
768 {
769 	unsigned thresh = -high % high;
770 	for (;;) {
771 		unsigned r = random();
772 		if (r >= thresh)
773 			return r % high;
774 	}
775 }
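/*
 * Explanatory note (added): this is rejection sampling to avoid modulo bias.
 * In 32-bit unsigned arithmetic '-high % high' equals 2^32 % high; e.g. for
 * high == 10 the threshold is 6, so results below 6 are retried and the
 * remaining 4294967290 values divide evenly into groups of 10, giving each
 * outcome 0..9 equal probability (assuming random() is uniform over the
 * full unsigned range).
 */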
776 
777 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
778 {
779 	struct res_sample *r;
780 	int j;
781 
782 	if (he->num_res < symbol_conf.res_sample) {
783 		j = he->num_res++;
784 	} else {
785 		j = random_max(symbol_conf.res_sample);
786 	}
787 	r = &he->res_samples[j];
788 	r->time = sample->time;
789 	r->cpu = sample->cpu;
790 	r->tid = sample->tid;
791 }
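/*
 * Note (added): the first symbol_conf.res_sample samples fill the buffer in
 * order; after that each new sample overwrites a uniformly chosen slot, so
 * the entry keeps a bounded, roughly representative set of (time, cpu, tid)
 * tuples that can later be used to refer back to concrete samples (e.g. from
 * the report browser).
 */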
792 
793 static struct hist_entry*
794 __hists__add_entry(struct hists *hists,
795 		   struct addr_location *al,
796 		   struct symbol *sym_parent,
797 		   struct branch_info *bi,
798 		   struct mem_info *mi,
799 		   struct kvm_info *ki,
800 		   struct block_info *block_info,
801 		   struct perf_sample *sample,
802 		   bool sample_self,
803 		   struct hist_entry_ops *ops)
804 {
805 	struct namespaces *ns = thread__namespaces(al->thread);
806 	struct hist_entry entry = {
807 		.thread	= al->thread,
808 		.comm = thread__comm(al->thread),
809 		.cgroup_id = {
810 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
811 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
812 		},
813 		.cgroup = sample->cgroup,
814 		.ms = {
815 			.maps	= al->maps,
816 			.map	= al->map,
817 			.sym	= al->sym,
818 		},
819 		.srcline = (char *) al->srcline,
820 		.socket	 = al->socket,
821 		.cpu	 = al->cpu,
822 		.cpumode = al->cpumode,
823 		.ip	 = al->addr,
824 		.level	 = al->level,
825 		.code_page_size = sample->code_page_size,
826 		.parallelism	= al->parallelism,
827 		.stat = {
828 			.nr_events = 1,
829 			.period	= sample->period,
830 			.weight1 = sample->weight,
831 			.weight2 = sample->ins_lat,
832 			.weight3 = sample->weight3,
833 			.latency = al->latency,
834 		},
835 		.parent = sym_parent,
836 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
837 		.hists	= hists,
838 		.branch_info = bi,
839 		.mem_info = mi,
840 		.kvm_info = ki,
841 		.block_info = block_info,
842 		.transaction = sample->transaction,
843 		.raw_data = sample->raw_data,
844 		.raw_size = sample->raw_size,
845 		.ops = ops,
846 		.time = hist_time(sample->time),
847 		.weight = sample->weight,
848 		.ins_lat = sample->ins_lat,
849 		.weight3 = sample->weight3,
850 		.simd_flags = sample->simd_flags,
851 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
852 
853 	if (!hists->has_callchains && he && he->callchain_size != 0)
854 		hists->has_callchains = true;
855 	if (he && symbol_conf.res_sample)
856 		hists__res_sample(he, sample);
857 	return he;
858 }
859 
860 struct hist_entry *hists__add_entry(struct hists *hists,
861 				    struct addr_location *al,
862 				    struct symbol *sym_parent,
863 				    struct branch_info *bi,
864 				    struct mem_info *mi,
865 				    struct kvm_info *ki,
866 				    struct perf_sample *sample,
867 				    bool sample_self)
868 {
869 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
870 				  sample, sample_self, NULL);
871 }
872 
873 struct hist_entry *hists__add_entry_ops(struct hists *hists,
874 					struct hist_entry_ops *ops,
875 					struct addr_location *al,
876 					struct symbol *sym_parent,
877 					struct branch_info *bi,
878 					struct mem_info *mi,
879 					struct kvm_info *ki,
880 					struct perf_sample *sample,
881 					bool sample_self)
882 {
883 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
884 				  sample, sample_self, ops);
885 }
886 
887 struct hist_entry *hists__add_entry_block(struct hists *hists,
888 					  struct addr_location *al,
889 					  struct block_info *block_info)
890 {
891 	struct hist_entry entry = {
892 		.block_info = block_info,
893 		.hists = hists,
894 		.ms = {
895 			.maps = al->maps,
896 			.map = al->map,
897 			.sym = al->sym,
898 		},
899 	}, *he = hists__findnew_entry(hists, &entry, al, false);
900 
901 	return he;
902 }
903 
904 static int
905 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
906 		    struct addr_location *al __maybe_unused)
907 {
908 	return 0;
909 }
910 
911 static int
912 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
913 			struct addr_location *al __maybe_unused)
914 {
915 	return 0;
916 }
917 
918 static int
919 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
920 {
921 	struct perf_sample *sample = iter->sample;
922 	struct mem_info *mi;
923 
924 	mi = sample__resolve_mem(sample, al);
925 	if (mi == NULL)
926 		return -ENOMEM;
927 
928 	iter->mi = mi;
929 	return 0;
930 }
931 
932 static int
933 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
934 {
935 	u64 cost;
936 	struct mem_info *mi = iter->mi;
937 	struct hists *hists = evsel__hists(iter->evsel);
938 	struct perf_sample *sample = iter->sample;
939 	struct hist_entry *he;
940 
941 	if (mi == NULL)
942 		return -EINVAL;
943 
944 	cost = sample->weight;
945 	if (!cost)
946 		cost = 1;
947 
948 	/*
949 	 * must pass period=weight in order to get the correct
950 	 * sorting from hists__collapse_resort() which is solely
951 	 * based on periods. We want sorting to be done on nr_events * weight
952 	 * and this is indirectly achieved by passing period=weight here
953 	 * and the he_stat__add_period() function.
954 	 */
955 	sample->period = cost;
956 
957 	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
958 			      sample, true);
959 	if (!he)
960 		return -ENOMEM;
961 
962 	iter->he = he;
963 	return 0;
964 }
965 
966 static int
iter_finish_mem_entry(struct hist_entry_iter * iter,struct addr_location * al __maybe_unused)967 iter_finish_mem_entry(struct hist_entry_iter *iter,
968 		      struct addr_location *al __maybe_unused)
969 {
970 	struct evsel *evsel = iter->evsel;
971 	struct hists *hists = evsel__hists(evsel);
972 	struct hist_entry *he = iter->he;
973 	int err = -EINVAL;
974 
975 	if (he == NULL)
976 		goto out;
977 
978 	hists__inc_nr_samples(hists, he->filtered);
979 
980 	err = hist_entry__append_callchain(he, iter->sample);
981 
982 out:
983 	mem_info__zput(iter->mi);
984 
985 	iter->he = NULL;
986 	return err;
987 }
988 
989 static int
990 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
991 {
992 	struct branch_info *bi;
993 	struct perf_sample *sample = iter->sample;
994 
995 	bi = sample__resolve_bstack(sample, al);
996 	if (!bi)
997 		return -ENOMEM;
998 
999 	iter->curr = 0;
1000 	iter->total = sample->branch_stack->nr;
1001 
1002 	iter->bi = bi;
1003 	return 0;
1004 }
1005 
1006 static int
1007 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
1008 			     struct addr_location *al __maybe_unused)
1009 {
1010 	return 0;
1011 }
1012 
1013 static int
1014 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1015 {
1016 	struct branch_info *bi = iter->bi;
1017 	int i = iter->curr;
1018 
1019 	if (bi == NULL)
1020 		return 0;
1021 
1022 	if (iter->curr >= iter->total)
1023 		return 0;
1024 
1025 	maps__put(al->maps);
1026 	al->maps = maps__get(bi[i].to.ms.maps);
1027 	map__put(al->map);
1028 	al->map = map__get(bi[i].to.ms.map);
1029 	al->sym = bi[i].to.ms.sym;
1030 	al->addr = bi[i].to.addr;
1031 	return 1;
1032 }
1033 
1034 static int
1035 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1036 {
1037 	struct branch_info *bi;
1038 	struct evsel *evsel = iter->evsel;
1039 	struct hists *hists = evsel__hists(evsel);
1040 	struct perf_sample *sample = iter->sample;
1041 	struct hist_entry *he = NULL;
1042 	int i = iter->curr;
1043 	int err = 0;
1044 
1045 	bi = iter->bi;
1046 
1047 	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
1048 		goto out;
1049 
1050 	/*
1051 	 * The report shows the percentage of total branches captured
1052 	 * and not events sampled. Thus we use a pseudo period of 1.
1053 	 */
1054 	sample->period = 1;
1055 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
1056 
1057 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
1058 			      sample, true);
1059 	if (he == NULL)
1060 		return -ENOMEM;
1061 
1062 out:
1063 	iter->he = he;
1064 	iter->curr++;
1065 	return err;
1066 }
1067 
1068 static void branch_info__exit(struct branch_info *bi)
1069 {
1070 	map_symbol__exit(&bi->from.ms);
1071 	map_symbol__exit(&bi->to.ms);
1072 	zfree_srcline(&bi->srcline_from);
1073 	zfree_srcline(&bi->srcline_to);
1074 }
1075 
1076 static int
1077 iter_finish_branch_entry(struct hist_entry_iter *iter,
1078 			 struct addr_location *al __maybe_unused)
1079 {
1080 	struct evsel *evsel = iter->evsel;
1081 	struct hists *hists = evsel__hists(evsel);
1082 
1083 	for (int i = 0; i < iter->total; i++)
1084 		branch_info__exit(&iter->bi[i]);
1085 
1086 	if (iter->he)
1087 		hists__inc_nr_samples(hists, iter->he->filtered);
1088 
1089 	zfree(&iter->bi);
1090 	iter->he = NULL;
1091 
1092 	return iter->curr >= iter->total ? 0 : -1;
1093 }
1094 
1095 static int
1096 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
1097 			  struct addr_location *al __maybe_unused)
1098 {
1099 	return 0;
1100 }
1101 
1102 static int
1103 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
1104 {
1105 	struct evsel *evsel = iter->evsel;
1106 	struct perf_sample *sample = iter->sample;
1107 	struct hist_entry *he;
1108 
1109 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1110 			      NULL, sample, true);
1111 	if (he == NULL)
1112 		return -ENOMEM;
1113 
1114 	iter->he = he;
1115 	return 0;
1116 }
1117 
1118 static int
1119 iter_finish_normal_entry(struct hist_entry_iter *iter,
1120 			 struct addr_location *al __maybe_unused)
1121 {
1122 	struct hist_entry *he = iter->he;
1123 	struct evsel *evsel = iter->evsel;
1124 	struct perf_sample *sample = iter->sample;
1125 
1126 	if (he == NULL)
1127 		return 0;
1128 
1129 	iter->he = NULL;
1130 
1131 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1132 
1133 	return hist_entry__append_callchain(he, sample);
1134 }
1135 
1136 static int
1137 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1138 			      struct addr_location *al __maybe_unused)
1139 {
1140 	struct hist_entry **he_cache;
1141 	struct callchain_cursor *cursor = get_tls_callchain_cursor();
1142 
1143 	if (cursor == NULL)
1144 		return -ENOMEM;
1145 
1146 	callchain_cursor_commit(cursor);
1147 
1148 	/*
1149 	 * This is for detecting cycles or recursion so that entries are
1150 	 * accumulated only once, preventing any single entry from exceeding
1151 	 * 100% overhead.
1152 	 */
1153 	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
1154 	if (he_cache == NULL)
1155 		return -ENOMEM;
1156 
1157 	iter->he_cache = he_cache;
1158 	iter->curr = 0;
1159 
1160 	return 0;
1161 }
1162 
1163 static int
1164 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1165 				 struct addr_location *al)
1166 {
1167 	struct evsel *evsel = iter->evsel;
1168 	struct hists *hists = evsel__hists(evsel);
1169 	struct perf_sample *sample = iter->sample;
1170 	struct hist_entry **he_cache = iter->he_cache;
1171 	struct hist_entry *he;
1172 	int err = 0;
1173 
1174 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1175 			      sample, true);
1176 	if (he == NULL)
1177 		return -ENOMEM;
1178 
1179 	iter->he = he;
1180 	he_cache[iter->curr++] = he;
1181 
1182 	hist_entry__append_callchain(he, sample);
1183 
1184 	/*
1185 	 * We need to re-initialize the cursor since callchain_append()
1186 	 * advanced the cursor to the end.
1187 	 */
1188 	callchain_cursor_commit(get_tls_callchain_cursor());
1189 
1190 	hists__inc_nr_samples(hists, he->filtered);
1191 
1192 	return err;
1193 }
1194 
1195 static int
1196 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1197 			   struct addr_location *al)
1198 {
1199 	struct callchain_cursor_node *node;
1200 
1201 	node = callchain_cursor_current(get_tls_callchain_cursor());
1202 	if (node == NULL)
1203 		return 0;
1204 
1205 	return fill_callchain_info(al, node, iter->hide_unresolved);
1206 }
1207 
1208 static bool
1209 hist_entry__fast__sym_diff(struct hist_entry *left,
1210 			   struct hist_entry *right)
1211 {
1212 	struct symbol *sym_l = left->ms.sym;
1213 	struct symbol *sym_r = right->ms.sym;
1214 
1215 	if (!sym_l && !sym_r)
1216 		return left->ip != right->ip;
1217 
1218 	return !!_sort__sym_cmp(sym_l, sym_r);
1219 }
1220 
1221 
1222 static int
1223 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1224 			       struct addr_location *al)
1225 {
1226 	struct evsel *evsel = iter->evsel;
1227 	struct perf_sample *sample = iter->sample;
1228 	struct hist_entry **he_cache = iter->he_cache;
1229 	struct hist_entry *he;
1230 	struct hist_entry he_tmp = {
1231 		.hists = evsel__hists(evsel),
1232 		.cpu = al->cpu,
1233 		.thread = al->thread,
1234 		.comm = thread__comm(al->thread),
1235 		.ip = al->addr,
1236 		.ms = {
1237 			.maps = al->maps,
1238 			.map = al->map,
1239 			.sym = al->sym,
1240 		},
1241 		.srcline = (char *) al->srcline,
1242 		.parent = iter->parent,
1243 		.raw_data = sample->raw_data,
1244 		.raw_size = sample->raw_size,
1245 	};
1246 	int i;
1247 	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
1248 	bool fast = hists__has(he_tmp.hists, sym);
1249 
1250 	if (tls_cursor == NULL)
1251 		return -ENOMEM;
1252 
1253 	callchain_cursor_snapshot(&cursor, tls_cursor);
1254 
1255 	callchain_cursor_advance(tls_cursor);
1256 
1257 	/*
1258 	 * Check if there are duplicate entries in the callchain.
1259 	 * It's possible that it has cycles or recursive calls.
1260 	 */
1261 	for (i = 0; i < iter->curr; i++) {
1262 		/*
1263 		 * For most cases, there are no duplicate entries in callchain.
1264 		 * The symbols are usually different. Do a quick check for
1265 		 * symbols first.
1266 		 */
1267 		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1268 			continue;
1269 
1270 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1271 			/* to avoid calling callback function */
1272 			iter->he = NULL;
1273 			return 0;
1274 		}
1275 	}
1276 
1277 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1278 			      NULL, sample, false);
1279 	if (he == NULL)
1280 		return -ENOMEM;
1281 
1282 	iter->he = he;
1283 	he_cache[iter->curr++] = he;
1284 
1285 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1286 		callchain_append(he->callchain, &cursor, sample->period);
1287 	return 0;
1288 }
1289 
1290 static int
1291 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1292 			     struct addr_location *al __maybe_unused)
1293 {
1294 	mem_info__zput(iter->mi);
1295 	zfree(&iter->bi);
1296 	zfree(&iter->he_cache);
1297 	iter->he = NULL;
1298 
1299 	return 0;
1300 }
1301 
1302 const struct hist_iter_ops hist_iter_mem = {
1303 	.prepare_entry 		= iter_prepare_mem_entry,
1304 	.add_single_entry 	= iter_add_single_mem_entry,
1305 	.next_entry 		= iter_next_nop_entry,
1306 	.add_next_entry 	= iter_add_next_nop_entry,
1307 	.finish_entry 		= iter_finish_mem_entry,
1308 };
1309 
1310 const struct hist_iter_ops hist_iter_branch = {
1311 	.prepare_entry 		= iter_prepare_branch_entry,
1312 	.add_single_entry 	= iter_add_single_branch_entry,
1313 	.next_entry 		= iter_next_branch_entry,
1314 	.add_next_entry 	= iter_add_next_branch_entry,
1315 	.finish_entry 		= iter_finish_branch_entry,
1316 };
1317 
1318 const struct hist_iter_ops hist_iter_normal = {
1319 	.prepare_entry 		= iter_prepare_normal_entry,
1320 	.add_single_entry 	= iter_add_single_normal_entry,
1321 	.next_entry 		= iter_next_nop_entry,
1322 	.add_next_entry 	= iter_add_next_nop_entry,
1323 	.finish_entry 		= iter_finish_normal_entry,
1324 };
1325 
1326 const struct hist_iter_ops hist_iter_cumulative = {
1327 	.prepare_entry 		= iter_prepare_cumulative_entry,
1328 	.add_single_entry 	= iter_add_single_cumulative_entry,
1329 	.next_entry 		= iter_next_cumulative_entry,
1330 	.add_next_entry 	= iter_add_next_cumulative_entry,
1331 	.finish_entry 		= iter_finish_cumulative_entry,
1332 };
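/*
 * Overview (added commentary): all four ops tables implement the same
 * protocol driven by hist_entry_iter__add() below: prepare_entry() resolves
 * per-sample data (mem/branch info, callchain cursor), add_single_entry()
 * creates the primary hist_entry, next_entry()/add_next_entry() loop over
 * any additional entries (branch stack hops or cumulative callchain
 * parents), and finish_entry() accounts the sample and frees temporary
 * state.
 */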
1333 
1334 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1335 			 int max_stack_depth, void *arg)
1336 {
1337 	int err, err2;
1338 	struct map *alm = NULL;
1339 
1340 	if (al)
1341 		alm = map__get(al->map);
1342 
1343 	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
1344 					iter->evsel, al, max_stack_depth);
1345 	if (err) {
1346 		map__put(alm);
1347 		return err;
1348 	}
1349 
1350 	err = iter->ops->prepare_entry(iter, al);
1351 	if (err)
1352 		goto out;
1353 
1354 	err = iter->ops->add_single_entry(iter, al);
1355 	if (err)
1356 		goto out;
1357 
1358 	if (iter->he && iter->add_entry_cb) {
1359 		err = iter->add_entry_cb(iter, al, true, arg);
1360 		if (err)
1361 			goto out;
1362 	}
1363 
1364 	while (iter->ops->next_entry(iter, al)) {
1365 		err = iter->ops->add_next_entry(iter, al);
1366 		if (err)
1367 			break;
1368 
1369 		if (iter->he && iter->add_entry_cb) {
1370 			err = iter->add_entry_cb(iter, al, false, arg);
1371 			if (err)
1372 				goto out;
1373 		}
1374 	}
1375 
1376 out:
1377 	err2 = iter->ops->finish_entry(iter, al);
1378 	if (!err)
1379 		err = err2;
1380 
1381 	map__put(alm);
1382 
1383 	return err;
1384 }
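/*
 * Usage sketch (added for illustration; 'al', 'rep' and 'max_stack' are
 * hypothetical caller-side names, while the iterator fields and ops tables
 * are the ones defined above):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= symbol_conf.cumulate_callchain ?
 *			  &hist_iter_cumulative : &hist_iter_normal,
 *	};
 *
 *	if (hist_entry_iter__add(&iter, &al, max_stack, rep) < 0)
 *		pr_err("problem adding hist entry, skipping event\n");
 *
 * perf report and perf top feed resolved samples through this interface in
 * essentially this way.
 */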
1385 
1386 static int64_t
1387 hist_entry__cmp_impl(struct perf_hpp_list *hpp_list, struct hist_entry *left,
1388 		     struct hist_entry *right, unsigned long fn_offset,
1389 		     bool ignore_dynamic, bool ignore_skipped)
1390 {
1391 	struct hists *hists = left->hists;
1392 	struct perf_hpp_fmt *fmt;
1393 	perf_hpp_fmt_cmp_t *fn;
1394 	int64_t cmp;
1395 
1396 	/*
1397 	 * Never collapse filtered and non-filtered entries.
1398 	 * Note this is not the same as having an extra (invisible) fmt
1399 	 * that corresponds to the filtered status.
1400 	 */
1401 	cmp = (int64_t)!!left->filtered - (int64_t)!!right->filtered;
1402 	if (cmp)
1403 		return cmp;
1404 
1405 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1406 		if (ignore_dynamic && perf_hpp__is_dynamic_entry(fmt) &&
1407 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1408 			continue;
1409 
1410 		if (ignore_skipped && perf_hpp__should_skip(fmt, hists))
1411 			continue;
1412 
1413 		fn = (void *)fmt + fn_offset;
1414 		cmp = (*fn)(fmt, left, right);
1415 		if (cmp)
1416 			break;
1417 	}
1418 
1419 	return cmp;
1420 }
1421 
1422 int64_t
1423 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1424 {
1425 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1426 		offsetof(struct perf_hpp_fmt, cmp), true, false);
1427 }
1428 
1429 static int64_t
1430 hist_entry__sort(struct hist_entry *left, struct hist_entry *right)
1431 {
1432 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1433 		offsetof(struct perf_hpp_fmt, sort), false, true);
1434 }
1435 
1436 int64_t
1437 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1438 {
1439 	return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1440 		offsetof(struct perf_hpp_fmt, collapse), true, false);
1441 }
1442 
1443 static int64_t
1444 hist_entry__collapse_hierarchy(struct perf_hpp_list *hpp_list,
1445 			       struct hist_entry *left,
1446 			       struct hist_entry *right)
1447 {
1448 	return hist_entry__cmp_impl(hpp_list, left, right,
1449 		offsetof(struct perf_hpp_fmt, collapse), false, false);
1450 }
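/*
 * Note (added): the wrappers above pick different per-format callbacks:
 * ->cmp is used when inserting fresh entries in hists__findnew_entry(),
 * ->collapse when merging equal entries in hists__collapse_insert_entry(),
 * and ->sort when ordering entries for final output.  All of them first
 * refuse to merge filtered with non-filtered entries.
 */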
1451 
1452 void hist_entry__delete(struct hist_entry *he)
1453 {
1454 	struct hist_entry_ops *ops = he->ops;
1455 
1456 	if (symbol_conf.report_hierarchy) {
1457 		struct rb_root *root = &he->hroot_out.rb_root;
1458 		struct hist_entry *child, *tmp;
1459 
1460 		rbtree_postorder_for_each_entry_safe(child, tmp, root, rb_node)
1461 			hist_entry__delete(child);
1462 
1463 		*root = RB_ROOT;
1464 	}
1465 
1466 	thread__zput(he->thread);
1467 	map_symbol__exit(&he->ms);
1468 
1469 	if (he->branch_info) {
1470 		branch_info__exit(he->branch_info);
1471 		zfree(&he->branch_info);
1472 	}
1473 
1474 	if (he->mem_info) {
1475 		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
1476 		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
1477 		mem_info__zput(he->mem_info);
1478 	}
1479 
1480 	if (he->block_info)
1481 		block_info__delete(he->block_info);
1482 
1483 	if (he->kvm_info)
1484 		kvm_info__zput(he->kvm_info);
1485 
1486 	zfree(&he->res_samples);
1487 	zfree(&he->stat_acc);
1488 	zfree_srcline(&he->srcline);
1489 	if (he->srcfile && he->srcfile[0])
1490 		zfree(&he->srcfile);
1491 	free_callchain(he->callchain);
1492 	zfree(&he->trace_output);
1493 	zfree(&he->raw_data);
1494 	zfree(&he->mem_stat);
1495 	ops->free(he);
1496 }
1497 
1498 /*
1499  * If this is not the last column, then we need to pad it according to the
1500  * pre-calculated max length for this column, otherwise don't bother adding
1501  * spaces because that would break viewing this with, for instance, 'less',
1502  * that would show tons of trailing spaces when a long C++ demangled method
1503  * name is sampled.
1504  */
1505 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1506 				   struct perf_hpp_fmt *fmt, int printed)
1507 {
1508 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1509 		const int width = fmt->width(fmt, hpp, he->hists);
1510 		if (printed < width) {
1511 			advance_hpp(hpp, printed);
1512 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1513 		}
1514 	}
1515 
1516 	return printed;
1517 }
1518 
1519 /*
1520  * collapse the histogram
1521  */
1522 
1523 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1524 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1525 				       enum hist_filter type);
1526 
1527 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1528 
1529 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1530 {
1531 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1532 }
1533 
1534 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1535 						enum hist_filter type,
1536 						fmt_chk_fn check)
1537 {
1538 	struct perf_hpp_fmt *fmt;
1539 	bool type_match = false;
1540 	struct hist_entry *parent = he->parent_he;
1541 
1542 	switch (type) {
1543 	case HIST_FILTER__THREAD:
1544 		if (symbol_conf.comm_list == NULL &&
1545 		    symbol_conf.pid_list == NULL &&
1546 		    symbol_conf.tid_list == NULL)
1547 			return;
1548 		break;
1549 	case HIST_FILTER__DSO:
1550 		if (symbol_conf.dso_list == NULL)
1551 			return;
1552 		break;
1553 	case HIST_FILTER__SYMBOL:
1554 		if (symbol_conf.sym_list == NULL)
1555 			return;
1556 		break;
1557 	case HIST_FILTER__PARALLELISM:
1558 		if (__bitmap_weight(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1) == 0)
1559 			return;
1560 		break;
1561 	case HIST_FILTER__PARENT:
1562 	case HIST_FILTER__GUEST:
1563 	case HIST_FILTER__HOST:
1564 	case HIST_FILTER__SOCKET:
1565 	case HIST_FILTER__C2C:
1566 	default:
1567 		return;
1568 	}
1569 
1570 	/* if it's filtered by its own fmt, it has to have the filter bits */
1571 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1572 		if (check(fmt)) {
1573 			type_match = true;
1574 			break;
1575 		}
1576 	}
1577 
1578 	if (type_match) {
1579 		/*
1580 		 * If the filter is for the current level entry, propagate
1581 		 * filter marker to parents.  The marker bit was
1582 		 * already set by default so it only needs to clear
1583 		 * non-filtered entries.
1584 		 */
1585 		if (!(he->filtered & (1 << type))) {
1586 			while (parent) {
1587 				parent->filtered &= ~(1 << type);
1588 				parent = parent->parent_he;
1589 			}
1590 		}
1591 	} else {
1592 		/*
1593 		 * If current entry doesn't have matching formats, set
1594 		 * filter marker for upper level entries.  It will be
1595 		 * cleared if its lower level entries are not filtered.
1596 		 *
1597 		 * For lower-level entries, it inherits parent's
1598 		 * filter bit so that lower level entries of a
1599 		 * non-filtered entry won't set the filter marker.
1600 		 */
1601 		if (parent == NULL)
1602 			he->filtered |= (1 << type);
1603 		else
1604 			he->filtered |= (parent->filtered & (1 << type));
1605 	}
1606 }
1607 
1608 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1609 {
1610 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1611 					    check_thread_entry);
1612 
1613 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1614 					    perf_hpp__is_dso_entry);
1615 
1616 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1617 					    perf_hpp__is_sym_entry);
1618 
1619 	hist_entry__check_and_remove_filter(he, HIST_FILTER__PARALLELISM,
1620 					    perf_hpp__is_parallelism_entry);
1621 
1622 	hists__apply_filters(he->hists, he);
1623 }
1624 
1625 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1626 						 struct rb_root_cached *root,
1627 						 struct hist_entry *he,
1628 						 struct hist_entry *parent_he,
1629 						 struct perf_hpp_list *hpp_list)
1630 {
1631 	struct rb_node **p = &root->rb_root.rb_node;
1632 	struct rb_node *parent = NULL;
1633 	struct hist_entry *iter, *new;
1634 	struct perf_hpp_fmt *fmt;
1635 	int64_t cmp;
1636 	bool leftmost = true;
1637 
1638 	while (*p != NULL) {
1639 		parent = *p;
1640 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1641 		cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
1642 		if (!cmp) {
1643 			he_stat__add_stat(&iter->stat, &he->stat);
1644 			hists__add_mem_stat(hists, iter, he);
1645 			return iter;
1646 		}
1647 
1648 		if (cmp < 0)
1649 			p = &parent->rb_left;
1650 		else {
1651 			p = &parent->rb_right;
1652 			leftmost = false;
1653 		}
1654 	}
1655 
1656 	new = hist_entry__new(he, true);
1657 	if (new == NULL)
1658 		return NULL;
1659 
1660 	hists->nr_entries++;
1661 
1662 	/* save related format list for output */
1663 	new->hpp_list = hpp_list;
1664 	new->parent_he = parent_he;
1665 
1666 	hist_entry__apply_hierarchy_filters(new);
1667 
1668 	/* some fields are now passed to 'new' */
1669 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1670 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1671 			he->trace_output = NULL;
1672 		else
1673 			new->trace_output = NULL;
1674 
1675 		if (perf_hpp__is_srcline_entry(fmt))
1676 			he->srcline = NULL;
1677 		else
1678 			new->srcline = NULL;
1679 
1680 		if (perf_hpp__is_srcfile_entry(fmt))
1681 			he->srcfile = NULL;
1682 		else
1683 			new->srcfile = NULL;
1684 	}
1685 
1686 	if (hists__clone_mem_stat(hists, new, he) < 0) {
1687 		hist_entry__delete(new);
1688 		return NULL;
1689 	}
1690 
1691 	rb_link_node(&new->rb_node_in, parent, p);
1692 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1693 	return new;
1694 }
1695 
1696 static int hists__hierarchy_insert_entry(struct hists *hists,
1697 					 struct rb_root_cached *root,
1698 					 struct hist_entry *he)
1699 {
1700 	struct perf_hpp_list_node *node;
1701 	struct hist_entry *new_he = NULL;
1702 	struct hist_entry *parent = NULL;
1703 	int depth = 0;
1704 	int ret = 0;
1705 
1706 	list_for_each_entry(node, &hists->hpp_formats, list) {
1707 		/* skip period (overhead) and elided columns */
1708 		if (node->level == 0 || node->skip)
1709 			continue;
1710 
1711 		/* insert copy of 'he' for each fmt into the hierarchy */
1712 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1713 		if (new_he == NULL) {
1714 			ret = -1;
1715 			break;
1716 		}
1717 
1718 		root = &new_he->hroot_in;
1719 		new_he->depth = depth++;
1720 		parent = new_he;
1721 	}
1722 
1723 	if (new_he) {
1724 		new_he->leaf = true;
1725 
1726 		if (hist_entry__has_callchains(new_he) &&
1727 		    symbol_conf.use_callchain) {
1728 			struct callchain_cursor *cursor = get_tls_callchain_cursor();
1729 
1730 			if (cursor == NULL)
1731 				return -1;
1732 
1733 			callchain_cursor_reset(cursor);
1734 			if (callchain_merge(cursor,
1735 					    new_he->callchain,
1736 					    he->callchain) < 0)
1737 				ret = -1;
1738 		}
1739 	}
1740 
1741 	/* 'he' is no longer used */
1742 	hist_entry__delete(he);
1743 
1744 	/* return 0 (or -1) since filters have already been applied */
1745 	return ret;
1746 }
1747 
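/*
 * Collapse stage: merge 'he' into the collapsed tree.  An entry comparing
 * equal under the collapse keys absorbs the stats and callchain and the
 * duplicate is freed (returns 0 or -1); otherwise 'he' itself is linked
 * into the tree and 1 is returned so the caller re-applies the filters.
 */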
1748 static int hists__collapse_insert_entry(struct hists *hists,
1749 					struct rb_root_cached *root,
1750 					struct hist_entry *he)
1751 {
1752 	struct rb_node **p = &root->rb_root.rb_node;
1753 	struct rb_node *parent = NULL;
1754 	struct hist_entry *iter;
1755 	int64_t cmp;
1756 	bool leftmost = true;
1757 
1758 	if (symbol_conf.report_hierarchy)
1759 		return hists__hierarchy_insert_entry(hists, root, he);
1760 
1761 	while (*p != NULL) {
1762 		parent = *p;
1763 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1764 
1765 		cmp = hist_entry__collapse(iter, he);
1766 
1767 		if (!cmp) {
1768 			int ret = 0;
1769 
1770 			he_stat__add_stat(&iter->stat, &he->stat);
1771 			if (symbol_conf.cumulate_callchain)
1772 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1773 			hists__add_mem_stat(hists, iter, he);
1774 
1775 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1776 				struct callchain_cursor *cursor = get_tls_callchain_cursor();
1777 
1778 				if (cursor != NULL) {
1779 					callchain_cursor_reset(cursor);
1780 					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
1781 						ret = -1;
1782 				} else {
1783 					ret = 0;
1784 				}
1785 			}
1786 			hist_entry__delete(he);
1787 			return ret;
1788 		}
1789 
1790 		if (cmp < 0)
1791 			p = &(*p)->rb_left;
1792 		else {
1793 			p = &(*p)->rb_right;
1794 			leftmost = false;
1795 		}
1796 	}
1797 	hists->nr_entries++;
1798 
1799 	rb_link_node(&he->rb_node_in, parent, p);
1800 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1801 	return 1;
1802 }
1803 
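/*
 * The input entries are double buffered: return the tree that was being
 * filled and switch new insertions over to the other slot of
 * entries_in_array, so one tree can be drained while new entries keep
 * being added to the other.
 */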
1804 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1805 {
1806 	struct rb_root_cached *root;
1807 
1808 	mutex_lock(&hists->lock);
1809 
1810 	root = hists->entries_in;
1811 	if (++hists->entries_in > &hists->entries_in_array[1])
1812 		hists->entries_in = &hists->entries_in_array[0];
1813 
1814 	mutex_unlock(&hists->lock);
1815 
1816 	return root;
1817 }
1818 
1819 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1820 {
1821 	hists__filter_entry_by_dso(hists, he);
1822 	hists__filter_entry_by_thread(hists, he);
1823 	hists__filter_entry_by_symbol(hists, he);
1824 	hists__filter_entry_by_socket(hists, he);
1825 	hists__filter_entry_by_parallelism(hists, he);
1826 }
1827 
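/*
 * Drain the current input tree and merge every entry into
 * hists->entries_collapsed (or into the hierarchy trees when --hierarchy
 * is used), re-applying the filters for entries that were not merged into
 * an existing one.
 */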
1828 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1829 {
1830 	struct rb_root_cached *root;
1831 	struct rb_node *next;
1832 	struct hist_entry *n;
1833 	int ret;
1834 
1835 	if (!hists__has(hists, need_collapse))
1836 		return 0;
1837 
1838 	hists->nr_entries = 0;
1839 
1840 	root = hists__get_rotate_entries_in(hists);
1841 
1842 	next = rb_first_cached(root);
1843 
1844 	while (next) {
1845 		if (session_done())
1846 			break;
1847 		n = rb_entry(next, struct hist_entry, rb_node_in);
1848 		next = rb_next(&n->rb_node_in);
1849 
1850 		rb_erase_cached(&n->rb_node_in, root);
1851 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1852 		if (ret < 0)
1853 			return -1;
1854 
1855 		if (ret) {
1856 			/*
1857 			 * If it wasn't combined with one of the entries already
1858 			 * collapsed, we need to apply the filters that may have
1859 			 * been set by, say, the hist_browser.
1860 			 */
1861 			hists__apply_filters(hists, n);
1862 		}
1863 		if (prog)
1864 			ui_progress__update(prog, 1);
1865 	}
1866 	return 0;
1867 }
1868 
1869 static void hists__reset_filter_stats(struct hists *hists)
1870 {
1871 	hists->nr_non_filtered_entries = 0;
1872 	hists->stats.total_non_filtered_period = 0;
1873 	hists->stats.total_non_filtered_latency = 0;
1874 }
1875 
1876 void hists__reset_stats(struct hists *hists)
1877 {
1878 	hists->nr_entries = 0;
1879 	hists->stats.total_period = 0;
1880 	hists->stats.total_latency = 0;
1881 
1882 	hists__reset_filter_stats(hists);
1883 }
1884 
1885 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1886 {
1887 	hists->nr_non_filtered_entries++;
1888 	hists->stats.total_non_filtered_period += h->stat.period;
1889 	hists->stats.total_non_filtered_latency += h->stat.latency;
1890 }
1891 
1892 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1893 {
1894 	if (!h->filtered)
1895 		hists__inc_filter_stats(hists, h);
1896 
1897 	hists->nr_entries++;
1898 	hists->stats.total_period += h->stat.period;
1899 	hists->stats.total_latency += h->stat.latency;
1900 }
1901 
1902 static void hierarchy_recalc_total_periods(struct hists *hists)
1903 {
1904 	struct rb_node *node;
1905 	struct hist_entry *he;
1906 
1907 	node = rb_first_cached(&hists->entries);
1908 
1909 	hists->stats.total_period = 0;
1910 	hists->stats.total_non_filtered_period = 0;
1911 	hists->stats.total_latency = 0;
1912 	hists->stats.total_non_filtered_latency = 0;
1913 
1914 	/*
1915 	 * Recalculate the total period using top-level entries only,
1916 	 * since lower-level entries see only non-filtered entries
1917 	 * while upper-level entries hold the sum of both.
1918 	 */
1919 	while (node) {
1920 		he = rb_entry(node, struct hist_entry, rb_node);
1921 		node = rb_next(node);
1922 
1923 		hists->stats.total_period += he->stat.period;
1924 		hists->stats.total_latency += he->stat.latency;
1925 		if (!he->filtered) {
1926 			hists->stats.total_non_filtered_period += he->stat.period;
1927 			hists->stats.total_non_filtered_latency += he->stat.latency;
1928 		}
1929 	}
1930 }
1931 
1932 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1933 					  struct hist_entry *he)
1934 {
1935 	struct rb_node **p = &root->rb_root.rb_node;
1936 	struct rb_node *parent = NULL;
1937 	struct hist_entry *iter;
1938 	struct perf_hpp_fmt *fmt;
1939 	bool leftmost = true;
1940 
1941 	while (*p != NULL) {
1942 		parent = *p;
1943 		iter = rb_entry(parent, struct hist_entry, rb_node);
1944 
1945 		if (hist_entry__sort(he, iter) > 0)
1946 			p = &parent->rb_left;
1947 		else {
1948 			p = &parent->rb_right;
1949 			leftmost = false;
1950 		}
1951 	}
1952 
1953 	rb_link_node(&he->rb_node, parent, p);
1954 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1955 
1956 	/* update column width of dynamic entry */
1957 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1958 		if (fmt->init)
1959 			fmt->init(fmt, he);
1960 	}
1961 }
1962 
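/*
 * Recursively move hierarchy entries from their collapsed trees into the
 * output trees, updating entry counts and column widths, and sorting the
 * callchain of each leaf entry according to callchain_param.
 */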
1963 static void hists__hierarchy_output_resort(struct hists *hists,
1964 					   struct ui_progress *prog,
1965 					   struct rb_root_cached *root_in,
1966 					   struct rb_root_cached *root_out,
1967 					   u64 min_callchain_hits,
1968 					   bool use_callchain)
1969 {
1970 	struct rb_node *node;
1971 	struct hist_entry *he;
1972 
1973 	*root_out = RB_ROOT_CACHED;
1974 	node = rb_first_cached(root_in);
1975 
1976 	while (node) {
1977 		he = rb_entry(node, struct hist_entry, rb_node_in);
1978 		node = rb_next(node);
1979 
1980 		hierarchy_insert_output_entry(root_out, he);
1981 
1982 		if (prog)
1983 			ui_progress__update(prog, 1);
1984 
1985 		hists->nr_entries++;
1986 		if (!he->filtered) {
1987 			hists->nr_non_filtered_entries++;
1988 			hists__calc_col_len(hists, he);
1989 		}
1990 
1991 		if (!he->leaf) {
1992 			hists__hierarchy_output_resort(hists, prog,
1993 						       &he->hroot_in,
1994 						       &he->hroot_out,
1995 						       min_callchain_hits,
1996 						       use_callchain);
1997 			continue;
1998 		}
1999 
2000 		if (!use_callchain)
2001 			continue;
2002 
2003 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
2004 			u64 total = he->stat.period;
2005 
2006 			if (symbol_conf.cumulate_callchain)
2007 				total = he->stat_acc->period;
2008 
2009 			min_callchain_hits = total * (callchain_param.min_percent / 100);
2010 		}
2011 
2012 		callchain_param.sort(&he->sorted_chain, he->callchain,
2013 				     min_callchain_hits, &callchain_param);
2014 	}
2015 }
2016 
2017 static void __hists__insert_output_entry(struct rb_root_cached *entries,
2018 					 struct hist_entry *he,
2019 					 u64 min_callchain_hits,
2020 					 bool use_callchain)
2021 {
2022 	struct rb_node **p = &entries->rb_root.rb_node;
2023 	struct rb_node *parent = NULL;
2024 	struct hist_entry *iter;
2025 	struct perf_hpp_fmt *fmt;
2026 	bool leftmost = true;
2027 
2028 	if (use_callchain) {
2029 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
2030 			u64 total = he->stat.period;
2031 
2032 			if (symbol_conf.cumulate_callchain)
2033 				total = he->stat_acc->period;
2034 
2035 			min_callchain_hits = total * (callchain_param.min_percent / 100);
2036 		}
2037 		callchain_param.sort(&he->sorted_chain, he->callchain,
2038 				      min_callchain_hits, &callchain_param);
2039 	}
2040 
2041 	while (*p != NULL) {
2042 		parent = *p;
2043 		iter = rb_entry(parent, struct hist_entry, rb_node);
2044 
2045 		if (hist_entry__sort(he, iter) > 0)
2046 			p = &(*p)->rb_left;
2047 		else {
2048 			p = &(*p)->rb_right;
2049 			leftmost = false;
2050 		}
2051 	}
2052 
2053 	rb_link_node(&he->rb_node, parent, p);
2054 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
2055 
2056 	/* update column width of dynamic entries */
2057 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
2058 		if (fmt->init)
2059 			fmt->init(fmt, he);
2060 	}
2061 }
2062 
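/*
 * Final sort for display: reset stats and column widths, then move all
 * collapsed entries into hists->entries in output order.  Entries for
 * which the optional callback returns non-zero are skipped.
 */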
2063 static void output_resort(struct hists *hists, struct ui_progress *prog,
2064 			  bool use_callchain, hists__resort_cb_t cb,
2065 			  void *cb_arg)
2066 {
2067 	struct rb_root_cached *root;
2068 	struct rb_node *next;
2069 	struct hist_entry *n;
2070 	u64 callchain_total;
2071 	u64 min_callchain_hits;
2072 
2073 	callchain_total = hists->callchain_period;
2074 	if (symbol_conf.filter_relative)
2075 		callchain_total = hists->callchain_non_filtered_period;
2076 
2077 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
2078 
2079 	hists__reset_stats(hists);
2080 	hists__reset_col_len(hists);
2081 
2082 	if (symbol_conf.report_hierarchy) {
2083 		hists__hierarchy_output_resort(hists, prog,
2084 					       &hists->entries_collapsed,
2085 					       &hists->entries,
2086 					       min_callchain_hits,
2087 					       use_callchain);
2088 		hierarchy_recalc_total_periods(hists);
2089 		return;
2090 	}
2091 
2092 	if (hists__has(hists, need_collapse))
2093 		root = &hists->entries_collapsed;
2094 	else
2095 		root = hists->entries_in;
2096 
2097 	next = rb_first_cached(root);
2098 	hists->entries = RB_ROOT_CACHED;
2099 
2100 	while (next) {
2101 		n = rb_entry(next, struct hist_entry, rb_node_in);
2102 		next = rb_next(&n->rb_node_in);
2103 
2104 		if (cb && cb(n, cb_arg))
2105 			continue;
2106 
2107 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
2108 		hists__inc_stats(hists, n);
2109 
2110 		if (!n->filtered)
2111 			hists__calc_col_len(hists, n);
2112 
2113 		if (prog)
2114 			ui_progress__update(prog, 1);
2115 	}
2116 }
2117 
2118 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
2119 			     hists__resort_cb_t cb, void *cb_arg)
2120 {
2121 	bool use_callchain;
2122 
2123 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
2124 		use_callchain = evsel__has_callchain(evsel);
2125 	else
2126 		use_callchain = symbol_conf.use_callchain;
2127 
2128 	use_callchain |= symbol_conf.show_branchflag_count;
2129 
2130 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
2131 }
2132 
2133 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
2134 {
2135 	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
2136 }
2137 
2138 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
2139 {
2140 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
2141 }
2142 
2143 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
2144 			     hists__resort_cb_t cb)
2145 {
2146 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
2147 }
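
/*
 * A minimal usage sketch (assuming a report-style caller with a 'session'
 * and a ui_progress 'prog'; not taken from this file): collapse first,
 * then resort for output before rendering:
 *
 *	evlist__for_each_entry(session->evlist, pos) {
 *		hists__collapse_resort(evsel__hists(pos), &prog);
 *		evsel__output_resort(pos, &prog);
 *	}
 */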
2148 
2149 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
2150 {
2151 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
2152 		return false;
2153 
2154 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
2155 		return true;
2156 
2157 	return false;
2158 }
2159 
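/*
 * Helpers to walk the output hierarchy as a flat sequence of rows: descend
 * into children when the entry is unfolded (or forced by 'hmd'), otherwise
 * step to the next sibling, climbing back to the parent once a subtree is
 * exhausted.
 */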
2160 struct rb_node *rb_hierarchy_last(struct rb_node *node)
2161 {
2162 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2163 
2164 	while (can_goto_child(he, HMD_NORMAL)) {
2165 		node = rb_last(&he->hroot_out.rb_root);
2166 		he = rb_entry(node, struct hist_entry, rb_node);
2167 	}
2168 	return node;
2169 }
2170 
2171 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2172 {
2173 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2174 
2175 	if (can_goto_child(he, hmd))
2176 		node = rb_first_cached(&he->hroot_out);
2177 	else
2178 		node = rb_next(node);
2179 
2180 	while (node == NULL) {
2181 		he = he->parent_he;
2182 		if (he == NULL)
2183 			break;
2184 
2185 		node = rb_next(&he->rb_node);
2186 	}
2187 	return node;
2188 }
2189 
2190 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2191 {
2192 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2193 
2194 	node = rb_prev(node);
2195 	if (node)
2196 		return rb_hierarchy_last(node);
2197 
2198 	he = he->parent_he;
2199 	if (he == NULL)
2200 		return NULL;
2201 
2202 	return &he->rb_node;
2203 }
2204 
2205 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2206 {
2207 	struct rb_node *node;
2208 	struct hist_entry *child;
2209 	float percent;
2210 
2211 	if (he->leaf)
2212 		return false;
2213 
2214 	node = rb_first_cached(&he->hroot_out);
2215 	child = rb_entry(node, struct hist_entry, rb_node);
2216 
2217 	while (node && child->filtered) {
2218 		node = rb_next(node);
2219 		child = rb_entry(node, struct hist_entry, rb_node);
2220 	}
2221 
2222 	if (node)
2223 		percent = hist_entry__get_percent_limit(child);
2224 	else
2225 		percent = 0;
2226 
2227 	return node && percent >= limit;
2228 }
2229 
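/*
 * Clear 'filter' for an entry that passed the check.  In hierarchy mode the
 * entry's stats are added back to all of its parents and the parents are
 * unfiltered too; once the entry has no remaining filters, the non-filtered
 * totals and column widths are updated.
 */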
2230 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2231 				       enum hist_filter filter)
2232 {
2233 	h->filtered &= ~(1 << filter);
2234 
2235 	if (symbol_conf.report_hierarchy) {
2236 		struct hist_entry *parent = h->parent_he;
2237 
2238 		while (parent) {
2239 			he_stat__add_stat(&parent->stat, &h->stat);
2240 
2241 			parent->filtered &= ~(1 << filter);
2242 
2243 			if (parent->filtered)
2244 				goto next;
2245 
2246 			/* force fold unfiltered entry for simplicity */
2247 			parent->unfolded = false;
2248 			parent->has_no_entry = false;
2249 			parent->row_offset = 0;
2250 			parent->nr_rows = 0;
2251 next:
2252 			parent = parent->parent_he;
2253 		}
2254 	}
2255 
2256 	if (h->filtered)
2257 		return;
2258 
2259 	/* force fold unfiltered entry for simplicity */
2260 	h->unfolded = false;
2261 	h->has_no_entry = false;
2262 	h->row_offset = 0;
2263 	h->nr_rows = 0;
2264 
2265 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2266 
2267 	hists__inc_filter_stats(hists, h);
2268 	hists__calc_col_len(hists, h);
2269 }
2270 
2271 
2272 static bool hists__filter_entry_by_dso(struct hists *hists,
2273 				       struct hist_entry *he)
2274 {
2275 	if (hists->dso_filter != NULL &&
2276 	    (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
2277 		he->filtered |= (1 << HIST_FILTER__DSO);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
2284 static bool hists__filter_entry_by_thread(struct hists *hists,
2285 					  struct hist_entry *he)
2286 {
2287 	if (hists->thread_filter != NULL &&
2288 	    !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
2289 		he->filtered |= (1 << HIST_FILTER__THREAD);
2290 		return true;
2291 	}
2292 
2293 	return false;
2294 }
2295 
2296 static bool hists__filter_entry_by_symbol(struct hists *hists,
2297 					  struct hist_entry *he)
2298 {
2299 	if (hists->symbol_filter_str != NULL &&
2300 	    (!he->ms.sym || strstr(he->ms.sym->name,
2301 				   hists->symbol_filter_str) == NULL)) {
2302 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2303 		return true;
2304 	}
2305 
2306 	return false;
2307 }
2308 
2309 static bool hists__filter_entry_by_socket(struct hists *hists,
2310 					  struct hist_entry *he)
2311 {
2312 	if ((hists->socket_filter > -1) &&
2313 	    (he->socket != hists->socket_filter)) {
2314 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2315 		return true;
2316 	}
2317 
2318 	return false;
2319 }
2320 
2321 static bool hists__filter_entry_by_parallelism(struct hists *hists,
2322 					       struct hist_entry *he)
2323 {
2324 	if (test_bit(he->parallelism, hists->parallelism_filter)) {
2325 		he->filtered |= (1 << HIST_FILTER__PARALLELISM);
2326 		return true;
2327 	}
2328 	return false;
2329 }
2330 
2331 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2332 
2333 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2334 {
2335 	struct rb_node *nd;
2336 
2337 	hists->stats.nr_non_filtered_samples = 0;
2338 
2339 	hists__reset_filter_stats(hists);
2340 	hists__reset_col_len(hists);
2341 
2342 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2343 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2344 
2345 		if (filter(hists, h))
2346 			continue;
2347 
2348 		hists__remove_entry_filter(hists, h, type);
2349 	}
2350 }
2351 
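/*
 * Re-insert a hierarchy entry into a fresh output tree and, unless it is a
 * leaf or filtered out, recursively rebuild the output trees of its
 * children so the display order reflects the periods left after filtering.
 */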
2352 static void resort_filtered_entry(struct rb_root_cached *root,
2353 				  struct hist_entry *he)
2354 {
2355 	struct rb_node **p = &root->rb_root.rb_node;
2356 	struct rb_node *parent = NULL;
2357 	struct hist_entry *iter;
2358 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2359 	struct rb_node *nd;
2360 	bool leftmost = true;
2361 
2362 	while (*p != NULL) {
2363 		parent = *p;
2364 		iter = rb_entry(parent, struct hist_entry, rb_node);
2365 
2366 		if (hist_entry__sort(he, iter) > 0)
2367 			p = &(*p)->rb_left;
2368 		else {
2369 			p = &(*p)->rb_right;
2370 			leftmost = false;
2371 		}
2372 	}
2373 
2374 	rb_link_node(&he->rb_node, parent, p);
2375 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2376 
2377 	if (he->leaf || he->filtered)
2378 		return;
2379 
2380 	nd = rb_first_cached(&he->hroot_out);
2381 	while (nd) {
2382 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2383 
2384 		nd = rb_next(nd);
2385 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2386 
2387 		resort_filtered_entry(&new_root, h);
2388 	}
2389 
2390 	he->hroot_out = new_root;
2391 }
2392 
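/*
 * Apply a filter to a hierarchy: walk the output tree once, handling the
 * three cases documented below, then recompute the totals and re-sort the
 * whole output tree, since filtering a lower level can change the periods
 * seen at upper levels.
 */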
2393 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2394 {
2395 	struct rb_node *nd;
2396 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2397 
2398 	hists->stats.nr_non_filtered_samples = 0;
2399 
2400 	hists__reset_filter_stats(hists);
2401 	hists__reset_col_len(hists);
2402 
2403 	nd = rb_first_cached(&hists->entries);
2404 	while (nd) {
2405 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2406 		int ret;
2407 
2408 		ret = hist_entry__filter(h, type, arg);
2409 
2410 		/*
2411 		 * case 1. non-matching type
2412 		 * zero out the period, set filter marker and move to child
2413 		 */
2414 		if (ret < 0) {
2415 			memset(&h->stat, 0, sizeof(h->stat));
2416 			h->filtered |= (1 << type);
2417 
2418 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2419 		}
2420 		/*
2421 		 * case 2. matched type (filter out)
2422 		 * set filter marker and move to next
2423 		 */
2424 		else if (ret == 1) {
2425 			h->filtered |= (1 << type);
2426 
2427 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2428 		}
2429 		/*
2430 		 * case 3. ok (not filtered)
2431 		 * add period to hists and parents, erase the filter marker
2432 		 * and move to next sibling
2433 		 */
2434 		else {
2435 			hists__remove_entry_filter(hists, h, type);
2436 
2437 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2438 		}
2439 	}
2440 
2441 	hierarchy_recalc_total_periods(hists);
2442 
2443 	/*
2444 	 * Resort the output after applying a new filter, since a filter at a
2445 	 * lower hierarchy level can change the periods at an upper level.
2446 	 */
2447 	nd = rb_first_cached(&hists->entries);
2448 	while (nd) {
2449 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2450 
2451 		nd = rb_next(nd);
2452 		rb_erase_cached(&h->rb_node, &hists->entries);
2453 
2454 		resort_filtered_entry(&new_root, h);
2455 	}
2456 
2457 	hists->entries = new_root;
2458 }
2459 
2460 void hists__filter_by_thread(struct hists *hists)
2461 {
2462 	if (symbol_conf.report_hierarchy)
2463 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2464 					hists->thread_filter);
2465 	else
2466 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2467 				      hists__filter_entry_by_thread);
2468 }
2469 
2470 void hists__filter_by_dso(struct hists *hists)
2471 {
2472 	if (symbol_conf.report_hierarchy)
2473 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2474 					hists->dso_filter);
2475 	else
2476 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2477 				      hists__filter_entry_by_dso);
2478 }
2479 
2480 void hists__filter_by_symbol(struct hists *hists)
2481 {
2482 	if (symbol_conf.report_hierarchy)
2483 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2484 					hists->symbol_filter_str);
2485 	else
2486 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2487 				      hists__filter_entry_by_symbol);
2488 }
2489 
2490 void hists__filter_by_socket(struct hists *hists)
2491 {
2492 	if (symbol_conf.report_hierarchy)
2493 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2494 					&hists->socket_filter);
2495 	else
2496 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2497 				      hists__filter_entry_by_socket);
2498 }
2499 
2500 void hists__filter_by_parallelism(struct hists *hists)
2501 {
2502 	if (symbol_conf.report_hierarchy)
2503 		hists__filter_hierarchy(hists, HIST_FILTER__PARALLELISM,
2504 					hists->parallelism_filter);
2505 	else
2506 		hists__filter_by_type(hists, HIST_FILTER__PARALLELISM,
2507 				      hists__filter_entry_by_parallelism);
2508 }
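
/*
 * A sketch of how a UI is expected to drive these (assuming a 'thread'
 * chosen by the browser; not taken from this file): set the filter on the
 * hists and re-filter:
 *
 *	hists->thread_filter = thread__get(thread);
 *	hists__filter_by_thread(hists);
 */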
2509 
2510 void events_stats__inc(struct events_stats *stats, u32 type)
2511 {
2512 	++stats->nr_events[0];
2513 	++stats->nr_events[type];
2514 }
2515 
2516 static void hists_stats__inc(struct hists_stats *stats)
2517 {
2518 	++stats->nr_samples;
2519 }
2520 
2521 void hists__inc_nr_events(struct hists *hists)
2522 {
2523 	hists_stats__inc(&hists->stats);
2524 }
2525 
2526 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2527 {
2528 	hists_stats__inc(&hists->stats);
2529 	if (!filtered)
2530 		hists->stats.nr_non_filtered_samples++;
2531 }
2532 
2533 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2534 {
2535 	hists->stats.nr_lost_samples += lost;
2536 }
2537 
2538 void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
2539 {
2540 	hists->stats.nr_dropped_samples += lost;
2541 }
2542 
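/*
 * Add a zero-period placeholder entry matching 'pair' to 'hists', so that
 * hists__link() can pair every entry of another evsel even when this one
 * never sampled that location.
 */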
2543 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2544 						 struct hist_entry *pair)
2545 {
2546 	struct rb_root_cached *root;
2547 	struct rb_node **p;
2548 	struct rb_node *parent = NULL;
2549 	struct hist_entry *he;
2550 	int64_t cmp;
2551 	bool leftmost = true;
2552 
2553 	if (hists__has(hists, need_collapse))
2554 		root = &hists->entries_collapsed;
2555 	else
2556 		root = hists->entries_in;
2557 
2558 	p = &root->rb_root.rb_node;
2559 
2560 	while (*p != NULL) {
2561 		parent = *p;
2562 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2563 
2564 		cmp = hist_entry__collapse(he, pair);
2565 
2566 		if (!cmp)
2567 			goto out;
2568 
2569 		if (cmp < 0)
2570 			p = &(*p)->rb_left;
2571 		else {
2572 			p = &(*p)->rb_right;
2573 			leftmost = false;
2574 		}
2575 	}
2576 
2577 	he = hist_entry__new(pair, true);
2578 	if (he) {
2579 		memset(&he->stat, 0, sizeof(he->stat));
2580 		he->hists = hists;
2581 		if (symbol_conf.cumulate_callchain)
2582 			memset(he->stat_acc, 0, sizeof(he->stat));
2583 		rb_link_node(&he->rb_node_in, parent, p);
2584 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2585 		hists__inc_stats(hists, he);
2586 		he->dummy = true;
2587 	}
2588 out:
2589 	return he;
2590 }
2591 
2592 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2593 						    struct rb_root_cached *root,
2594 						    struct hist_entry *pair)
2595 {
2596 	struct rb_node **p;
2597 	struct rb_node *parent = NULL;
2598 	struct hist_entry *he;
2599 	bool leftmost = true;
2600 
2601 	p = &root->rb_root.rb_node;
2602 	while (*p != NULL) {
2603 		int64_t cmp;
2604 
2605 		parent = *p;
2606 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2607 		cmp = hist_entry__collapse_hierarchy(he->hpp_list, he, pair);
2608 		if (!cmp)
2609 			goto out;
2610 
2611 		if (cmp < 0)
2612 			p = &parent->rb_left;
2613 		else {
2614 			p = &parent->rb_right;
2615 			leftmost = false;
2616 		}
2617 	}
2618 
2619 	he = hist_entry__new(pair, true);
2620 	if (he) {
2621 		rb_link_node(&he->rb_node_in, parent, p);
2622 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2623 
2624 		he->dummy = true;
2625 		he->hists = hists;
2626 		memset(&he->stat, 0, sizeof(he->stat));
2627 		hists__inc_stats(hists, he);
2628 	}
2629 out:
2630 	return he;
2631 }
2632 
2633 static struct hist_entry *hists__find_entry(struct hists *hists,
2634 					    struct hist_entry *he)
2635 {
2636 	struct rb_node *n;
2637 
2638 	if (hists__has(hists, need_collapse))
2639 		n = hists->entries_collapsed.rb_root.rb_node;
2640 	else
2641 		n = hists->entries_in->rb_root.rb_node;
2642 
2643 	while (n) {
2644 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2645 		int64_t cmp = hist_entry__collapse(iter, he);
2646 
2647 		if (cmp < 0)
2648 			n = n->rb_left;
2649 		else if (cmp > 0)
2650 			n = n->rb_right;
2651 		else
2652 			return iter;
2653 	}
2654 
2655 	return NULL;
2656 }
2657 
2658 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2659 						      struct hist_entry *he)
2660 {
2661 	struct rb_node *n = root->rb_root.rb_node;
2662 
2663 	while (n) {
2664 		struct hist_entry *iter;
2665 		int64_t cmp;
2666 
2667 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2668 		cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
2669 		if (cmp < 0)
2670 			n = n->rb_left;
2671 		else if (cmp > 0)
2672 			n = n->rb_right;
2673 		else
2674 			return iter;
2675 	}
2676 
2677 	return NULL;
2678 }
2679 
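/*
 * Recursively pair up hierarchy entries of two hists that share the same
 * keys at every level.
 */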
2680 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2681 				   struct rb_root_cached *other_root)
2682 {
2683 	struct rb_node *nd;
2684 	struct hist_entry *pos, *pair;
2685 
2686 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2687 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2688 		pair = hists__find_hierarchy_entry(other_root, pos);
2689 
2690 		if (pair) {
2691 			hist_entry__add_pair(pair, pos);
2692 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2693 		}
2694 	}
2695 }
2696 
2697 /*
2698  * Look for pairs to link to the leader buckets (hist_entries):
2699  */
2700 void hists__match(struct hists *leader, struct hists *other)
2701 {
2702 	struct rb_root_cached *root;
2703 	struct rb_node *nd;
2704 	struct hist_entry *pos, *pair;
2705 
2706 	if (symbol_conf.report_hierarchy) {
2707 		/* hierarchy report always collapses entries */
2708 		return hists__match_hierarchy(&leader->entries_collapsed,
2709 					      &other->entries_collapsed);
2710 	}
2711 
2712 	if (hists__has(leader, need_collapse))
2713 		root = &leader->entries_collapsed;
2714 	else
2715 		root = leader->entries_in;
2716 
2717 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2718 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2719 		pair = hists__find_entry(other, pos);
2720 
2721 		if (pair)
2722 			hist_entry__add_pair(pair, pos);
2723 	}
2724 }
2725 
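/*
 * Hierarchy counterpart of hists__link(): entries in 'other' that have no
 * pair in the leader get zero-period dummy entries inserted into the
 * leader's hierarchy, level by level, and are paired with them.
 */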
2726 static int hists__link_hierarchy(struct hists *leader_hists,
2727 				 struct hist_entry *parent,
2728 				 struct rb_root_cached *leader_root,
2729 				 struct rb_root_cached *other_root)
2730 {
2731 	struct rb_node *nd;
2732 	struct hist_entry *pos, *leader;
2733 
2734 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2735 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2736 
2737 		if (hist_entry__has_pairs(pos)) {
2738 			bool found = false;
2739 
2740 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2741 				if (leader->hists == leader_hists) {
2742 					found = true;
2743 					break;
2744 				}
2745 			}
2746 			if (!found)
2747 				return -1;
2748 		} else {
2749 			leader = add_dummy_hierarchy_entry(leader_hists,
2750 							   leader_root, pos);
2751 			if (leader == NULL)
2752 				return -1;
2753 
2754 			/* do not point parent in the pos */
2755 			/* do not set the parent pointer in 'pos'; only the dummy leader gets it */
2756 
2757 			hist_entry__add_pair(pos, leader);
2758 		}
2759 
2760 		if (!pos->leaf) {
2761 			if (hists__link_hierarchy(leader_hists, leader,
2762 						  &leader->hroot_in,
2763 						  &pos->hroot_in) < 0)
2764 				return -1;
2765 		}
2766 	}
2767 	return 0;
2768 }
2769 
2770 /*
2771  * Look for entries in the other hists that are not present in the leader;
2772  * if we find them, just add a dummy entry on the leader hists, with period=0,
2773  * nr_events=0, to serve as the list header.
2774  */
2775 int hists__link(struct hists *leader, struct hists *other)
2776 {
2777 	struct rb_root_cached *root;
2778 	struct rb_node *nd;
2779 	struct hist_entry *pos, *pair;
2780 
2781 	if (symbol_conf.report_hierarchy) {
2782 		/* hierarchy report always collapses entries */
2783 		return hists__link_hierarchy(leader, NULL,
2784 					     &leader->entries_collapsed,
2785 					     &other->entries_collapsed);
2786 	}
2787 
2788 	if (hists__has(other, need_collapse))
2789 		root = &other->entries_collapsed;
2790 	else
2791 		root = other->entries_in;
2792 
2793 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2794 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2795 
2796 		if (!hist_entry__has_pairs(pos)) {
2797 			pair = hists__add_dummy_entry(leader, pos);
2798 			if (pair == NULL)
2799 				return -1;
2800 			hist_entry__add_pair(pos, pair);
2801 		}
2802 	}
2803 
2804 	return 0;
2805 }
2806 
2807 int hists__unlink(struct hists *hists)
2808 {
2809 	struct rb_root_cached *root;
2810 	struct rb_node *nd;
2811 	struct hist_entry *pos;
2812 
2813 	if (hists__has(hists, need_collapse))
2814 		root = &hists->entries_collapsed;
2815 	else
2816 		root = hists->entries_in;
2817 
2818 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2819 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2820 		list_del_init(&pos->pairs.node);
2821 	}
2822 
2823 	return 0;
2824 }
2825 
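/*
 * Account the cycle counts recorded in a sample's branch stack.  Branches
 * are stored newest first, so they are walked in reverse to reconstruct
 * program order; the previous branch target is used for IPC computation
 * unless a non-standard branch mode forces it to NULL.
 */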
2826 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2827 			  struct perf_sample *sample, bool nonany_branch_mode,
2828 			  u64 *total_cycles, struct evsel *evsel)
2829 {
2830 	struct branch_info *bi;
2831 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2832 
2833 	/* If we have branch cycles, always annotate them. */
2834 	if (bs && bs->nr && entries[0].flags.cycles) {
2835 		bi = sample__resolve_bstack(sample, al);
2836 		if (bi) {
2837 			struct addr_map_symbol *prev = NULL;
2838 
2839 			/*
2840 			 * Ignore errors; we still want to process the
2841 			 * other entries.
2842 			 *
2843 			 * For non-standard branch modes, always
2844 			 * force no IPC (prev == NULL).
2845 			 *
2846 			 * Note that perf stores branches reversed from
2847 			 * program order!
2848 			 */
2849 			for (int i = bs->nr - 1; i >= 0; i--) {
2850 				addr_map_symbol__account_cycles(&bi[i].from,
2851 					nonany_branch_mode ? NULL : prev,
2852 					bi[i].flags.cycles, evsel,
2853 					bi[i].branch_stack_cntr);
2854 				prev = &bi[i].to;
2855 
2856 				if (total_cycles)
2857 					*total_cycles += bi[i].flags.cycles;
2858 			}
2859 			for (unsigned int i = 0; i < bs->nr; i++) {
2860 				map_symbol__exit(&bi[i].to.ms);
2861 				map_symbol__exit(&bi[i].from.ms);
2862 			}
2863 			free(bi);
2864 		}
2865 	}
2866 }
2867 
2868 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2869 {
2870 	struct evsel *pos;
2871 	size_t ret = 0;
2872 
2873 	evlist__for_each_entry(evlist, pos) {
2874 		struct hists *hists = evsel__hists(pos);
2875 		u64 total_samples = hists->stats.nr_samples;
2876 
2877 		total_samples += hists->stats.nr_lost_samples;
2878 		total_samples += hists->stats.nr_dropped_samples;
2879 
2880 		if (symbol_conf.skip_empty && total_samples == 0)
2881 			continue;
2882 
2883 		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2884 		if (hists->stats.nr_samples)
2885 			ret += fprintf(fp, "%20s events: %10d\n",
2886 				       "SAMPLE", hists->stats.nr_samples);
2887 		if (hists->stats.nr_lost_samples)
2888 			ret += fprintf(fp, "%20s events: %10d\n",
2889 				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
2890 		if (hists->stats.nr_dropped_samples)
2891 			ret += fprintf(fp, "%20s events: %10d\n",
2892 				       "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
2893 	}
2894 
2895 	return ret;
2896 }
2897 
2898 
2899 u64 hists__total_period(struct hists *hists)
2900 {
2901 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2902 		hists->stats.total_period;
2903 }
2904 
2905 u64 hists__total_latency(struct hists *hists)
2906 {
2907 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_latency :
2908 		hists->stats.total_latency;
2909 }
2910 
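/*
 * Format the title/summary line: sample and event counts (relative or
 * absolute depending on the percentage mode), the event name and, when
 * active, the UID, thread, DSO and processor socket filters.
 */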
2911 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2912 {
2913 	char unit;
2914 	int printed;
2915 	const struct dso *dso = hists->dso_filter;
2916 	struct thread *thread = hists->thread_filter;
2917 	int socket_id = hists->socket_filter;
2918 	unsigned long nr_samples = hists->stats.nr_samples;
2919 	u64 nr_events = hists->stats.total_period;
2920 	struct evsel *evsel = hists_to_evsel(hists);
2921 	const char *ev_name = evsel__name(evsel);
2922 	char buf[512], sample_freq_str[64] = "";
2923 	size_t buflen = sizeof(buf);
2924 	char ref[30] = " show reference callgraph, ";
2925 	bool enable_ref = false;
2926 
2927 	if (symbol_conf.filter_relative) {
2928 		nr_samples = hists->stats.nr_non_filtered_samples;
2929 		nr_events = hists->stats.total_non_filtered_period;
2930 	}
2931 
2932 	if (evsel__is_group_event(evsel)) {
2933 		struct evsel *pos;
2934 
2935 		evsel__group_desc(evsel, buf, buflen);
2936 		ev_name = buf;
2937 
2938 		for_each_group_member(pos, evsel) {
2939 			struct hists *pos_hists = evsel__hists(pos);
2940 
2941 			if (symbol_conf.filter_relative) {
2942 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2943 				nr_events += pos_hists->stats.total_non_filtered_period;
2944 			} else {
2945 				nr_samples += pos_hists->stats.nr_samples;
2946 				nr_events += pos_hists->stats.total_period;
2947 			}
2948 		}
2949 	}
2950 
2951 	if (symbol_conf.show_ref_callgraph &&
2952 	    strstr(ev_name, "call-graph=no"))
2953 		enable_ref = true;
2954 
2955 	if (show_freq)
2956 		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2957 
2958 	nr_samples = convert_unit(nr_samples, &unit);
2959 	printed = scnprintf(bf, size,
2960 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2961 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2962 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2963 
2964 
2965 	if (hists->uid_filter_str)
2966 		printed += snprintf(bf + printed, size - printed,
2967 				    ", UID: %s", hists->uid_filter_str);
2968 	if (thread) {
2969 		if (hists__has(hists, thread)) {
2970 			printed += scnprintf(bf + printed, size - printed,
2971 				    ", Thread: %s(%d)",
2972 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2973 					thread__tid(thread));
2974 		} else {
2975 			printed += scnprintf(bf + printed, size - printed,
2976 				    ", Thread: %s",
2977 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
2978 		}
2979 	}
2980 	if (dso)
2981 		printed += scnprintf(bf + printed, size - printed,
2982 				     ", DSO: %s", dso__short_name(dso));
2983 	if (socket_id > -1)
2984 		printed += scnprintf(bf + printed, size - printed,
2985 				    ", Processor Socket: %d", socket_id);
2986 
2987 	return printed;
2988 }
2989 
2990 int parse_filter_percentage(const struct option *opt __maybe_unused,
2991 			    const char *arg, int unset __maybe_unused)
2992 {
2993 	if (!strcmp(arg, "relative"))
2994 		symbol_conf.filter_relative = true;
2995 	else if (!strcmp(arg, "absolute"))
2996 		symbol_conf.filter_relative = false;
2997 	else {
2998 		pr_debug("Invalid percentage: %s\n", arg);
2999 		return -1;
3000 	}
3001 
3002 	return 0;
3003 }
3004 
3005 int perf_hist_config(const char *var, const char *value)
3006 {
3007 	if (!strcmp(var, "hist.percentage"))
3008 		return parse_filter_percentage(NULL, value, 0);
3009 
3010 	return 0;
3011 }
3012 
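/*
 * Initialize an empty hists instance: both input slots, the collapsed and
 * output trees, the lock, the default filters, and the given hpp (column
 * format) list.
 */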
3013 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
3014 {
3015 	memset(hists, 0, sizeof(*hists));
3016 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
3017 	hists->entries_in = &hists->entries_in_array[0];
3018 	hists->entries_collapsed = RB_ROOT_CACHED;
3019 	hists->entries = RB_ROOT_CACHED;
3020 	mutex_init(&hists->lock);
3021 	hists->socket_filter = -1;
3022 	hists->parallelism_filter = symbol_conf.parallelism_filter;
3023 	hists->hpp_list = hpp_list;
3024 	INIT_LIST_HEAD(&hists->hpp_formats);
3025 	return 0;
3026 }
3027 
3028 static void hists__delete_remaining_entries(struct rb_root_cached *root)
3029 {
3030 	struct rb_node *node;
3031 	struct hist_entry *he;
3032 
3033 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
3034 		node = rb_first_cached(root);
3035 		rb_erase_cached(node, root);
3036 
3037 		he = rb_entry(node, struct hist_entry, rb_node_in);
3038 		hist_entry__delete(he);
3039 	}
3040 }
3041 
3042 static void hists__delete_all_entries(struct hists *hists)
3043 {
3044 	hists__delete_entries(hists);
3045 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
3046 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
3047 	hists__delete_remaining_entries(&hists->entries_collapsed);
3048 }
3049 
3050 static void hists_evsel__exit(struct evsel *evsel)
3051 {
3052 	struct hists *hists = evsel__hists(evsel);
3053 	struct perf_hpp_fmt *fmt, *pos;
3054 	struct perf_hpp_list_node *node, *tmp;
3055 
3056 	hists__delete_all_entries(hists);
3057 	zfree(&hists->mem_stat_types);
3058 	zfree(&hists->mem_stat_total);
3059 
3060 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
3061 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
3062 			list_del_init(&fmt->list);
3063 			free(fmt);
3064 		}
3065 		list_del_init(&node->list);
3066 		free(node);
3067 	}
3068 }
3069 
3070 static int hists_evsel__init(struct evsel *evsel)
3071 {
3072 	struct hists *hists = evsel__hists(evsel);
3073 
3074 	__hists__init(hists, &perf_hpp_list);
3075 	return 0;
3076 }
3077 
3078 /*
3079  * XXX We probably need a hists_evsel__exit() to free the hist_entries
3080  * stored in the rbtree...
3081  */
3082 
3083 int hists__init(void)
3084 {
3085 	int err = evsel__object_config(sizeof(struct hists_evsel),
3086 				       hists_evsel__init, hists_evsel__exit);
3087 	if (err)
3088 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
3089 
3090 	return err;
3091 }
3092 
3093 void perf_hpp_list__init(struct perf_hpp_list *list)
3094 {
3095 	INIT_LIST_HEAD(&list->fields);
3096 	INIT_LIST_HEAD(&list->sorts);
3097 }
3098