// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "mem-info.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->block_info)
                return;
        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(map__dso(h->ms.map));
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.ms.sym) {
                        symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.ms.sym) {
                        symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                           strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                           strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (mem_info__daddr(h->mem_info)->ms.sym) {
                        symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (mem_info__iaddr(h->mem_info)->ms.sym) {
                        symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (mem_info__daddr(h->mem_info)->ms.map) {
                        symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

                hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP, 6);
        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
        hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
        hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
        hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
        hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
        hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

        if (symbol_conf.nanosecs)
                hists__new_col_len(hists, HISTC_TIME, 16);
        else
                hists__new_col_len(hists, HISTC_TIME, 12);
        hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

        if (h->cgroup) {
                const char *cgrp_name = "unknown";
                struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
                                                   h->cgroup);
                if (cgrp != NULL)
                        cgrp_name = cgrp->name;

                hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
        }
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

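/*
 * Quantize a sample timestamp down to the bucket boundary given by
 * symbol_conf.time_quantum, so that time-sorted entries are grouped
 * into fixed-size time slices.
 */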
static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;
        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
        he_stat->period += period;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period += src->period;
        dest->period_sys += src->period_sys;
        dest->period_us += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us += src->period_guest_us;
        dest->weight1 += src->weight1;
        dest->weight2 += src->weight2;
        dest->weight3 += src->weight3;
        dest->nr_events += src->nr_events;
}

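/*
 * Exponential decay: scale the accumulated stats by 7/8 so that older
 * samples gradually lose weight in continuously updated views such as
 * 'perf top'.
 */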
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        he_stat->weight1 = (he_stat->weight1 * 7) / 8;
        he_stat->weight2 = (he_stat->weight2 * 7) / 8;
        he_stat->weight3 = (he_stat->weight3 * 7) / 8;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

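/*
 * Decay the entry and, in hierarchy mode, its children. Returns true
 * when the period has decayed to zero, i.e. the caller may delete it.
 */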
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        he->ms.maps = maps__get(he->ms.maps);
        he->ms.map = map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (a part of) allocated from
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries. So we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
                he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
                he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
                he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
        }

        if (he->mem_info) {
                he->mem_info = mem_info__clone(template->mem_info);
                if (he->mem_info == NULL)
                        goto err_infos;
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(symbol_conf.res_sample,
                                         sizeof(struct res_sample));
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        he->thread = thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map_symbol__exit(&he->branch_info->from.ms);
                map_symbol__exit(&he->branch_info->to.ms);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
                map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
        }
err:
        map_symbol__exit(&he->ms);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }
        return he;
}

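/*
 * When --exclude-other is in effect, mark entries whose parent symbol
 * could not be resolved so they get filtered out of the output.
 */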
static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               const struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);
                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_stat(&he->stat, &entry->stat);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period);

                        block_info__delete(entry->block_info);

                        kvm_info__zput(entry->kvm_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it. Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
                                if (he->ms.sym) {
                                        u64 addr = he->ms.sym->start;
                                        he->ms.sym = map__find_symbol(entry->ms.map, addr);
                                }

                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

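/*
 * Return a random number uniformly distributed in [0, high). Rejecting
 * values below 'thresh' avoids the bias that a plain random() % high
 * would introduce when the range is not a multiple of 'high'.
 */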
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;
        for (;;) {
                unsigned r = random();
                if (r >= thresh)
                        return r % high;
        }
}

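/*
 * Keep up to symbol_conf.res_sample representative samples per entry;
 * once the buffer is full, each new sample overwrites a randomly
 * chosen slot.
 */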
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct kvm_info *ki,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .cgroup = sample->cgroup,
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .code_page_size = sample->code_page_size,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight1 = sample->weight,
                        .weight2 = sample->ins_lat,
                        .weight3 = sample->p_stage_cyc,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .kvm_info = ki,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
                .weight = sample->weight,
                .ins_lat = sample->ins_lat,
                .p_stage_cyc = sample->p_stage_cyc,
                .simd_flags = sample->simd_flags,
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct kvm_info *ki,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct kvm_info *ki,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->mi = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->mi;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort() which is solely
         * based on periods. We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and via the
         * he_stat__add_period() function.
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        mem_info__zput(iter->mi);

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->bi = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

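/*
 * Advance to the next branch stack entry, pointing 'al' at the branch
 * target. Returns 1 while entries remain, 0 when the stack is
 * exhausted.
 */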
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->bi;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        maps__put(al->maps);
        al->maps = maps__get(bi[i].to.ms.maps);
        map__put(al->map);
        al->map = map__get(bi[i].to.ms.map);
        al->sym = bi[i].to.ms.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->bi;

        if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static void branch_info__exit(struct branch_info *bi)
{
        map_symbol__exit(&bi->from.ms);
        map_symbol__exit(&bi->to.ms);
        zfree_srcline(&bi->srcline_from);
        zfree_srcline(&bi->srcline_to);
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        for (int i = 0; i < iter->total; i++)
                branch_info__exit(&iter->bi[i]);

        zfree(&iter->bi);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              NULL, sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;
        struct callchain_cursor *cursor = get_tls_callchain_cursor();

        if (cursor == NULL)
                return -ENOMEM;

        callchain_cursor_commit(cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * accumulated only once, preventing entries from having more
         * than 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->he_cache = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->he_cache;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(get_tls_callchain_cursor());

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(get_tls_callchain_cursor());
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

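/*
 * Quick inequality check on the symbol (or, when both symbols are
 * missing, the ip) used to avoid the full hist_entry__cmp() in the
 * common case where callchain entries differ.
 */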
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
                           struct hist_entry *right)
{
        struct symbol *sym_l = left->ms.sym;
        struct symbol *sym_r = right->ms.sym;

        if (!sym_l && !sym_r)
                return left->ip != right->ip;

        return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->he_cache;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
        bool fast = hists__has(he_tmp.hists, sym);

        if (tls_cursor == NULL)
                return -ENOMEM;

        callchain_cursor_snapshot(&cursor, tls_cursor);

        callchain_cursor_advance(tls_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                /*
                 * In most cases there are no duplicate entries in the
                 * callchain. The symbols are usually different, so do a
                 * quick check on the symbols first.
                 */
                if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
                        continue;

                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              NULL, sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        mem_info__zput(iter->mi);
        zfree(&iter->bi);
        zfree(&iter->he_cache);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map_symbol__exit(&he->ms);

        if (he->branch_info) {
                branch_info__exit(he->branch_info);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
                map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
                mem_info__zput(he->mem_info);
        }

        if (he->block_info)
                block_info__delete(he->block_info);

        if (he->kvm_info)
                kvm_info__zput(he->kvm_info);

        zfree(&he->res_samples);
        zfree(&he->stat_acc);
        zfree_srcline(&he->srcline);
        if (he->srcfile && he->srcfile[0])
                zfree(&he->srcfile);
        free_callchain(he->callchain);
        zfree(&he->trace_output);
        zfree(&he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to parents. The marker bit was
                 * already set by default so it only needs to clear
                 * non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries. It will be
                 * cleared if any of its lower level entries is not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root_cached *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save the related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color_cached(&new->rb_node_in, root, leftmost);
        return new;
}

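/*
 * In hierarchy mode, insert one copy of 'he' per (non-skipped) sort key
 * level, chaining each copy under the previous one. 'he' itself is
 * consumed and freed before returning.
 */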
static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root_cached *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (hist_entry__has_callchains(new_he) &&
                    symbol_conf.use_callchain) {
                        struct callchain_cursor *cursor = get_tls_callchain_cursor();

                        if (cursor == NULL)
                                return -1;

                        callchain_cursor_reset(cursor);
                        if (callchain_merge(cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
                                        struct rb_root_cached *root,
                                        struct hist_entry *he)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;
        bool leftmost = true;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
                                struct callchain_cursor *cursor = get_tls_callchain_cursor();

                                if (cursor != NULL) {
                                        callchain_cursor_reset(cursor);
                                        if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
                                                ret = -1;
                                } else {
                                        ret = 0;
                                }
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, root, leftmost);
        return 1;
}

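/*
 * The hists keep two input trees (entries_in_array) and flip between
 * them under the lock, so new entries can keep arriving on one tree
 * while the other is being collapsed and resorted.
 */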
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root_cached *root;

        mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root_cached *root;
        struct rb_node *next;
        struct hist_entry *n;
        int ret;

        if (!hists__has(hists, need_collapse))
                return 0;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first_cached(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase_cached(&n->rb_node_in, root);
                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
                if (ret < 0)
                        return -1;

                if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
        return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
        struct rb_node *node;
        struct hist_entry *he;

        node = rb_first_cached(&hists->entries);

        hists->stats.total_period = 0;
        hists->stats.total_non_filtered_period = 0;

        /*
         * Recalculate the total period using top-level entries only,
         * since lower level entries only see non-filtered entries
         * while upper level entries have the sum of both.
         */
        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node);
                node = rb_next(node);

                hists->stats.total_period += he->stat.period;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period += he->stat.period;
        }
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
                                          struct hist_entry *he)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color_cached(&he->rb_node, root, leftmost);

        /* update column width of dynamic entry */
        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                if (fmt->init)
                        fmt->init(fmt, he);
        }
}

static void hists__hierarchy_output_resort(struct hists *hists,
                                           struct ui_progress *prog,
                                           struct rb_root_cached *root_in,
                                           struct rb_root_cached *root_out,
                                           u64 min_callchain_hits,
                                           bool use_callchain)
{
        struct rb_node *node;
        struct hist_entry *he;

        *root_out = RB_ROOT_CACHED;
        node = rb_first_cached(root_in);

        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node_in);
                node = rb_next(node);

                hierarchy_insert_output_entry(root_out, he);

                if (prog)
                        ui_progress__update(prog, 1);

                hists->nr_entries++;
                if (!he->filtered) {
                        hists->nr_non_filtered_entries++;
                        hists__calc_col_len(hists, he);
                }

                if (!he->leaf) {
                        hists__hierarchy_output_resort(hists, prog,
                                                       &he->hroot_in,
                                                       &he->hroot_out,
                                                       min_callchain_hits,
                                                       use_callchain);
                        continue;
                }

                if (!use_callchain)
                        continue;

                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }

                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
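
/*
 * A sketch of the usual flow in report-style tools (cf. builtin-report.c):
 * once all samples have been added to an evsel's hists, the entries are
 * collapsed and then resorted for output:
 *
 *	struct ui_progress prog;
 *
 *	hists__collapse_resort(hists, NULL);
 *	ui_progress__init(&prog, hists->nr_entries, "Sorting events for output...");
 *	evsel__output_resort(evsel, &prog);
 *	ui_progress__finish();
 */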

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
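
/*
 * Example (sketch): walking every visible entry of a hierarchy in output
 * order, descending only into unfolded nodes:
 *
 *	struct rb_node *nd = rb_first_cached(&hists->entries);
 *
 *	while (nd) {
 *		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
 *
 *		... process he ...
 *
 *		nd = __rb_hierarchy_next(nd, HMD_NORMAL);
 *	}
 */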

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

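/*
 * Clear @filter from @h and account the entry as unfiltered again: in
 * hierarchy mode the entry's period is also propagated back into its
 * parents, which get unfiltered (and re-folded) along the way.
 */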
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter, since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
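
/*
 * Example (sketch): roughly what the TUI browser does when the user zooms
 * into a thread; it sets the filter on the hists and re-runs the pass:
 *
 *	hists->thread_filter = thread__get(thread);
 *	hists__filter_by_thread(hists);
 */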

void events_stats__inc(struct events_stats *stats, u32 type)
{
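	/* nr_events[0] holds the running total; PERF_RECORD_* types start at 1 */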
	++stats->nr_events[0];
	++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_lost_samples += lost;
}

void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_dropped_samples += lost;
}

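/*
 * Add an entry matching 'pair' to 'hists' with all counts zeroed, so that
 * a pairing (e.g. for perf diff) has a counterpart even when this hists
 * never saw such an entry itself.
 */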
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* the dummy's parent must be in the leader hierarchy, not taken from 'pos' */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists with
 * period=0 and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
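
/*
 * Typical pairing flow (sketch), as in diff-style tools: first match the
 * entries both hists have in common, then link dummies for the rest so
 * every entry of 'other' has a counterpart in 'leader':
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		return -1;
 */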

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}

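/*
 * Account per-branch cycle counts from a sample's branch stack so that
 * annotation can later derive cycles and IPC per address range; a non-NULL
 * total_cycles also accumulates a grand total across samples.
 */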
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles, struct evsel *evsel)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors; we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes, always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches in reverse
			 * program order!
			 */
			for (int i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles, evsel,
					bi[i].branch_stack_cntr);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			for (unsigned int i = 0; i < bs->nr; i++) {
				map_symbol__exit(&bi[i].to.ms);
				map_symbol__exit(&bi[i].from.ms);
			}
			free(bi);
		}
	}
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		u64 total_samples = hists->stats.nr_samples;

		total_samples += hists->stats.nr_lost_samples;
		total_samples += hists->stats.nr_dropped_samples;

		if (symbol_conf.skip_empty && total_samples == 0)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
		if (hists->stats.nr_dropped_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
					     thread__tid(thread));
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso__short_name(dso));
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}
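
/*
 * Wired up by the tools as an option callback, e.g. (sketch of the existing
 * usage in builtin-report.c and friends):
 *
 *	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
 *		     "how to display percentage of filtered entries",
 *		     parse_filter_percentage),
 */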

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
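
/*
 * The same knob is also reachable from perfconfig, e.g.:
 *
 *	[hist]
 *		percentage = relative
 */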

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * hists_evsel__exit() above already frees the hist_entries stored in the
 * rbtrees when an evsel goes away; here we just register the init/exit
 * hooks (and the enlarged object size) with the evsel allocator.
 */

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}
