// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "mem-info.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
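/*
 * Sort keys that need special handling at setup time: dynamic_headers get
 * their column header text chosen at runtime, and arch_specific_sort_keys
 * are only offered on architectures that provide the corresponding data.
 */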
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have an Adjacent Cacheline Prefetch feature, which
 * effectively doubles the cacheline size. Enable this flag to check
 * things at double-cacheline granularity.
 */
bool chk_double_cl;

/*
 * Replaces all occurrences of the character passed to the:
 *
 * -t, --field-separator
 *
 * option. That option selects a special separator character and disables
 * padding with spaces, so every occurrence of the separator in symbol names
 * (and other output) is replaced with a '.' character, making the separator
 * the only character that cannot appear inside a field.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

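	/* vsnprintf() returns the length it would have written; clamp on truncation. */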
	if (n >= (int)size)
		return size - 1;
	return n;
}

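/*
 * Compare possibly-NULL keys: returns 0 when both are NULL, -1 when only
 * the left one is NULL, and 1 otherwise.
 */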
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return thread__tid(right->thread) - thread__tid(left->thread);
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
			       width, width, comm ?: "");
}

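/*
 * Returns non-zero (i.e. the entry is filtered out) when a thread filter is
 * set and the entry's thread is not the filtered one.
 */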
static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && !RC_CHK_EQUAL(he->thread, th);
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort tgid */

static int64_t
sort__tgid_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return thread__pid(right->thread) - thread__pid(left->thread);
}

static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	int tgid = thread__pid(he->thread);
	const char *comm = NULL;

	/* display comm of the thread-group leader */
	if (thread__pid(he->thread) == thread__tid(he->thread)) {
		comm = thread__comm_str(he->thread);
	} else {
		struct maps *maps = thread__maps(he->thread);
		struct thread *leader = machine__find_thread(maps__machine(maps),
							     tgid, tgid);
		if (leader) {
			comm = thread__comm_str(leader);
			thread__put(leader);
		}
	}
	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
}

struct sort_entry sort_tgid = {
	.se_header = " Tgid:Command",
	.se_cmp = sort__tgid_cmp,
	.se_snprintf = hist_entry__tgid_snprintf,
	.se_width_idx = HISTC_TGID,
};

/* --sort simd */

static int64_t
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (left->simd_flags.arch != right->simd_flags.arch)
		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;

	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
}

static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
	u64 arch = simd_flags->arch;

	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
		return "SVE";
	else
		return "n/a";
}

static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	const char *name;

	if (!he->simd_flags.arch)
		return repsep_snprintf(bf, size, "");

	name = hist_entry__get_simd_name(&he->simd_flags);

	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
		return repsep_snprintf(bf, size, "[e] %s", name);
	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
		return repsep_snprintf(bf, size, "[p] %s", name);

	return repsep_snprintf(bf, size, "[.] %s", name);
}

struct sort_entry sort_simd = {
	.se_header = "Simd ",
	.se_cmp = sort__simd_cmp,
	.se_snprintf = hist_entry__simd_snprintf,
	.se_width_idx = HISTC_SIMD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso__long_name(dso_l);
		dso_name_r = dso__long_name(dso_r);
	} else {
		dso_name_l = dso__short_name(dso_l);
		dso_name_r = dso__short_name(dso_r);
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	const char *dso_name = "[unknown]";

	if (dso)
		dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

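/*
 * Inlined symbols are compared by name; two inlined symbols with the same
 * name whose address ranges overlap are considered equal.
 */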
int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

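/*
 * With -v/--verbose the symbol is prefixed with the (possibly unmapped)
 * address and the symtab origin character ('!' when no dso is known).
 */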
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ? dso__symtab_origin(dso) : '!';
		u64 rip = ip;

		if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
			rip = map__unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort symoff */

static int64_t
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_cmp(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int64_t
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_sort(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;

	if (sym == NULL)
		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}

struct sort_entry sort_sym_offset = {
	.se_header = "Symbol Offset",
	.se_cmp = sort__symoff_cmp,
	.se_sort = sort__symoff_sort,
	.se_snprintf = hist_entry__symoff_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL_OFFSET,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__addr_cmp(left->ip, right->ip);
	if (ret)
		return ret;

	return sort__dso_cmp(left, right);
}

static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int64_t
sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_collapse(left, right);
}

static void
sort__srcline_init(struct hist_entry *he)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_collapse = sort__srcline_collapse,
	.se_sort = sort__srcline_sort,
	.se_init = sort__srcline_init,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->from.addr - right->branch_info->from.addr;
}

static int64_t
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int64_t
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_from_collapse(left, right);
}

static void sort__srcline_from_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_from)
		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_collapse = sort__srcline_from_collapse,
	.se_sort = sort__srcline_from_sort,
	.se_init = sort__srcline_from_init,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->to.addr - right->branch_info->to.addr;
}

static int64_t
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int64_t
sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_to_collapse(left, right);
}

static void sort__srcline_to_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_to)
		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_collapse = sort__srcline_to_collapse,
	.se_sort = sort__srcline_to_sort,
	.se_init = sort__srcline_to_init,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort callchain_branch_predicted */

static int64_t
sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
				     struct hist_entry *right __maybe_unused)
{
	return 0;
}

static int hist_entry__callchain_branch_predicted_snprintf(
	struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	u64 branch_count, predicted_count;
	double percent = 0.0;
	char str[32];

	callchain_branch_counts(he->callchain, &branch_count,
				&predicted_count, NULL, NULL);

	if (branch_count)
		percent = predicted_count * 100.0 / branch_count;

	snprintf(str, sizeof(str), "%.1f%%", percent);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}

struct sort_entry sort_callchain_branch_predicted = {
	.se_header = "Predicted",
	.se_cmp = sort__callchain_branch_predicted_cmp,
	.se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
	.se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
};

/* --sort callchain_branch_abort */

static int64_t
sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
				 struct hist_entry *right __maybe_unused)
{
	return 0;
}

static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
							char *bf, size_t size,
							unsigned int width)
{
	u64 branch_count, abort_count;
	char str[32];

	callchain_branch_counts(he->callchain, &branch_count,
				NULL, &abort_count, NULL);

	snprintf(str, sizeof(str), "%" PRId64, abort_count);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}

struct sort_entry sort_callchain_branch_abort = {
	.se_header = "Abort",
	.se_cmp = sort__callchain_branch_abort_cmp,
	.se_snprintf = hist_entry__callchain_branch_abort_snprintf,
	.se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
};

/* --sort callchain_branch_cycles */

static int64_t
sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
				  struct hist_entry *right __maybe_unused)
{
	return 0;
}

static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
							 char *bf, size_t size,
							 unsigned int width)
{
	u64 branch_count, cycles_count, cycles = 0;
	char str[32];

	callchain_branch_counts(he->callchain, &branch_count,
				NULL, NULL, &cycles_count);

	if (branch_count)
		cycles = cycles_count / branch_count;

	snprintf(str, sizeof(str), "%" PRId64 "", cycles);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}

struct sort_entry sort_callchain_branch_cycles = {
	.se_header = "Cycles",
	.se_cmp = sort__callchain_branch_cycles_cmp,
	.se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
	.se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
};

/* --sort srcfile */

static char no_srcfile[1];

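/*
 * Return just the source file name for an entry: the srcline with its
 * ":<line>" suffix stripped, or no_srcfile when it cannot be resolved.
 */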
static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (sf == SRCLINE_UNKNOWN)
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_cmp(left, right);
}

static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcfile_collapse(left, right);
}

static void sort__srcfile_init(struct hist_entry *he)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_collapse = sort__srcfile_collapse,
	.se_sort = sort__srcfile_sort,
	.se_init = sort__srcfile_init,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort parallelism */

static int64_t
sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->parallelism - left->parallelism;
}

static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg)
{
	const unsigned long *parallelism_filter = arg;

	if (type != HIST_FILTER__PARALLELISM)
		return -1;

	return test_bit(he->parallelism, parallelism_filter);
}

static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
}

struct sort_entry sort_parallelism = {
	.se_header = "Parallelism",
	.se_cmp = sort__parallelism_cmp,
	.se_filter = hist_entry__parallelism_filter,
	.se_snprintf = hist_entry__parallelism_snprintf,
	.se_width_idx = HISTC_PARALLELISM,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort cgroup */

static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cgroup - left->cgroup;
}

static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		struct cgroup *cgrp = cgroup__find(maps__machine(thread__maps(he->ms.thread))->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}

struct sort_entry sort_cgroup = {
	.se_header = "Cgroup",
	.se_cmp = sort__cgroup_cmp,
	.se_snprintf = hist_entry__cgroup_snprintf,
	.se_width_idx = HISTC_CGROUP,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

#ifdef HAVE_LIBTRACEEVENT
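/*
 * Render the tracepoint payload of a hist entry into a newly allocated
 * string, either as raw fields or via the event's print format.
 */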
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};
	struct tep_event *tp_format;

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	tp_format = evsel__tp_format(evsel);
	if (tp_format) {
		if (symbol_conf.raw_trace)
			tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
		else
			tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
	}

	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       map__dso(he->branch_info->from.ms.map) != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       map__dso(he->branch_info->to.ms.map) != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(to_l->addr, to_r->addr);
}

struct sort_entry sort_addr_from = {
	.se_header = "Source Address",
	.se_cmp = sort__addr_from_cmp,
	.se_snprintf = hist_entry__addr_from_snprintf,
	.se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx = HISTC_ADDR_FROM,
};

struct sort_entry sort_addr_to = {
	.se_header = "Target Address",
	.se_cmp = sort__addr_to_cmp,
	.se_snprintf = hist_entry__addr_to_snprintf,
	.se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx = HISTC_ADDR_TO,
};


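/*
 * Branch entries compare equal only when both the mispredicted and the
 * predicted flags match; the snprintf below prints "Y", "N" or "N/A".
 */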
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width){
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->addr;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__daddr(he->mem_info)->addr;
		ms = &mem_info__daddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__iaddr(left->mem_info)->addr;
	if (right->mem_info)
		r = mem_info__iaddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__iaddr(he->mem_info)->addr;
		ms = &mem_info__iaddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = mem_info__daddr(left->mem_info)->ms.map;
	if (right->mem_info)
		map_r = mem_info__daddr(right->mem_info)->ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = mem_info__daddr(he->mem_info)->ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

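/*
 * The mem-access sort keys below compare raw perf_mem_data_src fields;
 * entries without mem_info are treated as "not available" (the *_NA values).
 */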
1606 static int64_t
sort__locked_cmp(struct hist_entry * left,struct hist_entry * right)1607 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1608 {
1609 union perf_mem_data_src data_src_l;
1610 union perf_mem_data_src data_src_r;
1611
1612 if (left->mem_info)
1613 data_src_l = *mem_info__data_src(left->mem_info);
1614 else
1615 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1616
1617 if (right->mem_info)
1618 data_src_r = *mem_info__data_src(right->mem_info);
1619 else
1620 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1621
1622 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1623 }
1624
hist_entry__locked_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1625 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1626 size_t size, unsigned int width)
1627 {
1628 char out[10];
1629
1630 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1631 return repsep_snprintf(bf, size, "%.*s", width, out);
1632 }
1633
1634 static int64_t
sort__tlb_cmp(struct hist_entry * left,struct hist_entry * right)1635 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1636 {
1637 union perf_mem_data_src data_src_l;
1638 union perf_mem_data_src data_src_r;
1639
1640 if (left->mem_info)
1641 data_src_l = *mem_info__data_src(left->mem_info);
1642 else
1643 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1644
1645 if (right->mem_info)
1646 data_src_r = *mem_info__data_src(right->mem_info);
1647 else
1648 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1649
1650 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1651 }
1652
hist_entry__tlb_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1653 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1654 size_t size, unsigned int width)
1655 {
1656 char out[64];
1657
1658 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1659 return repsep_snprintf(bf, size, "%-*s", width, out);
1660 }
1661
1662 static int64_t
sort__lvl_cmp(struct hist_entry * left,struct hist_entry * right)1663 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1664 {
1665 union perf_mem_data_src data_src_l;
1666 union perf_mem_data_src data_src_r;
1667
1668 if (left->mem_info)
1669 data_src_l = *mem_info__data_src(left->mem_info);
1670 else
1671 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1672
1673 if (right->mem_info)
1674 data_src_r = *mem_info__data_src(right->mem_info);
1675 else
1676 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1677
1678 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1679 }
1680
1681 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1682 size_t size, unsigned int width)
1683 {
1684 char out[64];
1685
1686 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1687 return repsep_snprintf(bf, size, "%-*s", width, out);
1688 }
1689
1690 static int64_t
1691 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1692 {
1693 union perf_mem_data_src data_src_l;
1694 union perf_mem_data_src data_src_r;
1695
1696 if (left->mem_info)
1697 data_src_l = *mem_info__data_src(left->mem_info);
1698 else
1699 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1700
1701 if (right->mem_info)
1702 data_src_r = *mem_info__data_src(right->mem_info);
1703 else
1704 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1705
1706 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1707 }
1708
1709 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1710 size_t size, unsigned int width)
1711 {
1712 char out[64];
1713
1714 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1715 return repsep_snprintf(bf, size, "%-*s", width, out);
1716 }
1717
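/*
 * --sort dcacheline
 *
 * Group samples that hit the same data cache line: compare cpumode,
 * then the DSO identity of the data mapping, then the pid for
 * anonymous userspace mappings, and finally the cache-line aligned
 * address from cl_address().
 */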
1718 int64_t
1719 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1720 {
1721 u64 l, r;
1722 struct map *l_map, *r_map;
1723 struct dso *l_dso, *r_dso;
1724 int rc;
1725
1726 if (!left->mem_info) return -1;
1727 if (!right->mem_info) return 1;
1728
1729 /* group event types together */
1730 if (left->cpumode > right->cpumode) return -1;
1731 if (left->cpumode < right->cpumode) return 1;
1732
1733 l_map = mem_info__daddr(left->mem_info)->ms.map;
1734 r_map = mem_info__daddr(right->mem_info)->ms.map;
1735
1736 /* if both are NULL, jump to sort on al_addr instead */
1737 if (!l_map && !r_map)
1738 goto addr;
1739
1740 if (!l_map) return -1;
1741 if (!r_map) return 1;
1742
1743 l_dso = map__dso(l_map);
1744 r_dso = map__dso(r_map);
1745 rc = dso__cmp_id(l_dso, r_dso);
1746 if (rc)
1747 return rc;
1748 /*
1749 * Addresses with no major/minor numbers or build ID are assumed to be
1750 * anonymous in userspace. Sort those on pid then address.
1751 *
1752 * The kernel and non-zero major/minor mapped areas are
1753 * assumed to be unity mapped. Sort those on address.
1754 */
1755 if (left->cpumode != PERF_RECORD_MISC_KERNEL && (map__flags(l_map) & MAP_SHARED) == 0) {
1756 const struct dso_id *dso_id = dso__id_const(l_dso);
1757
1758 if (!dso_id->mmap2_valid)
1759 dso_id = dso__id_const(r_dso);
1760
1761 if (!build_id__is_defined(&dso_id->build_id) &&
1762 (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0))) {
1763 /* userspace anonymous */
1764
1765 if (thread__pid(left->thread) > thread__pid(right->thread))
1766 return -1;
1767 if (thread__pid(left->thread) < thread__pid(right->thread))
1768 return 1;
1769 }
1770 }
1771
1772 addr:
1773 /* al_addr does all the right addr - start + offset calculations */
1774 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1775 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1776
1777 if (l > r) return -1;
1778 if (l < r) return 1;
1779
1780 return 0;
1781 }
1782
1783 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1784 size_t size, unsigned int width)
1785 {
1786
1787 uint64_t addr = 0;
1788 struct map_symbol *ms = NULL;
1789 char level = he->level;
1790
1791 if (he->mem_info) {
1792 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1793 struct dso *dso = map ? map__dso(map) : NULL;
1794 const struct dso_id *dso_id = dso ? dso__id_const(dso) : &dso_id_empty;
1795
1796 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1797 ms = &mem_info__daddr(he->mem_info)->ms;
1798
1799 /* print [s] for shared data mmaps */
1800 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1801 map && !(map__prot(map) & PROT_EXEC) &&
1802 (map__flags(map) & MAP_SHARED) &&
1803 (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0)))
1804 level = 's';
1805 else if (!map)
1806 level = 'X';
1807 }
1808 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1809 }
1810
1811 struct sort_entry sort_mispredict = {
1812 .se_header = "Branch Mispredicted",
1813 .se_cmp = sort__mispredict_cmp,
1814 .se_snprintf = hist_entry__mispredict_snprintf,
1815 .se_width_idx = HISTC_MISPREDICT,
1816 };
1817
1818 static int64_t
1819 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1820 {
1821 return left->weight - right->weight;
1822 }
1823
1824 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1825 size_t size, unsigned int width)
1826 {
1827 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1828 }
1829
1830 struct sort_entry sort_local_weight = {
1831 .se_header = "Local Weight",
1832 .se_cmp = sort__weight_cmp,
1833 .se_snprintf = hist_entry__local_weight_snprintf,
1834 .se_width_idx = HISTC_LOCAL_WEIGHT,
1835 };
1836
1837 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1838 size_t size, unsigned int width)
1839 {
1840 return repsep_snprintf(bf, size, "%-*llu", width,
1841 he->weight * he->stat.nr_events);
1842 }
1843
1844 struct sort_entry sort_global_weight = {
1845 .se_header = "Weight",
1846 .se_cmp = sort__weight_cmp,
1847 .se_snprintf = hist_entry__global_weight_snprintf,
1848 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1849 };
1850
1851 static int64_t
1852 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1853 {
1854 return left->ins_lat - right->ins_lat;
1855 }
1856
1857 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1858 size_t size, unsigned int width)
1859 {
1860 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1861 }
1862
1863 struct sort_entry sort_local_ins_lat = {
1864 .se_header = "Local INSTR Latency",
1865 .se_cmp = sort__ins_lat_cmp,
1866 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1867 .se_width_idx = HISTC_LOCAL_INS_LAT,
1868 };
1869
1870 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1871 size_t size, unsigned int width)
1872 {
1873 return repsep_snprintf(bf, size, "%-*u", width,
1874 he->ins_lat * he->stat.nr_events);
1875 }
1876
1877 struct sort_entry sort_global_ins_lat = {
1878 .se_header = "INSTR Latency",
1879 .se_cmp = sort__ins_lat_cmp,
1880 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1881 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1882 };
1883
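/* weight3 holds the pipeline stage cycles (reported as retire latency on x86) */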
1884 static int64_t
1885 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1886 {
1887 return left->weight3 - right->weight3;
1888 }
1889
1890 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1891 size_t size, unsigned int width)
1892 {
1893 return repsep_snprintf(bf, size, "%-*u", width, he->weight3 * he->stat.nr_events);
1894 }
1895
1896
1897 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1898 size_t size, unsigned int width)
1899 {
1900 return repsep_snprintf(bf, size, "%-*u", width, he->weight3);
1901 }
1902
1903 struct sort_entry sort_local_p_stage_cyc = {
1904 .se_header = "Local Pipeline Stage Cycle",
1905 .se_cmp = sort__p_stage_cyc_cmp,
1906 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1907 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1908 };
1909
1910 struct sort_entry sort_global_p_stage_cyc = {
1911 .se_header = "Pipeline Stage Cycle",
1912 .se_cmp = sort__p_stage_cyc_cmp,
1913 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1914 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1915 };
1916
1917 struct sort_entry sort_mem_daddr_sym = {
1918 .se_header = "Data Symbol",
1919 .se_cmp = sort__daddr_cmp,
1920 .se_snprintf = hist_entry__daddr_snprintf,
1921 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1922 };
1923
1924 struct sort_entry sort_mem_iaddr_sym = {
1925 .se_header = "Code Symbol",
1926 .se_cmp = sort__iaddr_cmp,
1927 .se_snprintf = hist_entry__iaddr_snprintf,
1928 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1929 };
1930
1931 struct sort_entry sort_mem_daddr_dso = {
1932 .se_header = "Data Object",
1933 .se_cmp = sort__dso_daddr_cmp,
1934 .se_snprintf = hist_entry__dso_daddr_snprintf,
1935 .se_width_idx = HISTC_MEM_DADDR_DSO,
1936 };
1937
1938 struct sort_entry sort_mem_locked = {
1939 .se_header = "Locked",
1940 .se_cmp = sort__locked_cmp,
1941 .se_snprintf = hist_entry__locked_snprintf,
1942 .se_width_idx = HISTC_MEM_LOCKED,
1943 };
1944
1945 struct sort_entry sort_mem_tlb = {
1946 .se_header = "TLB access",
1947 .se_cmp = sort__tlb_cmp,
1948 .se_snprintf = hist_entry__tlb_snprintf,
1949 .se_width_idx = HISTC_MEM_TLB,
1950 };
1951
1952 struct sort_entry sort_mem_lvl = {
1953 .se_header = "Memory access",
1954 .se_cmp = sort__lvl_cmp,
1955 .se_snprintf = hist_entry__lvl_snprintf,
1956 .se_width_idx = HISTC_MEM_LVL,
1957 };
1958
1959 struct sort_entry sort_mem_snoop = {
1960 .se_header = "Snoop",
1961 .se_cmp = sort__snoop_cmp,
1962 .se_snprintf = hist_entry__snoop_snprintf,
1963 .se_width_idx = HISTC_MEM_SNOOP,
1964 };
1965
1966 struct sort_entry sort_mem_dcacheline = {
1967 .se_header = "Data Cacheline",
1968 .se_cmp = sort__dcacheline_cmp,
1969 .se_snprintf = hist_entry__dcacheline_snprintf,
1970 .se_width_idx = HISTC_MEM_DCACHELINE,
1971 };
1972
1973 static int64_t
1974 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1975 {
1976 union perf_mem_data_src data_src_l;
1977 union perf_mem_data_src data_src_r;
1978
1979 if (left->mem_info)
1980 data_src_l = *mem_info__data_src(left->mem_info);
1981 else
1982 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1983
1984 if (right->mem_info)
1985 data_src_r = *mem_info__data_src(right->mem_info);
1986 else
1987 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1988
1989 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1990 }
1991
1992 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1993 size_t size, unsigned int width)
1994 {
1995 char out[16];
1996
1997 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1998 return repsep_snprintf(bf, size, "%.*s", width, out);
1999 }
2000
2001 struct sort_entry sort_mem_blocked = {
2002 .se_header = "Blocked",
2003 .se_cmp = sort__blocked_cmp,
2004 .se_snprintf = hist_entry__blocked_snprintf,
2005 .se_width_idx = HISTC_MEM_BLOCKED,
2006 };
2007
2008 static int64_t
2009 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
2010 {
2011 uint64_t l = 0, r = 0;
2012
2013 if (left->mem_info)
2014 l = mem_info__daddr(left->mem_info)->phys_addr;
2015 if (right->mem_info)
2016 r = mem_info__daddr(right->mem_info)->phys_addr;
2017
2018 return (int64_t)(r - l);
2019 }
2020
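/* emit "[<level>] <physical address>", padded or truncated to exactly 'width' columns */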
2021 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
2022 size_t size, unsigned int width)
2023 {
2024 uint64_t addr = 0;
2025 size_t ret = 0;
2026 size_t len = BITS_PER_LONG / 4;
2027
2028 addr = mem_info__daddr(he->mem_info)->phys_addr;
2029
2030 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
2031
2032 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
2033
2034 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
2035
2036 if (ret > width)
2037 bf[width] = '\0';
2038
2039 return width;
2040 }
2041
2042 struct sort_entry sort_mem_phys_daddr = {
2043 .se_header = "Data Physical Address",
2044 .se_cmp = sort__phys_daddr_cmp,
2045 .se_snprintf = hist_entry__phys_daddr_snprintf,
2046 .se_width_idx = HISTC_MEM_PHYS_DADDR,
2047 };
2048
2049 static int64_t
2050 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2051 {
2052 uint64_t l = 0, r = 0;
2053
2054 if (left->mem_info)
2055 l = mem_info__daddr(left->mem_info)->data_page_size;
2056 if (right->mem_info)
2057 r = mem_info__daddr(right->mem_info)->data_page_size;
2058
2059 return (int64_t)(r - l);
2060 }
2061
2062 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
2063 size_t size, unsigned int width)
2064 {
2065 char str[PAGE_SIZE_NAME_LEN];
2066
2067 return repsep_snprintf(bf, size, "%-*s", width,
2068 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
2069 }
2070
2071 struct sort_entry sort_mem_data_page_size = {
2072 .se_header = "Data Page Size",
2073 .se_cmp = sort__data_page_size_cmp,
2074 .se_snprintf = hist_entry__data_page_size_snprintf,
2075 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
2076 };
2077
2078 static int64_t
2079 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2080 {
2081 uint64_t l = left->code_page_size;
2082 uint64_t r = right->code_page_size;
2083
2084 return (int64_t)(r - l);
2085 }
2086
2087 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2088 size_t size, unsigned int width)
2089 {
2090 char str[PAGE_SIZE_NAME_LEN];
2091
2092 return repsep_snprintf(bf, size, "%-*s", width,
2093 get_page_size_name(he->code_page_size, str));
2094 }
2095
2096 struct sort_entry sort_code_page_size = {
2097 .se_header = "Code Page Size",
2098 .se_cmp = sort__code_page_size_cmp,
2099 .se_snprintf = hist_entry__code_page_size_snprintf,
2100 .se_width_idx = HISTC_CODE_PAGE_SIZE,
2101 };
2102
2103 static int64_t
2104 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2105 {
2106 if (!left->branch_info || !right->branch_info)
2107 return cmp_null(left->branch_info, right->branch_info);
2108
2109 return left->branch_info->flags.abort !=
2110 right->branch_info->flags.abort;
2111 }
2112
2113 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2114 size_t size, unsigned int width)
2115 {
2116 static const char *out = "N/A";
2117
2118 if (he->branch_info) {
2119 if (he->branch_info->flags.abort)
2120 out = "A";
2121 else
2122 out = ".";
2123 }
2124
2125 return repsep_snprintf(bf, size, "%-*s", width, out);
2126 }
2127
2128 struct sort_entry sort_abort = {
2129 .se_header = "Transaction abort",
2130 .se_cmp = sort__abort_cmp,
2131 .se_snprintf = hist_entry__abort_snprintf,
2132 .se_width_idx = HISTC_ABORT,
2133 };
2134
2135 static int64_t
2136 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2137 {
2138 if (!left->branch_info || !right->branch_info)
2139 return cmp_null(left->branch_info, right->branch_info);
2140
2141 return left->branch_info->flags.in_tx !=
2142 right->branch_info->flags.in_tx;
2143 }
2144
2145 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2146 size_t size, unsigned int width)
2147 {
2148 static const char *out = "N/A";
2149
2150 if (he->branch_info) {
2151 if (he->branch_info->flags.in_tx)
2152 out = "T";
2153 else
2154 out = ".";
2155 }
2156
2157 return repsep_snprintf(bf, size, "%-*s", width, out);
2158 }
2159
2160 struct sort_entry sort_in_tx = {
2161 .se_header = "Branch in transaction",
2162 .se_cmp = sort__in_tx_cmp,
2163 .se_snprintf = hist_entry__in_tx_snprintf,
2164 .se_width_idx = HISTC_IN_TX,
2165 };
2166
2167 static int64_t
2168 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2169 {
2170 return left->transaction - right->transaction;
2171 }
2172
2173 static inline char *add_str(char *p, const char *str)
2174 {
2175 strcpy(p, str);
2176 return p + strlen(str);
2177 }
2178
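/*
 * Transaction flag names used by --sort transaction.  Entries with
 * .skip_for_len set are excluded from the column width estimated in
 * hist_entry__transaction_len().
 */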
2179 static struct txbit {
2180 unsigned flag;
2181 const char *name;
2182 int skip_for_len;
2183 } txbits[] = {
2184 { PERF_TXN_ELISION, "EL ", 0 },
2185 { PERF_TXN_TRANSACTION, "TX ", 1 },
2186 { PERF_TXN_SYNC, "SYNC ", 1 },
2187 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2188 { PERF_TXN_RETRY, "RETRY ", 0 },
2189 { PERF_TXN_CONFLICT, "CON ", 0 },
2190 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2191 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
2192 { 0, NULL, 0 }
2193 };
2194
2195 int hist_entry__transaction_len(void)
2196 {
2197 int i;
2198 int len = 0;
2199
2200 for (i = 0; txbits[i].name; i++) {
2201 if (!txbits[i].skip_for_len)
2202 len += strlen(txbits[i].name);
2203 }
2204 len += 4; /* :XX<space> */
2205 return len;
2206 }
2207
2208 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2209 size_t size, unsigned int width)
2210 {
2211 u64 t = he->transaction;
2212 char buf[128];
2213 char *p = buf;
2214 int i;
2215
2216 buf[0] = 0;
2217 for (i = 0; txbits[i].name; i++)
2218 if (txbits[i].flag & t)
2219 p = add_str(p, txbits[i].name);
2220 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2221 p = add_str(p, "NEITHER ");
2222 if (t & PERF_TXN_ABORT_MASK) {
2223 sprintf(p, ":%" PRIx64,
2224 (t & PERF_TXN_ABORT_MASK) >>
2225 PERF_TXN_ABORT_SHIFT);
2226 p += strlen(p);
2227 }
2228
2229 return repsep_snprintf(bf, size, "%-*s", width, buf);
2230 }
2231
2232 struct sort_entry sort_transaction = {
2233 .se_header = "Transaction ",
2234 .se_cmp = sort__transaction_cmp,
2235 .se_snprintf = hist_entry__transaction_snprintf,
2236 .se_width_idx = HISTC_TRANSACTION,
2237 };
2238
2239 /* --sort symbol_size */
2240
2241 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2242 {
2243 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2244 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2245
2246 return size_l < size_r ? -1 :
2247 size_l == size_r ? 0 : 1;
2248 }
2249
2250 static int64_t
2251 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2252 {
2253 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2254 }
2255
2256 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2257 size_t bf_size, unsigned int width)
2258 {
2259 if (sym)
2260 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2261
2262 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2263 }
2264
2265 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2266 size_t size, unsigned int width)
2267 {
2268 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2269 }
2270
2271 struct sort_entry sort_sym_size = {
2272 .se_header = "Symbol size",
2273 .se_cmp = sort__sym_size_cmp,
2274 .se_snprintf = hist_entry__sym_size_snprintf,
2275 .se_width_idx = HISTC_SYM_SIZE,
2276 };
2277
2278 /* --sort dso_size */
2279
2280 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2281 {
2282 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2283 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2284
2285 return size_l < size_r ? -1 :
2286 size_l == size_r ? 0 : 1;
2287 }
2288
2289 static int64_t
2290 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2291 {
2292 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2293 }
2294
2295 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2296 size_t bf_size, unsigned int width)
2297 {
2298 if (map && map__dso(map))
2299 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2300
2301 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2302 }
2303
2304 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2305 size_t size, unsigned int width)
2306 {
2307 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2308 }
2309
2310 struct sort_entry sort_dso_size = {
2311 .se_header = "DSO size",
2312 .se_cmp = sort__dso_size_cmp,
2313 .se_snprintf = hist_entry__dso_size_snprintf,
2314 .se_width_idx = HISTC_DSO_SIZE,
2315 };
2316
2317 /* --sort addr */
2318
2319 static int64_t
2320 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2321 {
2322 u64 left_ip = left->ip;
2323 u64 right_ip = right->ip;
2324 struct map *left_map = left->ms.map;
2325 struct map *right_map = right->ms.map;
2326
2327 if (left_map)
2328 left_ip = map__unmap_ip(left_map, left_ip);
2329 if (right_map)
2330 right_ip = map__unmap_ip(right_map, right_ip);
2331
2332 return _sort__addr_cmp(left_ip, right_ip);
2333 }
2334
2335 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2336 size_t size, unsigned int width)
2337 {
2338 u64 ip = he->ip;
2339 struct map *map = he->ms.map;
2340
2341 if (map)
2342 ip = map__unmap_ip(map, ip);
2343
2344 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2345 }
2346
2347 struct sort_entry sort_addr = {
2348 .se_header = "Address",
2349 .se_cmp = sort__addr_cmp,
2350 .se_snprintf = hist_entry__addr_snprintf,
2351 .se_width_idx = HISTC_ADDR,
2352 };
2353
2354 /* --sort type */
2355
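/* placeholder data type used when the sampled data type cannot be resolved */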
2356 struct annotated_data_type unknown_type = {
2357 .self = {
2358 .type_name = (char *)"(unknown)",
2359 .children = LIST_HEAD_INIT(unknown_type.self.children),
2360 },
2361 };
2362
2363 static int64_t
2364 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2365 {
2366 return sort__addr_cmp(left, right);
2367 }
2368
2369 static void sort__type_init(struct hist_entry *he)
2370 {
2371 if (he->mem_type)
2372 return;
2373
2374 he->mem_type = hist_entry__get_data_type(he);
2375 if (he->mem_type == NULL) {
2376 he->mem_type = &unknown_type;
2377 he->mem_type_off = 0;
2378 }
2379 }
2380
2381 static int64_t
2382 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2383 {
2384 struct annotated_data_type *left_type = left->mem_type;
2385 struct annotated_data_type *right_type = right->mem_type;
2386
2387 if (!left_type) {
2388 sort__type_init(left);
2389 left_type = left->mem_type;
2390 }
2391
2392 if (!right_type) {
2393 sort__type_init(right);
2394 right_type = right->mem_type;
2395 }
2396
2397 return strcmp(left_type->self.type_name, right_type->self.type_name);
2398 }
2399
2400 static int64_t
2401 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2402 {
2403 return sort__type_collapse(left, right);
2404 }
2405
2406 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2407 size_t size, unsigned int width)
2408 {
2409 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2410 }
2411
2412 struct sort_entry sort_type = {
2413 .se_header = "Data Type",
2414 .se_cmp = sort__type_cmp,
2415 .se_collapse = sort__type_collapse,
2416 .se_sort = sort__type_sort,
2417 .se_init = sort__type_init,
2418 .se_snprintf = hist_entry__type_snprintf,
2419 .se_width_idx = HISTC_TYPE,
2420 };
2421
2422 /* --sort typeoff */
2423
2424 static int64_t
2425 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2426 {
2427 struct annotated_data_type *left_type = left->mem_type;
2428 struct annotated_data_type *right_type = right->mem_type;
2429 int64_t ret;
2430
2431 if (!left_type) {
2432 sort__type_init(left);
2433 left_type = left->mem_type;
2434 }
2435
2436 if (!right_type) {
2437 sort__type_init(right);
2438 right_type = right->mem_type;
2439 }
2440
2441 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2442 if (ret)
2443 return ret;
2444 return left->mem_type_off - right->mem_type_off;
2445 }
2446
2447 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2448 size_t size, unsigned int width __maybe_unused)
2449 {
2450 struct annotated_data_type *he_type = he->mem_type;
2451 char buf[4096];
2452
2453 if (he_type == &unknown_type || he_type == &stackop_type ||
2454 he_type == &canary_type)
2455 return repsep_snprintf(bf, size, "%s", he_type->self.type_name);
2456
2457 if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
2458 he->mem_type_off))
2459 scnprintf(buf, sizeof(buf), "no field");
2460
2461 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2462 he->mem_type_off, buf);
2463 }
2464
2465 struct sort_entry sort_type_offset = {
2466 .se_header = "Data Type Offset",
2467 .se_cmp = sort__type_cmp,
2468 .se_collapse = sort__typeoff_sort,
2469 .se_sort = sort__typeoff_sort,
2470 .se_init = sort__type_init,
2471 .se_snprintf = hist_entry__typeoff_snprintf,
2472 .se_width_idx = HISTC_TYPE_OFFSET,
2473 };
2474
2475 /* --sort typecln */
2476
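/* fallback when cacheline_size() reports 0 */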
2477 #define DEFAULT_CACHELINE_SIZE 64
2478
2479 static int64_t
2480 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2481 {
2482 struct annotated_data_type *left_type = left->mem_type;
2483 struct annotated_data_type *right_type = right->mem_type;
2484 int64_t left_cln, right_cln;
2485 int64_t ret;
2486 int cln_size = cacheline_size();
2487
2488 if (cln_size == 0)
2489 cln_size = DEFAULT_CACHELINE_SIZE;
2490
2491 if (!left_type) {
2492 sort__type_init(left);
2493 left_type = left->mem_type;
2494 }
2495
2496 if (!right_type) {
2497 sort__type_init(right);
2498 right_type = right->mem_type;
2499 }
2500
2501 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2502 if (ret)
2503 return ret;
2504
2505 left_cln = left->mem_type_off / cln_size;
2506 right_cln = right->mem_type_off / cln_size;
2507 return left_cln - right_cln;
2508 }
2509
2510 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2511 size_t size, unsigned int width __maybe_unused)
2512 {
2513 struct annotated_data_type *he_type = he->mem_type;
2514 int cln_size = cacheline_size();
2515
2516 if (cln_size == 0)
2517 cln_size = DEFAULT_CACHELINE_SIZE;
2518
2519 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2520 he->mem_type_off / cln_size);
2521 }
2522
2523 struct sort_entry sort_type_cacheline = {
2524 .se_header = "Data Type Cacheline",
2525 .se_cmp = sort__type_cmp,
2526 .se_collapse = sort__typecln_sort,
2527 .se_sort = sort__typecln_sort,
2528 .se_init = sort__type_init,
2529 .se_snprintf = hist_entry__typecln_snprintf,
2530 .se_width_idx = HISTC_TYPE_CACHELINE,
2531 };
2532
2533
2534 struct sort_dimension {
2535 const char *name;
2536 struct sort_entry *entry;
2537 int taken;
2538 };
2539
2540 static int arch_support_sort_key(const char *sort_key, struct perf_env *env)
2541 {
2542 const char *arch = perf_env__arch(env);
2543
2544 if (!strcmp("x86", arch) || !strcmp("powerpc", arch)) {
2545 if (!strcmp(sort_key, "p_stage_cyc"))
2546 return 1;
2547 if (!strcmp(sort_key, "local_p_stage_cyc"))
2548 return 1;
2549 }
2550 return 0;
2551 }
2552
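/*
 * Map the generic pipeline stage cycle / instruction latency headers to
 * the terms used by the architecture: retire latency on x86, dispatch
 * and finish cycles on powerpc.
 */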
2553 static const char *arch_perf_header_entry(const char *se_header, struct perf_env *env)
2554 {
2555 const char *arch = perf_env__arch(env);
2556
2557 if (!strcmp("x86", arch)) {
2558 if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
2559 return "Local Retire Latency";
2560 else if (!strcmp(se_header, "Pipeline Stage Cycle"))
2561 return "Retire Latency";
2562 } else if (!strcmp("powerpc", arch)) {
2563 if (!strcmp(se_header, "Local INSTR Latency"))
2564 return "Finish Cyc";
2565 else if (!strcmp(se_header, "INSTR Latency"))
2566 return "Global Finish_cyc";
2567 else if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
2568 return "Dispatch Cyc";
2569 else if (!strcmp(se_header, "Pipeline Stage Cycle"))
2570 return "Global Dispatch_cyc";
2571 }
2572 return se_header;
2573 }
2574
2575 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd, struct perf_env *env)
2576 {
2577 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header, env);
2578 }
2579
2580 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2581
2582 static struct sort_dimension common_sort_dimensions[] = {
2583 DIM(SORT_PID, "pid", sort_thread),
2584 DIM(SORT_TGID, "tgid", sort_tgid),
2585 DIM(SORT_COMM, "comm", sort_comm),
2586 DIM(SORT_DSO, "dso", sort_dso),
2587 DIM(SORT_SYM, "symbol", sort_sym),
2588 DIM(SORT_PARENT, "parent", sort_parent),
2589 DIM(SORT_CPU, "cpu", sort_cpu),
2590 DIM(SORT_SOCKET, "socket", sort_socket),
2591 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2592 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2593 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2594 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2595 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2596 #ifdef HAVE_LIBTRACEEVENT
2597 DIM(SORT_TRACE, "trace", sort_trace),
2598 #endif
2599 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2600 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2601 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2602 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2603 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2604 DIM(SORT_TIME, "time", sort_time),
2605 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2606 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2607 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2608 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2609 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2610 DIM(SORT_ADDR, "addr", sort_addr),
2611 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2612 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2613 DIM(SORT_SIMD, "simd", sort_simd),
2614 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2615 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2616 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2617 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2618 DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
2619 };
2620
2621 #undef DIM
2622
2623 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2624
2625 static struct sort_dimension bstack_sort_dimensions[] = {
2626 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2627 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2628 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2629 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2630 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2631 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2632 DIM(SORT_ABORT, "abort", sort_abort),
2633 DIM(SORT_CYCLES, "cycles", sort_cycles),
2634 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2635 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2636 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2637 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2638 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2639 DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2640 "callchain_branch_predicted",
2641 sort_callchain_branch_predicted),
2642 DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2643 "callchain_branch_abort",
2644 sort_callchain_branch_abort),
2645 DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2646 "callchain_branch_cycles",
2647 sort_callchain_branch_cycles)
2648 };
2649
2650 #undef DIM
2651
2652 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2653
2654 static struct sort_dimension memory_sort_dimensions[] = {
2655 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2656 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2657 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2658 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2659 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2660 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2661 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2662 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2663 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2664 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2665 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2666 };
2667
2668 #undef DIM
2669
2670 struct hpp_dimension {
2671 const char *name;
2672 struct perf_hpp_fmt *fmt;
2673 int taken;
2674 int was_taken;
2675 int mem_mode;
2676 };
2677
2678 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2679 #define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }
2680
2681 static struct hpp_dimension hpp_sort_dimensions[] = {
2682 DIM(PERF_HPP__OVERHEAD, "overhead"),
2683 DIM(PERF_HPP__LATENCY, "latency"),
2684 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2685 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2686 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2687 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2688 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2689 DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
2690 DIM(PERF_HPP__SAMPLES, "sample"),
2691 DIM(PERF_HPP__PERIOD, "period"),
2692 DIM(PERF_HPP__WEIGHT1, "weight1"),
2693 DIM(PERF_HPP__WEIGHT2, "weight2"),
2694 DIM(PERF_HPP__WEIGHT3, "weight3"),
2695 /* aliases for weight_struct */
2696 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2697 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2698 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2699 /* used for output only when SORT_MODE__MEM */
2700 DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
2701 DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
2702 DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
2703 DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
2704 DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
2705 };
2706
2707 #undef DIM_MEM
2708 #undef DIM
2709
2710 struct hpp_sort_entry {
2711 struct perf_hpp_fmt hpp;
2712 struct sort_entry *se;
2713 };
2714
2715 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2716 {
2717 struct hpp_sort_entry *hse;
2718
2719 if (!perf_hpp__is_sort_entry(fmt))
2720 return;
2721
2722 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2723 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2724 }
2725
2726 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2727 struct hists *hists, int line,
2728 int *span __maybe_unused)
2729 {
2730 struct hpp_sort_entry *hse;
2731 size_t len = fmt->user_len;
2732 const char *hdr = "";
2733
2734 if (line == hists->hpp_list->nr_header_lines - 1)
2735 hdr = fmt->name;
2736
2737 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2738
2739 if (!len)
2740 len = hists__col_len(hists, hse->se->se_width_idx);
2741
2742 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
2743 }
2744
2745 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2746 struct perf_hpp *hpp __maybe_unused,
2747 struct hists *hists)
2748 {
2749 struct hpp_sort_entry *hse;
2750 size_t len = fmt->user_len;
2751
2752 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2753
2754 if (!len)
2755 len = hists__col_len(hists, hse->se->se_width_idx);
2756
2757 return len;
2758 }
2759
2760 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2761 struct hist_entry *he)
2762 {
2763 struct hpp_sort_entry *hse;
2764 size_t len = fmt->user_len;
2765
2766 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2767
2768 if (!len)
2769 len = hists__col_len(he->hists, hse->se->se_width_idx);
2770
2771 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2772 }
2773
2774 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2775 struct hist_entry *a, struct hist_entry *b)
2776 {
2777 struct hpp_sort_entry *hse;
2778
2779 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2780 return hse->se->se_cmp(a, b);
2781 }
2782
2783 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2784 struct hist_entry *a, struct hist_entry *b)
2785 {
2786 struct hpp_sort_entry *hse;
2787 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2788
2789 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2790 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2791 return collapse_fn(a, b);
2792 }
2793
2794 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2795 struct hist_entry *a, struct hist_entry *b)
2796 {
2797 struct hpp_sort_entry *hse;
2798 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2799
2800 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2801 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2802 return sort_fn(a, b);
2803 }
2804
2805 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2806 {
2807 return format->header == __sort__hpp_header;
2808 }
2809
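/*
 * Generate perf_hpp__is_<key>_entry() helpers that test whether a hpp
 * format was built from the corresponding sort_<key> entry.
 */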
2810 #define MK_SORT_ENTRY_CHK(key) \
2811 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2812 { \
2813 struct hpp_sort_entry *hse; \
2814 \
2815 if (!perf_hpp__is_sort_entry(fmt)) \
2816 return false; \
2817 \
2818 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2819 return hse->se == &sort_ ## key ; \
2820 }
2821
2822 #ifdef HAVE_LIBTRACEEVENT
2823 MK_SORT_ENTRY_CHK(trace)
2824 #else
2825 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2826 {
2827 return false;
2828 }
2829 #endif
2830 MK_SORT_ENTRY_CHK(srcline)
2831 MK_SORT_ENTRY_CHK(srcfile)
2832 MK_SORT_ENTRY_CHK(thread)
2833 MK_SORT_ENTRY_CHK(comm)
2834 MK_SORT_ENTRY_CHK(dso)
2835 MK_SORT_ENTRY_CHK(sym)
2836 MK_SORT_ENTRY_CHK(parallelism)
2837
2838
2839 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2840 {
2841 struct hpp_sort_entry *hse_a;
2842 struct hpp_sort_entry *hse_b;
2843
2844 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2845 return false;
2846
2847 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2848 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2849
2850 return hse_a->se == hse_b->se;
2851 }
2852
2853 static void hse_free(struct perf_hpp_fmt *fmt)
2854 {
2855 struct hpp_sort_entry *hse;
2856
2857 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2858 free(hse);
2859 }
2860
2861 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2862 {
2863 struct hpp_sort_entry *hse;
2864
2865 if (!perf_hpp__is_sort_entry(fmt))
2866 return;
2867
2868 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2869
2870 if (hse->se->se_init)
2871 hse->se->se_init(he);
2872 }
2873
2874 static struct hpp_sort_entry *
2875 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2876 {
2877 struct hpp_sort_entry *hse;
2878
2879 hse = malloc(sizeof(*hse));
2880 if (hse == NULL) {
2881 pr_err("Memory allocation failed\n");
2882 return NULL;
2883 }
2884
2885 hse->se = sd->entry;
2886 hse->hpp.name = sd->entry->se_header;
2887 hse->hpp.header = __sort__hpp_header;
2888 hse->hpp.width = __sort__hpp_width;
2889 hse->hpp.entry = __sort__hpp_entry;
2890 hse->hpp.color = NULL;
2891
2892 hse->hpp.cmp = __sort__hpp_cmp;
2893 hse->hpp.collapse = __sort__hpp_collapse;
2894 hse->hpp.sort = __sort__hpp_sort;
2895 hse->hpp.equal = __sort__hpp_equal;
2896 hse->hpp.free = hse_free;
2897 hse->hpp.init = hse_init;
2898
2899 INIT_LIST_HEAD(&hse->hpp.list);
2900 INIT_LIST_HEAD(&hse->hpp.sort_list);
2901 hse->hpp.elide = false;
2902 hse->hpp.len = 0;
2903 hse->hpp.user_len = 0;
2904 hse->hpp.level = level;
2905
2906 return hse;
2907 }
2908
2909 static void hpp_free(struct perf_hpp_fmt *fmt)
2910 {
2911 free(fmt);
2912 }
2913
2914 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2915 int level)
2916 {
2917 struct perf_hpp_fmt *fmt;
2918
2919 fmt = memdup(hd->fmt, sizeof(*fmt));
2920 if (fmt) {
2921 INIT_LIST_HEAD(&fmt->list);
2922 INIT_LIST_HEAD(&fmt->sort_list);
2923 fmt->free = hpp_free;
2924 fmt->level = level;
2925 }
2926
2927 return fmt;
2928 }
2929
2930 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2931 {
2932 struct perf_hpp_fmt *fmt;
2933 struct hpp_sort_entry *hse;
2934 int ret = -1;
2935 int r;
2936
2937 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2938 if (!perf_hpp__is_sort_entry(fmt))
2939 continue;
2940
2941 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2942 if (hse->se->se_filter == NULL)
2943 continue;
2944
2945 /*
2946 * hist entry is filtered if any of sort key in the hpp list
2947 * is applied. But it should skip non-matched filter types.
2948 */
2949 r = hse->se->se_filter(he, type, arg);
2950 if (r >= 0) {
2951 if (ret < 0)
2952 ret = 0;
2953 ret |= r;
2954 }
2955 }
2956
2957 return ret;
2958 }
2959
2960 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2961 struct perf_hpp_list *list,
2962 int level)
2963 {
2964 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2965
2966 if (hse == NULL)
2967 return -1;
2968
2969 perf_hpp_list__register_sort_field(list, &hse->hpp);
2970 return 0;
2971 }
2972
2973 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2974 struct perf_hpp_list *list,
2975 int level)
2976 {
2977 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2978
2979 if (hse == NULL)
2980 return -1;
2981
2982 perf_hpp_list__column_register(list, &hse->hpp);
2983 return 0;
2984 }
2985
2986 #ifndef HAVE_LIBTRACEEVENT
2987 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2988 {
2989 return false;
2990 }
2991 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2992 struct hists *hists __maybe_unused)
2993 {
2994 return false;
2995 }
2996 #else
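/*
 * A sort/output column created at runtime for a single tracepoint field
 * of one evsel, e.g. via --sort <event>.<field> or trace_fields.
 */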
2997 struct hpp_dynamic_entry {
2998 struct perf_hpp_fmt hpp;
2999 struct evsel *evsel;
3000 struct tep_format_field *field;
3001 unsigned dynamic_len;
3002 bool raw_trace;
3003 };
3004
3005 static int hde_width(struct hpp_dynamic_entry *hde)
3006 {
3007 if (!hde->hpp.len) {
3008 int len = hde->dynamic_len;
3009 int namelen = strlen(hde->field->name);
3010 int fieldlen = hde->field->size;
3011
3012 if (namelen > len)
3013 len = namelen;
3014
3015 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
3016 /* length for print hex numbers */
3017 fieldlen = hde->field->size * 2 + 2;
3018 }
3019 if (fieldlen > len)
3020 len = fieldlen;
3021
3022 hde->hpp.len = len;
3023 }
3024 return hde->hpp.len;
3025 }
3026
3027 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
3028 struct hist_entry *he)
3029 {
3030 char *str, *pos;
3031 struct tep_format_field *field = hde->field;
3032 size_t namelen;
3033 bool last = false;
3034
3035 if (hde->raw_trace)
3036 return;
3037
3038 /* parse pretty print result and update max length */
3039 if (!he->trace_output)
3040 he->trace_output = get_trace_output(he);
3041
3042 namelen = strlen(field->name);
3043 str = he->trace_output;
3044
3045 while (str) {
3046 pos = strchr(str, ' ');
3047 if (pos == NULL) {
3048 last = true;
3049 pos = str + strlen(str);
3050 }
3051
3052 if (!strncmp(str, field->name, namelen)) {
3053 size_t len;
3054
3055 str += namelen + 1;
3056 len = pos - str;
3057
3058 if (len > hde->dynamic_len)
3059 hde->dynamic_len = len;
3060 break;
3061 }
3062
3063 if (last)
3064 str = NULL;
3065 else
3066 str = pos + 1;
3067 }
3068 }
3069
3070 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3071 struct hists *hists __maybe_unused,
3072 int line __maybe_unused,
3073 int *span __maybe_unused)
3074 {
3075 struct hpp_dynamic_entry *hde;
3076 size_t len = fmt->user_len;
3077
3078 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3079
3080 if (!len)
3081 len = hde_width(hde);
3082
3083 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
3084 }
3085
3086 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
3087 struct perf_hpp *hpp __maybe_unused,
3088 struct hists *hists __maybe_unused)
3089 {
3090 struct hpp_dynamic_entry *hde;
3091 size_t len = fmt->user_len;
3092
3093 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3094
3095 if (!len)
3096 len = hde_width(hde);
3097
3098 return len;
3099 }
3100
3101 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3102 {
3103 struct hpp_dynamic_entry *hde;
3104
3105 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3106
3107 return hists_to_evsel(hists) == hde->evsel;
3108 }
3109
3110 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3111 struct hist_entry *he)
3112 {
3113 struct hpp_dynamic_entry *hde;
3114 size_t len = fmt->user_len;
3115 char *str, *pos;
3116 struct tep_format_field *field;
3117 size_t namelen;
3118 bool last = false;
3119 int ret;
3120
3121 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3122
3123 if (!len)
3124 len = hde_width(hde);
3125
3126 if (hde->raw_trace)
3127 goto raw_field;
3128
3129 if (!he->trace_output)
3130 he->trace_output = get_trace_output(he);
3131
3132 field = hde->field;
3133 namelen = strlen(field->name);
3134 str = he->trace_output;
3135
3136 while (str) {
3137 pos = strchr(str, ' ');
3138 if (pos == NULL) {
3139 last = true;
3140 pos = str + strlen(str);
3141 }
3142
3143 if (!strncmp(str, field->name, namelen)) {
3144 str += namelen + 1;
3145 str = strndup(str, pos - str);
3146
3147 if (str == NULL)
3148 return scnprintf(hpp->buf, hpp->size,
3149 "%*.*s", len, len, "ERROR");
3150 break;
3151 }
3152
3153 if (last)
3154 str = NULL;
3155 else
3156 str = pos + 1;
3157 }
3158
3159 if (str == NULL) {
3160 struct trace_seq seq;
3161 raw_field:
3162 trace_seq_init(&seq);
3163 tep_print_field(&seq, he->raw_data, hde->field);
3164 str = seq.buffer;
3165 }
3166
3167 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3168 free(str);
3169 return ret;
3170 }
3171
3172 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3173 struct hist_entry *a, struct hist_entry *b)
3174 {
3175 struct hpp_dynamic_entry *hde;
3176 struct tep_format_field *field;
3177 unsigned offset, size;
3178
3179 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3180
3181 field = hde->field;
3182 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3183 unsigned long long dyn;
3184
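		/* dynamic fields encode the data offset in the low 16 bits and the size in the high 16 bits */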
3185 tep_read_number_field(field, a->raw_data, &dyn);
3186 offset = dyn & 0xffff;
3187 size = (dyn >> 16) & 0xffff;
3188 if (tep_field_is_relative(field->flags))
3189 offset += field->offset + field->size;
3190 /* record max width for output */
3191 if (size > hde->dynamic_len)
3192 hde->dynamic_len = size;
3193 } else {
3194 offset = field->offset;
3195 size = field->size;
3196 }
3197
3198 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3199 }
3200
3201 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3202 {
3203 return fmt->cmp == __sort__hde_cmp;
3204 }
3205
3206 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3207 {
3208 struct hpp_dynamic_entry *hde_a;
3209 struct hpp_dynamic_entry *hde_b;
3210
3211 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3212 return false;
3213
3214 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3215 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3216
3217 return hde_a->field == hde_b->field;
3218 }
3219
3220 static void hde_free(struct perf_hpp_fmt *fmt)
3221 {
3222 struct hpp_dynamic_entry *hde;
3223
3224 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3225 free(hde);
3226 }
3227
3228 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3229 {
3230 struct hpp_dynamic_entry *hde;
3231
3232 if (!perf_hpp__is_dynamic_entry(fmt))
3233 return;
3234
3235 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3236 update_dynamic_len(hde, he);
3237 }
3238
3239 static struct hpp_dynamic_entry *
3240 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3241 int level)
3242 {
3243 struct hpp_dynamic_entry *hde;
3244
3245 hde = malloc(sizeof(*hde));
3246 if (hde == NULL) {
3247 pr_debug("Memory allocation failed\n");
3248 return NULL;
3249 }
3250
3251 hde->evsel = evsel;
3252 hde->field = field;
3253 hde->dynamic_len = 0;
3254
3255 hde->hpp.name = field->name;
3256 hde->hpp.header = __sort__hde_header;
3257 hde->hpp.width = __sort__hde_width;
3258 hde->hpp.entry = __sort__hde_entry;
3259 hde->hpp.color = NULL;
3260
3261 hde->hpp.init = __sort__hde_init;
3262 hde->hpp.cmp = __sort__hde_cmp;
3263 hde->hpp.collapse = __sort__hde_cmp;
3264 hde->hpp.sort = __sort__hde_cmp;
3265 hde->hpp.equal = __sort__hde_equal;
3266 hde->hpp.free = hde_free;
3267
3268 INIT_LIST_HEAD(&hde->hpp.list);
3269 INIT_LIST_HEAD(&hde->hpp.sort_list);
3270 hde->hpp.elide = false;
3271 hde->hpp.len = 0;
3272 hde->hpp.user_len = 0;
3273 hde->hpp.level = level;
3274
3275 return hde;
3276 }
3277 #endif /* HAVE_LIBTRACEEVENT */
3278
3279 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3280 {
3281 struct perf_hpp_fmt *new_fmt = NULL;
3282
3283 if (perf_hpp__is_sort_entry(fmt)) {
3284 struct hpp_sort_entry *hse, *new_hse;
3285
3286 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3287 new_hse = memdup(hse, sizeof(*hse));
3288 if (new_hse)
3289 new_fmt = &new_hse->hpp;
3290 #ifdef HAVE_LIBTRACEEVENT
3291 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3292 struct hpp_dynamic_entry *hde, *new_hde;
3293
3294 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3295 new_hde = memdup(hde, sizeof(*hde));
3296 if (new_hde)
3297 new_fmt = &new_hde->hpp;
3298 #endif
3299 } else {
3300 new_fmt = memdup(fmt, sizeof(*fmt));
3301 }
3302
3303 INIT_LIST_HEAD(&new_fmt->list);
3304 INIT_LIST_HEAD(&new_fmt->sort_list);
3305
3306 return new_fmt;
3307 }
3308
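/* split "[<event>.]<field>[/<option>]" in place; the event and option parts are NULL when absent */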
3309 static int parse_field_name(char *str, char **event, char **field, char **opt)
3310 {
3311 char *event_name, *field_name, *opt_name;
3312
3313 event_name = str;
3314 field_name = strchr(str, '.');
3315
3316 if (field_name) {
3317 *field_name++ = '\0';
3318 } else {
3319 event_name = NULL;
3320 field_name = str;
3321 }
3322
3323 opt_name = strchr(field_name, '/');
3324 if (opt_name)
3325 *opt_name++ = '\0';
3326
3327 *event = event_name;
3328 *field = field_name;
3329 *opt = opt_name;
3330
3331 return 0;
3332 }
3333
3334 /* find match evsel using a given event name. The event name can be:
3335 * 1. '%' + event index (e.g. '%1' for first event)
3336 * 2. full event name (e.g. sched:sched_switch)
3337 * 3. partial event name (should not contain ':')
3338 */
3339 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3340 {
3341 struct evsel *evsel = NULL;
3342 struct evsel *pos;
3343 bool full_name;
3344
3345 /* case 1 */
3346 if (event_name[0] == '%') {
3347 int nr = strtol(event_name+1, NULL, 0);
3348
3349 if (nr > evlist->core.nr_entries)
3350 return NULL;
3351
3352 evsel = evlist__first(evlist);
3353 while (--nr > 0)
3354 evsel = evsel__next(evsel);
3355
3356 return evsel;
3357 }
3358
3359 full_name = !!strchr(event_name, ':');
3360 evlist__for_each_entry(evlist, pos) {
3361 /* case 2 */
3362 if (full_name && evsel__name_is(pos, event_name))
3363 return pos;
3364 /* case 3 */
3365 if (!full_name && strstr(pos->name, event_name)) {
3366 if (evsel) {
3367 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3368 event_name, evsel->name, pos->name);
3369 return NULL;
3370 }
3371 evsel = pos;
3372 }
3373 }
3374
3375 return evsel;
3376 }
3377
3378 #ifdef HAVE_LIBTRACEEVENT
3379 static int __dynamic_dimension__add(struct evsel *evsel,
3380 struct tep_format_field *field,
3381 bool raw_trace, int level)
3382 {
3383 struct hpp_dynamic_entry *hde;
3384
3385 hde = __alloc_dynamic_entry(evsel, field, level);
3386 if (hde == NULL)
3387 return -ENOMEM;
3388
3389 hde->raw_trace = raw_trace;
3390
3391 perf_hpp__register_sort_field(&hde->hpp);
3392 return 0;
3393 }
3394
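/* Add a dynamic sort key for every field of the evsel's tracepoint format. */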
3395 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3396 {
3397 int ret;
3398 struct tep_event *tp_format = evsel__tp_format(evsel);
3399 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3400 while (field) {
3401 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3402 if (ret < 0)
3403 return ret;
3404
3405 field = field->next;
3406 }
3407 return 0;
3408 }
3409
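/* Add dynamic sort keys for all fields of all tracepoint events in the evlist. */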
3410 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3411 int level)
3412 {
3413 int ret;
3414 struct evsel *evsel;
3415
3416 evlist__for_each_entry(evlist, evsel) {
3417 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3418 continue;
3419
3420 ret = add_evsel_fields(evsel, raw_trace, level);
3421 if (ret < 0)
3422 return ret;
3423 }
3424 return 0;
3425 }
3426
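/*
* Add a dynamic sort key for the named field in every tracepoint event that
* has it. Returns -ESRCH if no event carries such a field.
*/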
3427 static int add_all_matching_fields(struct evlist *evlist,
3428 char *field_name, bool raw_trace, int level)
3429 {
3430 int ret = -ESRCH;
3431 struct evsel *evsel;
3432
3433 evlist__for_each_entry(evlist, evsel) {
3434 struct tep_event *tp_format;
3435 struct tep_format_field *field;
3436
3437 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3438 continue;
3439
3440 tp_format = evsel__tp_format(evsel);
3441 if (tp_format == NULL)
3442 continue;
3443
3444 field = tep_find_any_field(tp_format, field_name);
3445 if (field == NULL)
3446 continue;
3447
3448 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3449 if (ret < 0)
3450 break;
3451 }
3452 return ret;
3453 }
3454 #endif /* HAVE_LIBTRACEEVENT */
3455
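/*
* Handle a dynamic sort key of the form "[<event>.]<field>[/raw]" by
* resolving the evsel and the tracepoint field and registering it.
*/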
3456 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3457 int level)
3458 {
3459 char *str, *event_name, *field_name, *opt_name;
3460 struct evsel *evsel;
3461 bool raw_trace = symbol_conf.raw_trace;
3462 int ret = 0;
3463
3464 if (evlist == NULL)
3465 return -ENOENT;
3466
3467 str = strdup(tok);
3468 if (str == NULL)
3469 return -ENOMEM;
3470
3471 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3472 ret = -EINVAL;
3473 goto out;
3474 }
3475
3476 if (opt_name) {
3477 if (strcmp(opt_name, "raw")) {
3478 pr_debug("unsupported field option %s\n", opt_name);
3479 ret = -EINVAL;
3480 goto out;
3481 }
3482 raw_trace = true;
3483 }
3484
3485 #ifdef HAVE_LIBTRACEEVENT
3486 if (!strcmp(field_name, "trace_fields")) {
3487 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3488 goto out;
3489 }
3490
3491 if (event_name == NULL) {
3492 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3493 goto out;
3494 }
3495 #else
3496 evlist__for_each_entry(evlist, evsel) {
3497 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3498 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3499 ret = -ENOTSUP;
3500 }
3501 }
3502
3503 if (ret) {
3504 pr_err("\n");
3505 goto out;
3506 }
3507 #endif
3508
3509 evsel = find_evsel(evlist, event_name);
3510 if (evsel == NULL) {
3511 pr_debug("Cannot find event: %s\n", event_name);
3512 ret = -ENOENT;
3513 goto out;
3514 }
3515
3516 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3517 pr_debug("%s is not a tracepoint event\n", event_name);
3518 ret = -EINVAL;
3519 goto out;
3520 }
3521
3522 #ifdef HAVE_LIBTRACEEVENT
3523 if (!strcmp(field_name, "*")) {
3524 ret = add_evsel_fields(evsel, raw_trace, level);
3525 } else {
3526 struct tep_event *tp_format = evsel__tp_format(evsel);
3527 struct tep_format_field *field =
3528 tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
3529
3530 if (field == NULL) {
3531 pr_debug("Cannot find event field for %s.%s\n",
3532 event_name, field_name);
3533 ret = -ENOENT;
goto out;
3534 }
3535
3536 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3537 }
3538 #else
3539 (void)level;
3540 (void)raw_trace;
3541 #endif /* HAVE_LIBTRACEEVENT */
3542
3543 out:
3544 free(str);
3545 return ret;
3546 }
3547
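/*
* Update the hpp list flags (and related global state) for the sort key
* being added, e.g. compile the parent regex or enable symbol resolution.
*/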
3548 static int __sort_dimension__update(struct sort_dimension *sd,
3549 struct perf_hpp_list *list)
3550 {
3551 if (sd->entry == &sort_parent && parent_pattern) {
3552 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3553 if (ret) {
3554 char err[BUFSIZ];
3555
3556 regerror(ret, &parent_regex, err, sizeof(err));
3557 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3558 return -EINVAL;
3559 }
3560 list->parent = 1;
3561 } else if (sd->entry == &sort_sym) {
3562 list->sym = 1;
3563 /*
3564 * perf diff displays the performance difference among
3565 * two or more perf.data files. Those files could come
3566 * from different binaries, so we should not compare
3567 * their IPs but the symbol names.
3568 */
3569 if (sort__mode == SORT_MODE__DIFF)
3570 sd->entry->se_collapse = sort__sym_sort;
3571
3572 } else if (sd->entry == &sort_sym_offset) {
3573 list->sym = 1;
3574 } else if (sd->entry == &sort_dso) {
3575 list->dso = 1;
3576 } else if (sd->entry == &sort_socket) {
3577 list->socket = 1;
3578 } else if (sd->entry == &sort_thread) {
3579 list->thread = 1;
3580 } else if (sd->entry == &sort_comm) {
3581 list->comm = 1;
3582 } else if (sd->entry == &sort_type_offset) {
3583 symbol_conf.annotate_data_member = true;
3584 } else if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) {
3585 list->sym = 1;
3586 } else if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) {
3587 return -EINVAL;
3588 } else if (sd->entry == &sort_mem_daddr_sym) {
3589 list->sym = 1;
3590 }
3591
3592 if (sd->entry->se_collapse)
3593 list->need_collapse = 1;
3594
3595 return 0;
3596 }
3597
3598 static int __sort_dimension__add(struct sort_dimension *sd,
3599 struct perf_hpp_list *list,
3600 int level)
3601 {
3602 if (sd->taken)
3603 return 0;
3604
3605 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3606 return -1;
3607
3608 if (__sort_dimension__update(sd, list) < 0)
3609 return -1;
3610
3611 sd->taken = 1;
3612
3613 return 0;
3614 }
3615
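/* Register an hpp dimension as a sort key unless it was already taken. */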
3616 static int __hpp_dimension__add(struct hpp_dimension *hd,
3617 struct perf_hpp_list *list,
3618 int level)
3619 {
3620 struct perf_hpp_fmt *fmt;
3621
3622 if (hd->taken)
3623 return 0;
3624
3625 fmt = __hpp_dimension__alloc_hpp(hd, level);
3626 if (!fmt)
3627 return -1;
3628
3629 hd->taken = 1;
3630 hd->was_taken = 1;
3631 perf_hpp_list__register_sort_field(list, fmt);
3632 return 0;
3633 }
3634
3635 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3636 struct sort_dimension *sd,
3637 int level)
3638 {
3639 if (sd->taken)
3640 return 0;
3641
3642 if (__sort_dimension__add_hpp_output(sd, list, level) < 0)
3643 return -1;
3644
3645 if (__sort_dimension__update(sd, list) < 0)
3646 return -1;
3647
3648 sd->taken = 1;
3649 return 0;
3650 }
3651
3652 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3653 struct hpp_dimension *hd,
3654 int level)
3655 {
3656 struct perf_hpp_fmt *fmt;
3657
3658 if (hd->taken)
3659 return 0;
3660
3661 fmt = __hpp_dimension__alloc_hpp(hd, level);
3662 if (!fmt)
3663 return -1;
3664
3665 hd->taken = 1;
3666 perf_hpp_list__column_register(list, fmt);
3667 return 0;
3668 }
3669
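/*
* Add the hpp column to the output fields. When 'implicit' is set, only
* re-add columns that were previously taken as sort keys.
*/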
3670 int hpp_dimension__add_output(unsigned col, bool implicit)
3671 {
3672 struct hpp_dimension *hd;
3673
3674 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3675 hd = &hpp_sort_dimensions[col];
3676 if (implicit && !hd->was_taken)
3677 return 0;
3678 return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0);
3679 }
3680
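/*
* Look up 'tok' in the common, branch stack, memory and hpp dimension tables
* (and the dynamic tracepoint fields) and add it as a sort key.
*/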
3681 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3682 struct evlist *evlist, struct perf_env *env,
3683 int level)
3684 {
3685 unsigned int i, j;
3686
3687 /*
3688 * Check whether there are any arch-specific sort
3689 * dimensions that are not applicable to the current
3690 * architecture. If so, skip that sort key since we
3691 * don't want to display it in the output fields.
3692 */
3693 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3694 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3695 !arch_support_sort_key(tok, env)) {
3696 return 0;
3697 }
3698 }
3699
3700 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3701 struct sort_dimension *sd = &common_sort_dimensions[i];
3702
3703 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3704 continue;
3705
3706 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3707 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3708 sort_dimension_add_dynamic_header(sd, env);
3709 }
3710
3711 return __sort_dimension__add(sd, list, level);
3712 }
3713
3714 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3715 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3716
3717 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3718 continue;
3719
3720 if ((sort__mode != SORT_MODE__BRANCH) &&
3721 strncasecmp(tok, "callchain_branch_predicted",
3722 strlen(tok)) &&
3723 strncasecmp(tok, "callchain_branch_abort",
3724 strlen(tok)) &&
3725 strncasecmp(tok, "callchain_branch_cycles",
3726 strlen(tok)))
3727 return -EINVAL;
3728
3729 __sort_dimension__add(sd, list, level);
3730 return 0;
3731 }
3732
3733 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3734 struct sort_dimension *sd = &memory_sort_dimensions[i];
3735
3736 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3737 continue;
3738
3739 if (sort__mode != SORT_MODE__MEMORY)
3740 return -EINVAL;
3741
3742 __sort_dimension__add(sd, list, level);
3743 return 0;
3744 }
3745
3746 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3747 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3748
3749 if (strncasecmp(tok, hd->name, strlen(tok)))
3750 continue;
3751
3752 return __hpp_dimension__add(hd, list, level);
3753 }
3754
3755 if (!add_dynamic_entry(evlist, tok, level))
3756 return 0;
3757
3758 return -ESRCH;
3759 }
3760
3761 /* This should match with sort_dimension__add() above */
3762 static bool is_hpp_sort_key(const char *key, struct perf_env *env)
3763 {
3764 unsigned i;
3765
3766 for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
3767 if (!strcmp(arch_specific_sort_keys[i], key) &&
3768 !arch_support_sort_key(key, env)) {
3769 return false;
3770 }
3771 }
3772
3773 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3774 struct sort_dimension *sd = &common_sort_dimensions[i];
3775
3776 if (sd->name && !strncasecmp(key, sd->name, strlen(key)))
3777 return false;
3778 }
3779
3780 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3781 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3782
3783 if (!strncasecmp(key, hd->name, strlen(key)))
3784 return true;
3785 }
3786 return false;
3787 }
3788
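/*
* Parse the sort order string and add each key, tracking hierarchy levels
* for '{}' groups and keeping consecutive output (hpp) keys on one level.
*/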
3789 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3790 struct evlist *evlist, struct perf_env *env)
3791 {
3792 char *tmp, *tok;
3793 int ret = 0;
3794 int level = 0;
3795 int next_level = 1;
3796 int prev_level = 0;
3797 bool in_group = false;
3798 bool prev_was_hpp = false;
3799
3800 do {
3801 tok = str;
3802 tmp = strpbrk(str, "{}, ");
3803 if (tmp) {
3804 if (in_group)
3805 next_level = level;
3806 else
3807 next_level = level + 1;
3808
3809 if (*tmp == '{')
3810 in_group = true;
3811 else if (*tmp == '}')
3812 in_group = false;
3813
3814 *tmp = '\0';
3815 str = tmp + 1;
3816 }
3817
3818 if (*tok) {
3819 if (is_hpp_sort_key(tok, env)) {
3820 /* keep output (hpp) sort keys in the same level */
3821 if (prev_was_hpp) {
3822 bool next_same = (level == next_level);
3823
3824 level = prev_level;
3825 next_level = next_same ? level : level+1;
3826 }
3827 prev_was_hpp = true;
3828 } else {
3829 prev_was_hpp = false;
3830 }
3831
3832 ret = sort_dimension__add(list, tok, evlist, env, level);
3833 if (ret == -EINVAL) {
3834 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3835 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3836 else
3837 ui__error("Invalid --sort key: `%s'", tok);
3838 break;
3839 } else if (ret == -ESRCH) {
3840 ui__error("Unknown --sort key: `%s'", tok);
3841 break;
3842 }
3843 prev_level = level;
3844 }
3845
3846 level = next_level;
3847 } while (tmp);
3848
3849 return ret;
3850 }
3851
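/*
* Pick the default sort order for the current sort mode, switching to the
* tracepoint mode when every event in the evlist is a tracepoint.
*/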
3852 static const char *get_default_sort_order(struct evlist *evlist)
3853 {
3854 const char *default_sort_orders[] = {
3855 default_sort_order,
3856 default_branch_sort_order,
3857 default_mem_sort_order,
3858 default_top_sort_order,
3859 default_diff_sort_order,
3860 default_tracepoint_sort_order,
3861 };
3862 bool use_trace = true;
3863 struct evsel *evsel;
3864
3865 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3866
3867 if (evlist == NULL || evlist__empty(evlist))
3868 goto out_no_evlist;
3869
3870 evlist__for_each_entry(evlist, evsel) {
3871 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3872 use_trace = false;
3873 break;
3874 }
3875 }
3876
3877 if (use_trace) {
3878 sort__mode = SORT_MODE__TRACEPOINT;
3879 if (symbol_conf.raw_trace)
3880 return "trace_fields";
3881 }
3882 out_no_evlist:
3883 return default_sort_orders[sort__mode];
3884 }
3885
3886 static int setup_sort_order(struct evlist *evlist)
3887 {
3888 char *new_sort_order;
3889
3890 /*
3891 * Append '+'-prefixed sort order to the default sort
3892 * order string.
3893 */
3894 if (!sort_order || is_strict_order(sort_order))
3895 return 0;
3896
3897 if (sort_order[1] == '\0') {
3898 ui__error("Invalid --sort key: `+'");
3899 return -EINVAL;
3900 }
3901
3902 /*
3903 * We allocate a new sort_order string but never free it,
3904 * because it is referenced throughout the rest of the code.
3905 */
3906 if (asprintf(&new_sort_order, "%s,%s",
3907 get_default_sort_order(evlist), sort_order + 1) < 0) {
3908 pr_err("Not enough memory to set up --sort");
3909 return -ENOMEM;
3910 }
3911
3912 sort_order = new_sort_order;
3913 return 0;
3914 }
3915
3916 /*
3917 * Adds the 'pre,' prefix to 'str' if 'pre' is
3918 * not already part of 'str'.
3919 */
3920 static char *prefix_if_not_in(const char *pre, char *str)
3921 {
3922 char *n;
3923
3924 if (!str || strstr(str, pre))
3925 return str;
3926
3927 if (asprintf(&n, "%s,%s", pre, str) < 0)
3928 n = NULL;
3929
3930 free(str);
3931 return n;
3932 }
3933
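/*
* Prepend the overhead/latency (and children) keys implied by the sort mode
* and symbol_conf settings, unless they are already present.
*/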
3934 static char *setup_overhead(char *keys)
3935 {
3936 if (sort__mode == SORT_MODE__DIFF)
3937 return keys;
3938
3939 if (symbol_conf.prefer_latency) {
3940 keys = prefix_if_not_in("overhead", keys);
3941 keys = prefix_if_not_in("latency", keys);
3942 if (symbol_conf.cumulate_callchain) {
3943 keys = prefix_if_not_in("overhead_children", keys);
3944 keys = prefix_if_not_in("latency_children", keys);
3945 }
3946 } else if (!keys || (!strstr(keys, "overhead") &&
3947 !strstr(keys, "latency"))) {
3948 if (symbol_conf.enable_latency)
3949 keys = prefix_if_not_in("latency", keys);
3950 keys = prefix_if_not_in("overhead", keys);
3951 if (symbol_conf.cumulate_callchain) {
3952 if (symbol_conf.enable_latency)
3953 keys = prefix_if_not_in("latency_children", keys);
3954 keys = prefix_if_not_in("overhead_children", keys);
3955 }
3956 }
3957
3958 return keys;
3959 }
3960
3961 static int __setup_sorting(struct evlist *evlist, struct perf_env *env)
3962 {
3963 char *str;
3964 const char *sort_keys;
3965 int ret = 0;
3966
3967 ret = setup_sort_order(evlist);
3968 if (ret)
3969 return ret;
3970
3971 sort_keys = sort_order;
3972 if (sort_keys == NULL) {
3973 if (is_strict_order(field_order)) {
3974 /*
3975 * If the user specified a field order but no sort order,
3976 * honor it and do not add the default sort orders.
3977 */
3978 return 0;
3979 }
3980
3981 sort_keys = get_default_sort_order(evlist);
3982 }
3983
3984 str = strdup(sort_keys);
3985 if (str == NULL) {
3986 pr_err("Not enough memory to setup sort keys");
3987 return -ENOMEM;
3988 }
3989
3990 /*
3991 * Prepend overhead fields for backward compatibility.
3992 */
3993 if (!is_strict_order(field_order)) {
3994 str = setup_overhead(str);
3995 if (str == NULL) {
3996 pr_err("Not enough memory to setup overhead keys");
3997 return -ENOMEM;
3998 }
3999 }
4000
4001 ret = setup_sort_list(&perf_hpp_list, str, evlist, env);
4002
4003 free(str);
4004 return ret;
4005 }
4006
4007 void perf_hpp__set_elide(int idx, bool elide)
4008 {
4009 struct perf_hpp_fmt *fmt;
4010 struct hpp_sort_entry *hse;
4011
4012 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
4013 if (!perf_hpp__is_sort_entry(fmt))
4014 continue;
4015
4016 hse = container_of(fmt, struct hpp_sort_entry, hpp);
4017 if (hse->se->se_width_idx == idx) {
4018 fmt->elide = elide;
4019 break;
4020 }
4021 }
4022 }
4023
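/*
* A column can be elided when its filter list has exactly one entry; print
* that entry as a header comment when an output file is given.
*/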
4024 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
4025 {
4026 if (list && strlist__nr_entries(list) == 1) {
4027 if (fp != NULL)
4028 fprintf(fp, "# %s: %s\n", list_name,
4029 strlist__entry(list, 0)->s);
4030 return true;
4031 }
4032 return false;
4033 }
4034
4035 static bool get_elide(int idx, FILE *output)
4036 {
4037 switch (idx) {
4038 case HISTC_SYMBOL:
4039 return __get_elide(symbol_conf.sym_list, "symbol", output);
4040 case HISTC_DSO:
4041 return __get_elide(symbol_conf.dso_list, "dso", output);
4042 case HISTC_COMM:
4043 return __get_elide(symbol_conf.comm_list, "comm", output);
4044 default:
4045 break;
4046 }
4047
4048 if (sort__mode != SORT_MODE__BRANCH)
4049 return false;
4050
4051 switch (idx) {
4052 case HISTC_SYMBOL_FROM:
4053 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
4054 case HISTC_SYMBOL_TO:
4055 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
4056 case HISTC_DSO_FROM:
4057 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
4058 case HISTC_DSO_TO:
4059 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
4060 case HISTC_ADDR_FROM:
4061 return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
4062 case HISTC_ADDR_TO:
4063 return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
4064 default:
4065 break;
4066 }
4067
4068 return false;
4069 }
4070
4071 void sort__setup_elide(FILE *output)
4072 {
4073 struct perf_hpp_fmt *fmt;
4074 struct hpp_sort_entry *hse;
4075
4076 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
4077 if (!perf_hpp__is_sort_entry(fmt))
4078 continue;
4079
4080 hse = container_of(fmt, struct hpp_sort_entry, hpp);
4081 fmt->elide = get_elide(hse->se->se_width_idx, output);
4082 }
4083
4084 /*
4085 * It makes no sense to elide all of the sort entries.
4086 * Just revert them so they show up again.
4087 */
4088 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
4089 if (!perf_hpp__is_sort_entry(fmt))
4090 continue;
4091
4092 if (!fmt->elide)
4093 return;
4094 }
4095
4096 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
4097 if (!perf_hpp__is_sort_entry(fmt))
4098 continue;
4099
4100 fmt->elide = false;
4101 }
4102 }
4103
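/* Look up 'tok' in the dimension tables and add it as an output field. */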
4104 int output_field_add(struct perf_hpp_list *list, const char *tok, int *level)
4105 {
4106 unsigned int i;
4107
4108 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
4109 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
4110
4111 if (strncasecmp(tok, hd->name, strlen(tok)))
4112 continue;
4113
4114 if (!strcasecmp(tok, "weight"))
4115 ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
4116
4117 if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY)
4118 continue;
4119
4120 return __hpp_dimension__add_output(list, hd, *level);
4121 }
4122
4123 /*
4124 * A non-output field increases the level so that it can be
4125 * placed in a different hierarchy.
4126 */
4127 (*level)++;
4128
4129 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
4130 struct sort_dimension *sd = &common_sort_dimensions[i];
4131
4132 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
4133 continue;
4134
4135 return __sort_dimension__add_output(list, sd, *level);
4136 }
4137
4138 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
4139 struct sort_dimension *sd = &bstack_sort_dimensions[i];
4140
4141 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
4142 continue;
4143
4144 if (sort__mode != SORT_MODE__BRANCH)
4145 return -EINVAL;
4146
4147 return __sort_dimension__add_output(list, sd, *level);
4148 }
4149
4150 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
4151 struct sort_dimension *sd = &memory_sort_dimensions[i];
4152
4153 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
4154 continue;
4155
4156 if (sort__mode != SORT_MODE__MEMORY)
4157 return -EINVAL;
4158
4159 return __sort_dimension__add_output(list, sd, *level);
4160 }
4161
4162 return -ESRCH;
4163 }
4164
4165 static int setup_output_list(struct perf_hpp_list *list, char *str)
4166 {
4167 char *tmp, *tok;
4168 int ret = 0;
4169 int level = 0;
4170
4171 for (tok = strtok_r(str, ", ", &tmp);
4172 tok; tok = strtok_r(NULL, ", ", &tmp)) {
4173 ret = output_field_add(list, tok, &level);
4174 if (ret == -EINVAL) {
4175 ui__error("Invalid --fields key: `%s'", tok);
4176 break;
4177 } else if (ret == -ESRCH) {
4178 ui__error("Unknown --fields key: `%s'", tok);
4179 break;
4180 }
4181 }
4182
4183 return ret;
4184 }
4185
4186 void reset_dimensions(void)
4187 {
4188 unsigned int i;
4189
4190 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
4191 common_sort_dimensions[i].taken = 0;
4192
4193 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
4194 hpp_sort_dimensions[i].taken = 0;
4195
4196 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
4197 bstack_sort_dimensions[i].taken = 0;
4198
4199 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
4200 memory_sort_dimensions[i].taken = 0;
4201 }
4202
4203 bool is_strict_order(const char *order)
4204 {
4205 return order && (*order != '+');
4206 }
4207
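/* Parse the --fields string and set up the output field list. */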
4208 static int __setup_output_field(void)
4209 {
4210 char *str, *strp;
4211 int ret = -EINVAL;
4212
4213 if (field_order == NULL)
4214 return 0;
4215
4216 strp = str = strdup(field_order);
4217 if (str == NULL) {
4218 pr_err("Not enough memory to setup output fields");
4219 return -ENOMEM;
4220 }
4221
4222 if (!is_strict_order(field_order))
4223 strp++;
4224
4225 if (!strlen(strp)) {
4226 ui__error("Invalid --fields key: `+'");
4227 goto out;
4228 }
4229
4230 ret = setup_output_list(&perf_hpp_list, strp);
4231
4232 out:
4233 free(str);
4234 return ret;
4235 }
4236
4237 int setup_sorting(struct evlist *evlist, struct perf_env *env)
4238 {
4239 int err;
4240
4241 err = __setup_sorting(evlist, env);
4242 if (err < 0)
4243 return err;
4244
4245 if (parent_pattern != default_parent_pattern) {
4246 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, env, -1);
4247 if (err < 0)
4248 return err;
4249 }
4250
4251 reset_dimensions();
4252
4253 /*
4254 * perf diff doesn't use default hpp output fields.
4255 */
4256 if (sort__mode != SORT_MODE__DIFF)
4257 perf_hpp__init();
4258
4259 err = __setup_output_field();
4260 if (err < 0)
4261 return err;
4262
4263 err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist);
4264 if (err < 0)
4265 return err;
4266
4267 /* copy sort keys to output fields */
4268 perf_hpp__setup_output_field(&perf_hpp_list);
4269 /* and then copy output fields to sort keys */
4270 perf_hpp__append_sort_keys(&perf_hpp_list);
4271
4272 /* setup hists-specific output fields */
4273 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
4274 return -1;
4275
4276 return 0;
4277 }
4278
4279 void reset_output_field(void)
4280 {
4281 perf_hpp_list.need_collapse = 0;
4282 perf_hpp_list.parent = 0;
4283 perf_hpp_list.sym = 0;
4284 perf_hpp_list.dso = 0;
4285
4286 field_order = NULL;
4287 sort_order = NULL;
4288
4289 reset_dimensions();
4290 perf_hpp__reset_output_field(&perf_hpp_list);
4291 }
4292
4293 #define INDENT (3*8 + 1)
4294
4295 static void add_key(struct strbuf *sb, const char *str, int *llen)
4296 {
4297 if (!str)
4298 return;
4299
4300 if (*llen >= 75) {
4301 strbuf_addstr(sb, "\n\t\t\t ");
4302 *llen = INDENT;
4303 }
4304 strbuf_addf(sb, " %s", str);
4305 *llen += strlen(str) + 1;
4306 }
4307
4308 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4309 int *llen)
4310 {
4311 int i;
4312
4313 for (i = 0; i < n; i++)
4314 add_key(sb, s[i].name, llen);
4315 }
4316
4317 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4318 int *llen)
4319 {
4320 int i;
4321
4322 for (i = 0; i < n; i++)
4323 add_key(sb, s[i].name, llen);
4324 }
4325
4326 char *sort_help(const char *prefix, enum sort_mode mode)
4327 {
4328 struct strbuf sb;
4329 char *s;
4330 int len = strlen(prefix) + INDENT;
4331
4332 strbuf_init(&sb, 300);
4333 strbuf_addstr(&sb, prefix);
4334 add_hpp_sort_string(&sb, hpp_sort_dimensions,
4335 ARRAY_SIZE(hpp_sort_dimensions), &len);
4336 add_sort_string(&sb, common_sort_dimensions,
4337 ARRAY_SIZE(common_sort_dimensions), &len);
4338 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4339 add_sort_string(&sb, bstack_sort_dimensions,
4340 ARRAY_SIZE(bstack_sort_dimensions), &len);
4341 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4342 add_sort_string(&sb, memory_sort_dimensions,
4343 ARRAY_SIZE(memory_sort_dimensions), &len);
4344 s = strbuf_detach(&sb, NULL);
4345 strbuf_release(&sb);
4346 return s;
4347 }
4348