xref: /linux/tools/perf/util/sort.c (revision 173b0b5b0e865348684c02bd9cb1d22b5d46e458)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "annotate.h"
27 #include "annotate-data.h"
28 #include "event.h"
29 #include "time-utils.h"
30 #include "cgroup.h"
31 #include "machine.h"
32 #include "trace-event.h"
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 
36 #ifdef HAVE_LIBTRACEEVENT
37 #include <traceevent/event-parse.h>
38 #endif
39 
40 regex_t		parent_regex;
41 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
42 const char	*parent_pattern = default_parent_pattern;
43 const char	*default_sort_order = "comm,dso,symbol";
44 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
45 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
46 const char	default_top_sort_order[] = "dso,symbol";
47 const char	default_diff_sort_order[] = "dso,symbol";
48 const char	default_tracepoint_sort_order[] = "trace";
49 const char	*sort_order;
50 const char	*field_order;
51 regex_t		ignore_callees_regex;
52 int		have_ignore_callees = 0;
53 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
54 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
55 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
56 
57 /*
58  * Some architectures have Adjacent Cacheline Prefetch feature, which
59  * behaves like the cacheline size is doubled. Enable this flag to
60  * check things in double cacheline granularity.
61  */
62 bool chk_double_cl;
63 
64 /*
65  * Replaces all occurrences of a char used with the:
66  *
67  * -t, --field-separator
68  *
69  * option, that uses a special separator character and don't pad with spaces,
70  * replacing all occurrences of this separator in symbol names (and other
71  * output) with a '.' character, that thus it's the only non valid separator.
72 */
73 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
74 {
75 	int n;
76 	va_list ap;
77 
78 	va_start(ap, fmt);
79 	n = vsnprintf(bf, size, fmt, ap);
80 	if (symbol_conf.field_sep && n > 0) {
81 		char *sep = bf;
82 
83 		while (1) {
84 			sep = strchr(sep, *symbol_conf.field_sep);
85 			if (sep == NULL)
86 				break;
87 			*sep = '.';
88 		}
89 	}
90 	va_end(ap);
91 
92 	if (n >= (int)size)
93 		return size - 1;
94 	return n;
95 }
96 
97 static int64_t cmp_null(const void *l, const void *r)
98 {
99 	if (!l && !r)
100 		return 0;
101 	else if (!l)
102 		return -1;
103 	else
104 		return 1;
105 }
106 
107 /* --sort pid */
108 
109 static int64_t
110 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
111 {
112 	return thread__tid(right->thread) - thread__tid(left->thread);
113 }
114 
115 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
116 				       size_t size, unsigned int width)
117 {
118 	const char *comm = thread__comm_str(he->thread);
119 
120 	width = max(7U, width) - 8;
121 	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
122 			       width, width, comm ?: "");
123 }
124 
125 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
126 {
127 	const struct thread *th = arg;
128 
129 	if (type != HIST_FILTER__THREAD)
130 		return -1;
131 
132 	return th && !RC_CHK_EQUAL(he->thread, th);
133 }
134 
135 struct sort_entry sort_thread = {
136 	.se_header	= "    Pid:Command",
137 	.se_cmp		= sort__thread_cmp,
138 	.se_snprintf	= hist_entry__thread_snprintf,
139 	.se_filter	= hist_entry__thread_filter,
140 	.se_width_idx	= HISTC_THREAD,
141 };
142 
143 /* --sort simd */
144 
145 static int64_t
146 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
147 {
148 	if (left->simd_flags.arch != right->simd_flags.arch)
149 		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
150 
151 	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
152 }
153 
154 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
155 {
156 	u64 arch = simd_flags->arch;
157 
158 	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
159 		return "SVE";
160 	else
161 		return "n/a";
162 }
163 
164 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
165 				     size_t size, unsigned int width __maybe_unused)
166 {
167 	const char *name;
168 
169 	if (!he->simd_flags.arch)
170 		return repsep_snprintf(bf, size, "");
171 
172 	name = hist_entry__get_simd_name(&he->simd_flags);
173 
174 	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
175 		return repsep_snprintf(bf, size, "[e] %s", name);
176 	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
177 		return repsep_snprintf(bf, size, "[p] %s", name);
178 
179 	return repsep_snprintf(bf, size, "[.] %s", name);
180 }
181 
182 struct sort_entry sort_simd = {
183 	.se_header	= "Simd   ",
184 	.se_cmp		= sort__simd_cmp,
185 	.se_snprintf	= hist_entry__simd_snprintf,
186 	.se_width_idx	= HISTC_SIMD,
187 };
188 
189 /* --sort comm */
190 
191 /*
192  * We can't use pointer comparison in functions below,
193  * because it gives different results based on pointer
194  * values, which could break some sorting assumptions.
195  */
196 static int64_t
197 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
198 {
199 	return strcmp(comm__str(right->comm), comm__str(left->comm));
200 }
201 
202 static int64_t
203 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
204 {
205 	return strcmp(comm__str(right->comm), comm__str(left->comm));
206 }
207 
208 static int64_t
209 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
210 {
211 	return strcmp(comm__str(right->comm), comm__str(left->comm));
212 }
213 
214 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
215 				     size_t size, unsigned int width)
216 {
217 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
218 }
219 
220 struct sort_entry sort_comm = {
221 	.se_header	= "Command",
222 	.se_cmp		= sort__comm_cmp,
223 	.se_collapse	= sort__comm_collapse,
224 	.se_sort	= sort__comm_sort,
225 	.se_snprintf	= hist_entry__comm_snprintf,
226 	.se_filter	= hist_entry__thread_filter,
227 	.se_width_idx	= HISTC_COMM,
228 };
229 
230 /* --sort dso */
231 
232 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
233 {
234 	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
235 	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
236 	const char *dso_name_l, *dso_name_r;
237 
238 	if (!dso_l || !dso_r)
239 		return cmp_null(dso_r, dso_l);
240 
241 	if (verbose > 0) {
242 		dso_name_l = dso_l->long_name;
243 		dso_name_r = dso_r->long_name;
244 	} else {
245 		dso_name_l = dso_l->short_name;
246 		dso_name_r = dso_r->short_name;
247 	}
248 
249 	return strcmp(dso_name_l, dso_name_r);
250 }
251 
252 static int64_t
253 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
254 {
255 	return _sort__dso_cmp(right->ms.map, left->ms.map);
256 }
257 
258 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
259 				     size_t size, unsigned int width)
260 {
261 	const struct dso *dso = map ? map__dso(map) : NULL;
262 	const char *dso_name = "[unknown]";
263 
264 	if (dso)
265 		dso_name = verbose > 0 ? dso->long_name : dso->short_name;
266 
267 	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
268 }
269 
270 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
271 				    size_t size, unsigned int width)
272 {
273 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
274 }
275 
276 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
277 {
278 	const struct dso *dso = arg;
279 
280 	if (type != HIST_FILTER__DSO)
281 		return -1;
282 
283 	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
284 }
285 
286 struct sort_entry sort_dso = {
287 	.se_header	= "Shared Object",
288 	.se_cmp		= sort__dso_cmp,
289 	.se_snprintf	= hist_entry__dso_snprintf,
290 	.se_filter	= hist_entry__dso_filter,
291 	.se_width_idx	= HISTC_DSO,
292 };
293 
294 /* --sort symbol */
295 
296 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
297 {
298 	return (int64_t)(right_ip - left_ip);
299 }
300 
301 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
302 {
303 	if (!sym_l || !sym_r)
304 		return cmp_null(sym_l, sym_r);
305 
306 	if (sym_l == sym_r)
307 		return 0;
308 
309 	if (sym_l->inlined || sym_r->inlined) {
310 		int ret = strcmp(sym_l->name, sym_r->name);
311 
312 		if (ret)
313 			return ret;
314 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
315 			return 0;
316 	}
317 
318 	if (sym_l->start != sym_r->start)
319 		return (int64_t)(sym_r->start - sym_l->start);
320 
321 	return (int64_t)(sym_r->end - sym_l->end);
322 }
323 
324 static int64_t
325 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
326 {
327 	int64_t ret;
328 
329 	if (!left->ms.sym && !right->ms.sym)
330 		return _sort__addr_cmp(left->ip, right->ip);
331 
332 	/*
333 	 * comparing symbol address alone is not enough since it's a
334 	 * relative address within a dso.
335 	 */
336 	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
337 		ret = sort__dso_cmp(left, right);
338 		if (ret != 0)
339 			return ret;
340 	}
341 
342 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
343 }
344 
345 static int64_t
346 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
347 {
348 	if (!left->ms.sym || !right->ms.sym)
349 		return cmp_null(left->ms.sym, right->ms.sym);
350 
351 	return strcmp(right->ms.sym->name, left->ms.sym->name);
352 }
353 
354 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
355 				     u64 ip, char level, char *bf, size_t size,
356 				     unsigned int width)
357 {
358 	struct symbol *sym = ms->sym;
359 	struct map *map = ms->map;
360 	size_t ret = 0;
361 
362 	if (verbose > 0) {
363 		struct dso *dso = map ? map__dso(map) : NULL;
364 		char o = dso ? dso__symtab_origin(dso) : '!';
365 		u64 rip = ip;
366 
367 		if (dso && dso->kernel && dso->adjust_symbols)
368 			rip = map__unmap_ip(map, ip);
369 
370 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
371 				       BITS_PER_LONG / 4 + 2, rip, o);
372 	}
373 
374 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
375 	if (sym && map) {
376 		if (sym->type == STT_OBJECT) {
377 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
378 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
379 					ip - map__unmap_ip(map, sym->start));
380 		} else {
381 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
382 					       width - ret,
383 					       sym->name);
384 			if (sym->inlined)
385 				ret += repsep_snprintf(bf + ret, size - ret,
386 						       " (inlined)");
387 		}
388 	} else {
389 		size_t len = BITS_PER_LONG / 4;
390 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
391 				       len, ip);
392 	}
393 
394 	return ret;
395 }
396 
397 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
398 {
399 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
400 					 he->level, bf, size, width);
401 }
402 
403 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
404 {
405 	const char *sym = arg;
406 
407 	if (type != HIST_FILTER__SYMBOL)
408 		return -1;
409 
410 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
411 }
412 
413 struct sort_entry sort_sym = {
414 	.se_header	= "Symbol",
415 	.se_cmp		= sort__sym_cmp,
416 	.se_sort	= sort__sym_sort,
417 	.se_snprintf	= hist_entry__sym_snprintf,
418 	.se_filter	= hist_entry__sym_filter,
419 	.se_width_idx	= HISTC_SYMBOL,
420 };
421 
422 /* --sort symoff */
423 
424 static int64_t
425 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
426 {
427 	int64_t ret;
428 
429 	ret = sort__sym_cmp(left, right);
430 	if (ret)
431 		return ret;
432 
433 	return left->ip - right->ip;
434 }
435 
436 static int64_t
437 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
438 {
439 	int64_t ret;
440 
441 	ret = sort__sym_sort(left, right);
442 	if (ret)
443 		return ret;
444 
445 	return left->ip - right->ip;
446 }
447 
448 static int
449 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
450 {
451 	struct symbol *sym = he->ms.sym;
452 
453 	if (sym == NULL)
454 		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
455 
456 	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
457 }
458 
459 struct sort_entry sort_sym_offset = {
460 	.se_header	= "Symbol Offset",
461 	.se_cmp		= sort__symoff_cmp,
462 	.se_sort	= sort__symoff_sort,
463 	.se_snprintf	= hist_entry__symoff_snprintf,
464 	.se_filter	= hist_entry__sym_filter,
465 	.se_width_idx	= HISTC_SYMBOL_OFFSET,
466 };
467 
468 /* --sort srcline */
469 
470 char *hist_entry__srcline(struct hist_entry *he)
471 {
472 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
473 }
474 
475 static int64_t
476 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
477 {
478 	int64_t ret;
479 
480 	ret = _sort__addr_cmp(left->ip, right->ip);
481 	if (ret)
482 		return ret;
483 
484 	return sort__dso_cmp(left, right);
485 }
486 
487 static int64_t
488 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
489 {
490 	if (!left->srcline)
491 		left->srcline = hist_entry__srcline(left);
492 	if (!right->srcline)
493 		right->srcline = hist_entry__srcline(right);
494 
495 	return strcmp(right->srcline, left->srcline);
496 }
497 
498 static int64_t
499 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
500 {
501 	return sort__srcline_collapse(left, right);
502 }
503 
504 static void
505 sort__srcline_init(struct hist_entry *he)
506 {
507 	if (!he->srcline)
508 		he->srcline = hist_entry__srcline(he);
509 }
510 
511 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
512 					size_t size, unsigned int width)
513 {
514 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
515 }
516 
517 struct sort_entry sort_srcline = {
518 	.se_header	= "Source:Line",
519 	.se_cmp		= sort__srcline_cmp,
520 	.se_collapse	= sort__srcline_collapse,
521 	.se_sort	= sort__srcline_sort,
522 	.se_init	= sort__srcline_init,
523 	.se_snprintf	= hist_entry__srcline_snprintf,
524 	.se_width_idx	= HISTC_SRCLINE,
525 };
526 
527 /* --sort srcline_from */
528 
529 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
530 {
531 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
532 }
533 
534 static int64_t
535 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
536 {
537 	return left->branch_info->from.addr - right->branch_info->from.addr;
538 }
539 
540 static int64_t
541 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
542 {
543 	if (!left->branch_info->srcline_from)
544 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
545 
546 	if (!right->branch_info->srcline_from)
547 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
548 
549 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
550 }
551 
552 static int64_t
553 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
554 {
555 	return sort__srcline_from_collapse(left, right);
556 }
557 
558 static void sort__srcline_from_init(struct hist_entry *he)
559 {
560 	if (!he->branch_info->srcline_from)
561 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
562 }
563 
564 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
565 					size_t size, unsigned int width)
566 {
567 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
568 }
569 
570 struct sort_entry sort_srcline_from = {
571 	.se_header	= "From Source:Line",
572 	.se_cmp		= sort__srcline_from_cmp,
573 	.se_collapse	= sort__srcline_from_collapse,
574 	.se_sort	= sort__srcline_from_sort,
575 	.se_init	= sort__srcline_from_init,
576 	.se_snprintf	= hist_entry__srcline_from_snprintf,
577 	.se_width_idx	= HISTC_SRCLINE_FROM,
578 };
579 
580 /* --sort srcline_to */
581 
582 static int64_t
583 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
584 {
585 	return left->branch_info->to.addr - right->branch_info->to.addr;
586 }
587 
588 static int64_t
589 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
590 {
591 	if (!left->branch_info->srcline_to)
592 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
593 
594 	if (!right->branch_info->srcline_to)
595 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
596 
597 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
598 }
599 
600 static int64_t
601 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
602 {
603 	return sort__srcline_to_collapse(left, right);
604 }
605 
606 static void sort__srcline_to_init(struct hist_entry *he)
607 {
608 	if (!he->branch_info->srcline_to)
609 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
610 }
611 
612 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
613 					size_t size, unsigned int width)
614 {
615 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
616 }
617 
618 struct sort_entry sort_srcline_to = {
619 	.se_header	= "To Source:Line",
620 	.se_cmp		= sort__srcline_to_cmp,
621 	.se_collapse	= sort__srcline_to_collapse,
622 	.se_sort	= sort__srcline_to_sort,
623 	.se_init	= sort__srcline_to_init,
624 	.se_snprintf	= hist_entry__srcline_to_snprintf,
625 	.se_width_idx	= HISTC_SRCLINE_TO,
626 };
627 
628 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
629 					size_t size, unsigned int width)
630 {
631 
632 	struct symbol *sym = he->ms.sym;
633 	struct annotated_branch *branch;
634 	double ipc = 0.0, coverage = 0.0;
635 	char tmp[64];
636 
637 	if (!sym)
638 		return repsep_snprintf(bf, size, "%-*s", width, "-");
639 
640 	branch = symbol__annotation(sym)->branch;
641 
642 	if (branch && branch->hit_cycles)
643 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
644 
645 	if (branch && branch->total_insn) {
646 		coverage = branch->cover_insn * 100.0 /
647 			((double)branch->total_insn);
648 	}
649 
650 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
651 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
652 }
653 
654 struct sort_entry sort_sym_ipc = {
655 	.se_header	= "IPC   [IPC Coverage]",
656 	.se_cmp		= sort__sym_cmp,
657 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
658 	.se_width_idx	= HISTC_SYMBOL_IPC,
659 };
660 
661 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
662 					     __maybe_unused,
663 					     char *bf, size_t size,
664 					     unsigned int width)
665 {
666 	char tmp[64];
667 
668 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
669 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
670 }
671 
672 struct sort_entry sort_sym_ipc_null = {
673 	.se_header	= "IPC   [IPC Coverage]",
674 	.se_cmp		= sort__sym_cmp,
675 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
676 	.se_width_idx	= HISTC_SYMBOL_IPC,
677 };
678 
679 /* --sort srcfile */
680 
681 static char no_srcfile[1];
682 
683 static char *hist_entry__get_srcfile(struct hist_entry *e)
684 {
685 	char *sf, *p;
686 	struct map *map = e->ms.map;
687 
688 	if (!map)
689 		return no_srcfile;
690 
691 	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
692 			 e->ms.sym, false, true, true, e->ip);
693 	if (sf == SRCLINE_UNKNOWN)
694 		return no_srcfile;
695 	p = strchr(sf, ':');
696 	if (p && *sf) {
697 		*p = 0;
698 		return sf;
699 	}
700 	free(sf);
701 	return no_srcfile;
702 }
703 
704 static int64_t
705 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
706 {
707 	return sort__srcline_cmp(left, right);
708 }
709 
710 static int64_t
711 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
712 {
713 	if (!left->srcfile)
714 		left->srcfile = hist_entry__get_srcfile(left);
715 	if (!right->srcfile)
716 		right->srcfile = hist_entry__get_srcfile(right);
717 
718 	return strcmp(right->srcfile, left->srcfile);
719 }
720 
721 static int64_t
722 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
723 {
724 	return sort__srcfile_collapse(left, right);
725 }
726 
727 static void sort__srcfile_init(struct hist_entry *he)
728 {
729 	if (!he->srcfile)
730 		he->srcfile = hist_entry__get_srcfile(he);
731 }
732 
733 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
734 					size_t size, unsigned int width)
735 {
736 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
737 }
738 
739 struct sort_entry sort_srcfile = {
740 	.se_header	= "Source File",
741 	.se_cmp		= sort__srcfile_cmp,
742 	.se_collapse	= sort__srcfile_collapse,
743 	.se_sort	= sort__srcfile_sort,
744 	.se_init	= sort__srcfile_init,
745 	.se_snprintf	= hist_entry__srcfile_snprintf,
746 	.se_width_idx	= HISTC_SRCFILE,
747 };
748 
749 /* --sort parent */
750 
751 static int64_t
752 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
753 {
754 	struct symbol *sym_l = left->parent;
755 	struct symbol *sym_r = right->parent;
756 
757 	if (!sym_l || !sym_r)
758 		return cmp_null(sym_l, sym_r);
759 
760 	return strcmp(sym_r->name, sym_l->name);
761 }
762 
763 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
764 				       size_t size, unsigned int width)
765 {
766 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
767 			      he->parent ? he->parent->name : "[other]");
768 }
769 
770 struct sort_entry sort_parent = {
771 	.se_header	= "Parent symbol",
772 	.se_cmp		= sort__parent_cmp,
773 	.se_snprintf	= hist_entry__parent_snprintf,
774 	.se_width_idx	= HISTC_PARENT,
775 };
776 
777 /* --sort cpu */
778 
779 static int64_t
780 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
781 {
782 	return right->cpu - left->cpu;
783 }
784 
785 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
786 				    size_t size, unsigned int width)
787 {
788 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
789 }
790 
791 struct sort_entry sort_cpu = {
792 	.se_header      = "CPU",
793 	.se_cmp	        = sort__cpu_cmp,
794 	.se_snprintf    = hist_entry__cpu_snprintf,
795 	.se_width_idx	= HISTC_CPU,
796 };
797 
798 /* --sort cgroup_id */
799 
800 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
801 {
802 	return (int64_t)(right_dev - left_dev);
803 }
804 
805 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
806 {
807 	return (int64_t)(right_ino - left_ino);
808 }
809 
810 static int64_t
811 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
812 {
813 	int64_t ret;
814 
815 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
816 	if (ret != 0)
817 		return ret;
818 
819 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
820 				       left->cgroup_id.ino);
821 }
822 
823 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
824 					  char *bf, size_t size,
825 					  unsigned int width __maybe_unused)
826 {
827 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
828 			       he->cgroup_id.ino);
829 }
830 
831 struct sort_entry sort_cgroup_id = {
832 	.se_header      = "cgroup id (dev/inode)",
833 	.se_cmp	        = sort__cgroup_id_cmp,
834 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
835 	.se_width_idx	= HISTC_CGROUP_ID,
836 };
837 
838 /* --sort cgroup */
839 
840 static int64_t
841 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
842 {
843 	return right->cgroup - left->cgroup;
844 }
845 
846 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
847 				       char *bf, size_t size,
848 				       unsigned int width __maybe_unused)
849 {
850 	const char *cgrp_name = "N/A";
851 
852 	if (he->cgroup) {
853 		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
854 						   he->cgroup);
855 		if (cgrp != NULL)
856 			cgrp_name = cgrp->name;
857 		else
858 			cgrp_name = "unknown";
859 	}
860 
861 	return repsep_snprintf(bf, size, "%s", cgrp_name);
862 }
863 
864 struct sort_entry sort_cgroup = {
865 	.se_header      = "Cgroup",
866 	.se_cmp	        = sort__cgroup_cmp,
867 	.se_snprintf    = hist_entry__cgroup_snprintf,
868 	.se_width_idx	= HISTC_CGROUP,
869 };
870 
871 /* --sort socket */
872 
873 static int64_t
874 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
875 {
876 	return right->socket - left->socket;
877 }
878 
879 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
880 				    size_t size, unsigned int width)
881 {
882 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
883 }
884 
885 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
886 {
887 	int sk = *(const int *)arg;
888 
889 	if (type != HIST_FILTER__SOCKET)
890 		return -1;
891 
892 	return sk >= 0 && he->socket != sk;
893 }
894 
895 struct sort_entry sort_socket = {
896 	.se_header      = "Socket",
897 	.se_cmp	        = sort__socket_cmp,
898 	.se_snprintf    = hist_entry__socket_snprintf,
899 	.se_filter      = hist_entry__socket_filter,
900 	.se_width_idx	= HISTC_SOCKET,
901 };
902 
903 /* --sort time */
904 
905 static int64_t
906 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
907 {
908 	return right->time - left->time;
909 }
910 
911 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
912 				    size_t size, unsigned int width)
913 {
914 	char he_time[32];
915 
916 	if (symbol_conf.nanosecs)
917 		timestamp__scnprintf_nsec(he->time, he_time,
918 					  sizeof(he_time));
919 	else
920 		timestamp__scnprintf_usec(he->time, he_time,
921 					  sizeof(he_time));
922 
923 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
924 }
925 
926 struct sort_entry sort_time = {
927 	.se_header      = "Time",
928 	.se_cmp	        = sort__time_cmp,
929 	.se_snprintf    = hist_entry__time_snprintf,
930 	.se_width_idx	= HISTC_TIME,
931 };
932 
933 /* --sort trace */
934 
935 #ifdef HAVE_LIBTRACEEVENT
936 static char *get_trace_output(struct hist_entry *he)
937 {
938 	struct trace_seq seq;
939 	struct evsel *evsel;
940 	struct tep_record rec = {
941 		.data = he->raw_data,
942 		.size = he->raw_size,
943 	};
944 
945 	evsel = hists_to_evsel(he->hists);
946 
947 	trace_seq_init(&seq);
948 	if (symbol_conf.raw_trace) {
949 		tep_print_fields(&seq, he->raw_data, he->raw_size,
950 				 evsel->tp_format);
951 	} else {
952 		tep_print_event(evsel->tp_format->tep,
953 				&seq, &rec, "%s", TEP_PRINT_INFO);
954 	}
955 	/*
956 	 * Trim the buffer, it starts at 4KB and we're not going to
957 	 * add anything more to this buffer.
958 	 */
959 	return realloc(seq.buffer, seq.len + 1);
960 }
961 
962 static int64_t
963 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
964 {
965 	struct evsel *evsel;
966 
967 	evsel = hists_to_evsel(left->hists);
968 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
969 		return 0;
970 
971 	if (left->trace_output == NULL)
972 		left->trace_output = get_trace_output(left);
973 	if (right->trace_output == NULL)
974 		right->trace_output = get_trace_output(right);
975 
976 	return strcmp(right->trace_output, left->trace_output);
977 }
978 
979 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
980 				    size_t size, unsigned int width)
981 {
982 	struct evsel *evsel;
983 
984 	evsel = hists_to_evsel(he->hists);
985 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
986 		return scnprintf(bf, size, "%-.*s", width, "N/A");
987 
988 	if (he->trace_output == NULL)
989 		he->trace_output = get_trace_output(he);
990 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
991 }
992 
993 struct sort_entry sort_trace = {
994 	.se_header      = "Trace output",
995 	.se_cmp	        = sort__trace_cmp,
996 	.se_snprintf    = hist_entry__trace_snprintf,
997 	.se_width_idx	= HISTC_TRACE,
998 };
999 #endif /* HAVE_LIBTRACEEVENT */
1000 
1001 /* sort keys for branch stacks */
1002 
1003 static int64_t
1004 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1005 {
1006 	if (!left->branch_info || !right->branch_info)
1007 		return cmp_null(left->branch_info, right->branch_info);
1008 
1009 	return _sort__dso_cmp(left->branch_info->from.ms.map,
1010 			      right->branch_info->from.ms.map);
1011 }
1012 
1013 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1014 				    size_t size, unsigned int width)
1015 {
1016 	if (he->branch_info)
1017 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1018 						 bf, size, width);
1019 	else
1020 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1021 }
1022 
1023 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1024 				       const void *arg)
1025 {
1026 	const struct dso *dso = arg;
1027 
1028 	if (type != HIST_FILTER__DSO)
1029 		return -1;
1030 
1031 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1032 		map__dso(he->branch_info->from.ms.map) != dso);
1033 }
1034 
1035 static int64_t
1036 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1037 {
1038 	if (!left->branch_info || !right->branch_info)
1039 		return cmp_null(left->branch_info, right->branch_info);
1040 
1041 	return _sort__dso_cmp(left->branch_info->to.ms.map,
1042 			      right->branch_info->to.ms.map);
1043 }
1044 
1045 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1046 				       size_t size, unsigned int width)
1047 {
1048 	if (he->branch_info)
1049 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1050 						 bf, size, width);
1051 	else
1052 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1053 }
1054 
1055 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1056 				     const void *arg)
1057 {
1058 	const struct dso *dso = arg;
1059 
1060 	if (type != HIST_FILTER__DSO)
1061 		return -1;
1062 
1063 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1064 		map__dso(he->branch_info->to.ms.map) != dso);
1065 }
1066 
1067 static int64_t
1068 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1069 {
1070 	struct addr_map_symbol *from_l, *from_r;
1071 
1072 	if (!left->branch_info || !right->branch_info)
1073 		return cmp_null(left->branch_info, right->branch_info);
1074 
1075 	from_l = &left->branch_info->from;
1076 	from_r = &right->branch_info->from;
1077 
1078 	if (!from_l->ms.sym && !from_r->ms.sym)
1079 		return _sort__addr_cmp(from_l->addr, from_r->addr);
1080 
1081 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1082 }
1083 
1084 static int64_t
1085 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1086 {
1087 	struct addr_map_symbol *to_l, *to_r;
1088 
1089 	if (!left->branch_info || !right->branch_info)
1090 		return cmp_null(left->branch_info, right->branch_info);
1091 
1092 	to_l = &left->branch_info->to;
1093 	to_r = &right->branch_info->to;
1094 
1095 	if (!to_l->ms.sym && !to_r->ms.sym)
1096 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1097 
1098 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1099 }
1100 
1101 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1102 					 size_t size, unsigned int width)
1103 {
1104 	if (he->branch_info) {
1105 		struct addr_map_symbol *from = &he->branch_info->from;
1106 
1107 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1108 						 from->al_level, bf, size, width);
1109 	}
1110 
1111 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1112 }
1113 
1114 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1115 				       size_t size, unsigned int width)
1116 {
1117 	if (he->branch_info) {
1118 		struct addr_map_symbol *to = &he->branch_info->to;
1119 
1120 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1121 						 to->al_level, bf, size, width);
1122 	}
1123 
1124 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1125 }
1126 
1127 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1128 				       const void *arg)
1129 {
1130 	const char *sym = arg;
1131 
1132 	if (type != HIST_FILTER__SYMBOL)
1133 		return -1;
1134 
1135 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1136 			strstr(he->branch_info->from.ms.sym->name, sym));
1137 }
1138 
1139 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1140 				       const void *arg)
1141 {
1142 	const char *sym = arg;
1143 
1144 	if (type != HIST_FILTER__SYMBOL)
1145 		return -1;
1146 
1147 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1148 		        strstr(he->branch_info->to.ms.sym->name, sym));
1149 }
1150 
1151 struct sort_entry sort_dso_from = {
1152 	.se_header	= "Source Shared Object",
1153 	.se_cmp		= sort__dso_from_cmp,
1154 	.se_snprintf	= hist_entry__dso_from_snprintf,
1155 	.se_filter	= hist_entry__dso_from_filter,
1156 	.se_width_idx	= HISTC_DSO_FROM,
1157 };
1158 
1159 struct sort_entry sort_dso_to = {
1160 	.se_header	= "Target Shared Object",
1161 	.se_cmp		= sort__dso_to_cmp,
1162 	.se_snprintf	= hist_entry__dso_to_snprintf,
1163 	.se_filter	= hist_entry__dso_to_filter,
1164 	.se_width_idx	= HISTC_DSO_TO,
1165 };
1166 
1167 struct sort_entry sort_sym_from = {
1168 	.se_header	= "Source Symbol",
1169 	.se_cmp		= sort__sym_from_cmp,
1170 	.se_snprintf	= hist_entry__sym_from_snprintf,
1171 	.se_filter	= hist_entry__sym_from_filter,
1172 	.se_width_idx	= HISTC_SYMBOL_FROM,
1173 };
1174 
1175 struct sort_entry sort_sym_to = {
1176 	.se_header	= "Target Symbol",
1177 	.se_cmp		= sort__sym_to_cmp,
1178 	.se_snprintf	= hist_entry__sym_to_snprintf,
1179 	.se_filter	= hist_entry__sym_to_filter,
1180 	.se_width_idx	= HISTC_SYMBOL_TO,
1181 };
1182 
1183 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1184 				     u64 ip, char level, char *bf, size_t size,
1185 				     unsigned int width)
1186 {
1187 	struct symbol *sym = ms->sym;
1188 	struct map *map = ms->map;
1189 	size_t ret = 0, offs;
1190 
1191 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1192 	if (sym && map) {
1193 		if (sym->type == STT_OBJECT) {
1194 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1195 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1196 					ip - map__unmap_ip(map, sym->start));
1197 		} else {
1198 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1199 					       width - ret,
1200 					       sym->name);
1201 			offs = ip - sym->start;
1202 			if (offs)
1203 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1204 		}
1205 	} else {
1206 		size_t len = BITS_PER_LONG / 4;
1207 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1208 				       len, ip);
1209 	}
1210 
1211 	return ret;
1212 }
1213 
1214 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1215 					 size_t size, unsigned int width)
1216 {
1217 	if (he->branch_info) {
1218 		struct addr_map_symbol *from = &he->branch_info->from;
1219 
1220 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1221 						 he->level, bf, size, width);
1222 	}
1223 
1224 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1225 }
1226 
1227 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1228 				       size_t size, unsigned int width)
1229 {
1230 	if (he->branch_info) {
1231 		struct addr_map_symbol *to = &he->branch_info->to;
1232 
1233 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1234 						 he->level, bf, size, width);
1235 	}
1236 
1237 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1238 }
1239 
1240 static int64_t
1241 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1242 {
1243 	struct addr_map_symbol *from_l;
1244 	struct addr_map_symbol *from_r;
1245 	int64_t ret;
1246 
1247 	if (!left->branch_info || !right->branch_info)
1248 		return cmp_null(left->branch_info, right->branch_info);
1249 
1250 	from_l = &left->branch_info->from;
1251 	from_r = &right->branch_info->from;
1252 
1253 	/*
1254 	 * comparing symbol address alone is not enough since it's a
1255 	 * relative address within a dso.
1256 	 */
1257 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1258 	if (ret != 0)
1259 		return ret;
1260 
1261 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1262 }
1263 
1264 static int64_t
1265 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1266 {
1267 	struct addr_map_symbol *to_l;
1268 	struct addr_map_symbol *to_r;
1269 	int64_t ret;
1270 
1271 	if (!left->branch_info || !right->branch_info)
1272 		return cmp_null(left->branch_info, right->branch_info);
1273 
1274 	to_l = &left->branch_info->to;
1275 	to_r = &right->branch_info->to;
1276 
1277 	/*
1278 	 * comparing symbol address alone is not enough since it's a
1279 	 * relative address within a dso.
1280 	 */
1281 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1282 	if (ret != 0)
1283 		return ret;
1284 
1285 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1286 }
1287 
1288 struct sort_entry sort_addr_from = {
1289 	.se_header	= "Source Address",
1290 	.se_cmp		= sort__addr_from_cmp,
1291 	.se_snprintf	= hist_entry__addr_from_snprintf,
1292 	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1293 	.se_width_idx	= HISTC_ADDR_FROM,
1294 };
1295 
1296 struct sort_entry sort_addr_to = {
1297 	.se_header	= "Target Address",
1298 	.se_cmp		= sort__addr_to_cmp,
1299 	.se_snprintf	= hist_entry__addr_to_snprintf,
1300 	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1301 	.se_width_idx	= HISTC_ADDR_TO,
1302 };
1303 
1304 
1305 static int64_t
1306 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1307 {
1308 	unsigned char mp, p;
1309 
1310 	if (!left->branch_info || !right->branch_info)
1311 		return cmp_null(left->branch_info, right->branch_info);
1312 
1313 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1314 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1315 	return mp || p;
1316 }
1317 
1318 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1319 				    size_t size, unsigned int width){
1320 	static const char *out = "N/A";
1321 
1322 	if (he->branch_info) {
1323 		if (he->branch_info->flags.predicted)
1324 			out = "N";
1325 		else if (he->branch_info->flags.mispred)
1326 			out = "Y";
1327 	}
1328 
1329 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1330 }
1331 
1332 static int64_t
1333 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1334 {
1335 	if (!left->branch_info || !right->branch_info)
1336 		return cmp_null(left->branch_info, right->branch_info);
1337 
1338 	return left->branch_info->flags.cycles -
1339 		right->branch_info->flags.cycles;
1340 }
1341 
1342 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1343 				    size_t size, unsigned int width)
1344 {
1345 	if (!he->branch_info)
1346 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1347 	if (he->branch_info->flags.cycles == 0)
1348 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1349 	return repsep_snprintf(bf, size, "%-*hd", width,
1350 			       he->branch_info->flags.cycles);
1351 }
1352 
1353 struct sort_entry sort_cycles = {
1354 	.se_header	= "Basic Block Cycles",
1355 	.se_cmp		= sort__cycles_cmp,
1356 	.se_snprintf	= hist_entry__cycles_snprintf,
1357 	.se_width_idx	= HISTC_CYCLES,
1358 };
1359 
1360 /* --sort daddr_sym */
1361 int64_t
1362 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1363 {
1364 	uint64_t l = 0, r = 0;
1365 
1366 	if (left->mem_info)
1367 		l = left->mem_info->daddr.addr;
1368 	if (right->mem_info)
1369 		r = right->mem_info->daddr.addr;
1370 
1371 	return (int64_t)(r - l);
1372 }
1373 
1374 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1375 				    size_t size, unsigned int width)
1376 {
1377 	uint64_t addr = 0;
1378 	struct map_symbol *ms = NULL;
1379 
1380 	if (he->mem_info) {
1381 		addr = he->mem_info->daddr.addr;
1382 		ms = &he->mem_info->daddr.ms;
1383 	}
1384 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1385 }
1386 
1387 int64_t
1388 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1389 {
1390 	uint64_t l = 0, r = 0;
1391 
1392 	if (left->mem_info)
1393 		l = left->mem_info->iaddr.addr;
1394 	if (right->mem_info)
1395 		r = right->mem_info->iaddr.addr;
1396 
1397 	return (int64_t)(r - l);
1398 }
1399 
1400 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1401 				    size_t size, unsigned int width)
1402 {
1403 	uint64_t addr = 0;
1404 	struct map_symbol *ms = NULL;
1405 
1406 	if (he->mem_info) {
1407 		addr = he->mem_info->iaddr.addr;
1408 		ms   = &he->mem_info->iaddr.ms;
1409 	}
1410 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1411 }
1412 
1413 static int64_t
1414 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1415 {
1416 	struct map *map_l = NULL;
1417 	struct map *map_r = NULL;
1418 
1419 	if (left->mem_info)
1420 		map_l = left->mem_info->daddr.ms.map;
1421 	if (right->mem_info)
1422 		map_r = right->mem_info->daddr.ms.map;
1423 
1424 	return _sort__dso_cmp(map_l, map_r);
1425 }
1426 
1427 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1428 				    size_t size, unsigned int width)
1429 {
1430 	struct map *map = NULL;
1431 
1432 	if (he->mem_info)
1433 		map = he->mem_info->daddr.ms.map;
1434 
1435 	return _hist_entry__dso_snprintf(map, bf, size, width);
1436 }
1437 
1438 static int64_t
1439 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1440 {
1441 	union perf_mem_data_src data_src_l;
1442 	union perf_mem_data_src data_src_r;
1443 
1444 	if (left->mem_info)
1445 		data_src_l = left->mem_info->data_src;
1446 	else
1447 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1448 
1449 	if (right->mem_info)
1450 		data_src_r = right->mem_info->data_src;
1451 	else
1452 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1453 
1454 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1455 }
1456 
1457 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1458 				    size_t size, unsigned int width)
1459 {
1460 	char out[10];
1461 
1462 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1463 	return repsep_snprintf(bf, size, "%.*s", width, out);
1464 }
1465 
1466 static int64_t
1467 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1468 {
1469 	union perf_mem_data_src data_src_l;
1470 	union perf_mem_data_src data_src_r;
1471 
1472 	if (left->mem_info)
1473 		data_src_l = left->mem_info->data_src;
1474 	else
1475 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1476 
1477 	if (right->mem_info)
1478 		data_src_r = right->mem_info->data_src;
1479 	else
1480 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1481 
1482 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1483 }
1484 
1485 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1486 				    size_t size, unsigned int width)
1487 {
1488 	char out[64];
1489 
1490 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1491 	return repsep_snprintf(bf, size, "%-*s", width, out);
1492 }
1493 
1494 static int64_t
1495 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1496 {
1497 	union perf_mem_data_src data_src_l;
1498 	union perf_mem_data_src data_src_r;
1499 
1500 	if (left->mem_info)
1501 		data_src_l = left->mem_info->data_src;
1502 	else
1503 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1504 
1505 	if (right->mem_info)
1506 		data_src_r = right->mem_info->data_src;
1507 	else
1508 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1509 
1510 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1511 }
1512 
1513 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1514 				    size_t size, unsigned int width)
1515 {
1516 	char out[64];
1517 
1518 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1519 	return repsep_snprintf(bf, size, "%-*s", width, out);
1520 }
1521 
1522 static int64_t
1523 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1524 {
1525 	union perf_mem_data_src data_src_l;
1526 	union perf_mem_data_src data_src_r;
1527 
1528 	if (left->mem_info)
1529 		data_src_l = left->mem_info->data_src;
1530 	else
1531 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1532 
1533 	if (right->mem_info)
1534 		data_src_r = right->mem_info->data_src;
1535 	else
1536 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1537 
1538 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1539 }
1540 
1541 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1542 				    size_t size, unsigned int width)
1543 {
1544 	char out[64];
1545 
1546 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1547 	return repsep_snprintf(bf, size, "%-*s", width, out);
1548 }
1549 
1550 int64_t
1551 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1552 {
1553 	u64 l, r;
1554 	struct map *l_map, *r_map;
1555 	struct dso *l_dso, *r_dso;
1556 	int rc;
1557 
1558 	if (!left->mem_info)  return -1;
1559 	if (!right->mem_info) return 1;
1560 
1561 	/* group event types together */
1562 	if (left->cpumode > right->cpumode) return -1;
1563 	if (left->cpumode < right->cpumode) return 1;
1564 
1565 	l_map = left->mem_info->daddr.ms.map;
1566 	r_map = right->mem_info->daddr.ms.map;
1567 
1568 	/* if both are NULL, jump to sort on al_addr instead */
1569 	if (!l_map && !r_map)
1570 		goto addr;
1571 
1572 	if (!l_map) return -1;
1573 	if (!r_map) return 1;
1574 
1575 	l_dso = map__dso(l_map);
1576 	r_dso = map__dso(r_map);
1577 	rc = dso__cmp_id(l_dso, r_dso);
1578 	if (rc)
1579 		return rc;
1580 	/*
1581 	 * Addresses with no major/minor numbers are assumed to be
1582 	 * anonymous in userspace.  Sort those on pid then address.
1583 	 *
1584 	 * The kernel and non-zero major/minor mapped areas are
1585 	 * assumed to be unity mapped.  Sort those on address.
1586 	 */
1587 
1588 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1589 	    (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
1590 	    !l_dso->id.ino && !l_dso->id.ino_generation) {
1591 		/* userspace anonymous */
1592 
1593 		if (thread__pid(left->thread) > thread__pid(right->thread))
1594 			return -1;
1595 		if (thread__pid(left->thread) < thread__pid(right->thread))
1596 			return 1;
1597 	}
1598 
1599 addr:
1600 	/* al_addr does all the right addr - start + offset calculations */
1601 	l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
1602 	r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
1603 
1604 	if (l > r) return -1;
1605 	if (l < r) return 1;
1606 
1607 	return 0;
1608 }
1609 
1610 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1611 					  size_t size, unsigned int width)
1612 {
1613 
1614 	uint64_t addr = 0;
1615 	struct map_symbol *ms = NULL;
1616 	char level = he->level;
1617 
1618 	if (he->mem_info) {
1619 		struct map *map = he->mem_info->daddr.ms.map;
1620 		struct dso *dso = map ? map__dso(map) : NULL;
1621 
1622 		addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
1623 		ms = &he->mem_info->daddr.ms;
1624 
1625 		/* print [s] for shared data mmaps */
1626 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1627 		     map && !(map__prot(map) & PROT_EXEC) &&
1628 		     (map__flags(map) & MAP_SHARED) &&
1629 		    (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
1630 			level = 's';
1631 		else if (!map)
1632 			level = 'X';
1633 	}
1634 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1635 }
1636 
1637 struct sort_entry sort_mispredict = {
1638 	.se_header	= "Branch Mispredicted",
1639 	.se_cmp		= sort__mispredict_cmp,
1640 	.se_snprintf	= hist_entry__mispredict_snprintf,
1641 	.se_width_idx	= HISTC_MISPREDICT,
1642 };
1643 
1644 static int64_t
1645 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1646 {
1647 	return left->weight - right->weight;
1648 }
1649 
1650 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1651 				    size_t size, unsigned int width)
1652 {
1653 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1654 }
1655 
1656 struct sort_entry sort_local_weight = {
1657 	.se_header	= "Local Weight",
1658 	.se_cmp		= sort__weight_cmp,
1659 	.se_snprintf	= hist_entry__local_weight_snprintf,
1660 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1661 };
1662 
1663 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1664 					      size_t size, unsigned int width)
1665 {
1666 	return repsep_snprintf(bf, size, "%-*llu", width,
1667 			       he->weight * he->stat.nr_events);
1668 }
1669 
1670 struct sort_entry sort_global_weight = {
1671 	.se_header	= "Weight",
1672 	.se_cmp		= sort__weight_cmp,
1673 	.se_snprintf	= hist_entry__global_weight_snprintf,
1674 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1675 };
1676 
1677 static int64_t
1678 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1679 {
1680 	return left->ins_lat - right->ins_lat;
1681 }
1682 
1683 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1684 					      size_t size, unsigned int width)
1685 {
1686 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1687 }
1688 
1689 struct sort_entry sort_local_ins_lat = {
1690 	.se_header	= "Local INSTR Latency",
1691 	.se_cmp		= sort__ins_lat_cmp,
1692 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1693 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1694 };
1695 
1696 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1697 					       size_t size, unsigned int width)
1698 {
1699 	return repsep_snprintf(bf, size, "%-*u", width,
1700 			       he->ins_lat * he->stat.nr_events);
1701 }
1702 
1703 struct sort_entry sort_global_ins_lat = {
1704 	.se_header	= "INSTR Latency",
1705 	.se_cmp		= sort__ins_lat_cmp,
1706 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1707 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1708 };
1709 
1710 static int64_t
1711 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1712 {
1713 	return left->p_stage_cyc - right->p_stage_cyc;
1714 }
1715 
1716 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1717 					size_t size, unsigned int width)
1718 {
1719 	return repsep_snprintf(bf, size, "%-*u", width,
1720 			he->p_stage_cyc * he->stat.nr_events);
1721 }
1722 
1723 
1724 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1725 					size_t size, unsigned int width)
1726 {
1727 	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1728 }
1729 
1730 struct sort_entry sort_local_p_stage_cyc = {
1731 	.se_header      = "Local Pipeline Stage Cycle",
1732 	.se_cmp         = sort__p_stage_cyc_cmp,
1733 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1734 	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1735 };
1736 
1737 struct sort_entry sort_global_p_stage_cyc = {
1738 	.se_header      = "Pipeline Stage Cycle",
1739 	.se_cmp         = sort__p_stage_cyc_cmp,
1740 	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1741 	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1742 };
1743 
1744 struct sort_entry sort_mem_daddr_sym = {
1745 	.se_header	= "Data Symbol",
1746 	.se_cmp		= sort__daddr_cmp,
1747 	.se_snprintf	= hist_entry__daddr_snprintf,
1748 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1749 };
1750 
1751 struct sort_entry sort_mem_iaddr_sym = {
1752 	.se_header	= "Code Symbol",
1753 	.se_cmp		= sort__iaddr_cmp,
1754 	.se_snprintf	= hist_entry__iaddr_snprintf,
1755 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1756 };
1757 
1758 struct sort_entry sort_mem_daddr_dso = {
1759 	.se_header	= "Data Object",
1760 	.se_cmp		= sort__dso_daddr_cmp,
1761 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1762 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1763 };
1764 
1765 struct sort_entry sort_mem_locked = {
1766 	.se_header	= "Locked",
1767 	.se_cmp		= sort__locked_cmp,
1768 	.se_snprintf	= hist_entry__locked_snprintf,
1769 	.se_width_idx	= HISTC_MEM_LOCKED,
1770 };
1771 
1772 struct sort_entry sort_mem_tlb = {
1773 	.se_header	= "TLB access",
1774 	.se_cmp		= sort__tlb_cmp,
1775 	.se_snprintf	= hist_entry__tlb_snprintf,
1776 	.se_width_idx	= HISTC_MEM_TLB,
1777 };
1778 
1779 struct sort_entry sort_mem_lvl = {
1780 	.se_header	= "Memory access",
1781 	.se_cmp		= sort__lvl_cmp,
1782 	.se_snprintf	= hist_entry__lvl_snprintf,
1783 	.se_width_idx	= HISTC_MEM_LVL,
1784 };
1785 
1786 struct sort_entry sort_mem_snoop = {
1787 	.se_header	= "Snoop",
1788 	.se_cmp		= sort__snoop_cmp,
1789 	.se_snprintf	= hist_entry__snoop_snprintf,
1790 	.se_width_idx	= HISTC_MEM_SNOOP,
1791 };
1792 
1793 struct sort_entry sort_mem_dcacheline = {
1794 	.se_header	= "Data Cacheline",
1795 	.se_cmp		= sort__dcacheline_cmp,
1796 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1797 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1798 };
1799 
1800 static int64_t
1801 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1802 {
1803 	union perf_mem_data_src data_src_l;
1804 	union perf_mem_data_src data_src_r;
1805 
1806 	if (left->mem_info)
1807 		data_src_l = left->mem_info->data_src;
1808 	else
1809 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1810 
1811 	if (right->mem_info)
1812 		data_src_r = right->mem_info->data_src;
1813 	else
1814 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1815 
1816 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1817 }
1818 
1819 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1820 					size_t size, unsigned int width)
1821 {
1822 	char out[16];
1823 
1824 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1825 	return repsep_snprintf(bf, size, "%.*s", width, out);
1826 }
1827 
1828 struct sort_entry sort_mem_blocked = {
1829 	.se_header	= "Blocked",
1830 	.se_cmp		= sort__blocked_cmp,
1831 	.se_snprintf	= hist_entry__blocked_snprintf,
1832 	.se_width_idx	= HISTC_MEM_BLOCKED,
1833 };
1834 
1835 static int64_t
1836 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1837 {
1838 	uint64_t l = 0, r = 0;
1839 
1840 	if (left->mem_info)
1841 		l = left->mem_info->daddr.phys_addr;
1842 	if (right->mem_info)
1843 		r = right->mem_info->daddr.phys_addr;
1844 
1845 	return (int64_t)(r - l);
1846 }
1847 
1848 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1849 					   size_t size, unsigned int width)
1850 {
1851 	uint64_t addr = 0;
1852 	size_t ret = 0;
1853 	size_t len = BITS_PER_LONG / 4;
1854 
1855 	addr = he->mem_info->daddr.phys_addr;
1856 
1857 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1858 
1859 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1860 
1861 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1862 
1863 	if (ret > width)
1864 		bf[width] = '\0';
1865 
1866 	return width;
1867 }
1868 
1869 struct sort_entry sort_mem_phys_daddr = {
1870 	.se_header	= "Data Physical Address",
1871 	.se_cmp		= sort__phys_daddr_cmp,
1872 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1873 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1874 };
1875 
1876 static int64_t
1877 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1878 {
1879 	uint64_t l = 0, r = 0;
1880 
1881 	if (left->mem_info)
1882 		l = left->mem_info->daddr.data_page_size;
1883 	if (right->mem_info)
1884 		r = right->mem_info->daddr.data_page_size;
1885 
1886 	return (int64_t)(r - l);
1887 }
1888 
1889 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1890 					  size_t size, unsigned int width)
1891 {
1892 	char str[PAGE_SIZE_NAME_LEN];
1893 
1894 	return repsep_snprintf(bf, size, "%-*s", width,
1895 			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1896 }
1897 
1898 struct sort_entry sort_mem_data_page_size = {
1899 	.se_header	= "Data Page Size",
1900 	.se_cmp		= sort__data_page_size_cmp,
1901 	.se_snprintf	= hist_entry__data_page_size_snprintf,
1902 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1903 };
1904 
1905 static int64_t
1906 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1907 {
1908 	uint64_t l = left->code_page_size;
1909 	uint64_t r = right->code_page_size;
1910 
1911 	return (int64_t)(r - l);
1912 }
1913 
1914 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1915 					  size_t size, unsigned int width)
1916 {
1917 	char str[PAGE_SIZE_NAME_LEN];
1918 
1919 	return repsep_snprintf(bf, size, "%-*s", width,
1920 			       get_page_size_name(he->code_page_size, str));
1921 }
1922 
1923 struct sort_entry sort_code_page_size = {
1924 	.se_header	= "Code Page Size",
1925 	.se_cmp		= sort__code_page_size_cmp,
1926 	.se_snprintf	= hist_entry__code_page_size_snprintf,
1927 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1928 };
1929 
1930 static int64_t
1931 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1932 {
1933 	if (!left->branch_info || !right->branch_info)
1934 		return cmp_null(left->branch_info, right->branch_info);
1935 
1936 	return left->branch_info->flags.abort !=
1937 		right->branch_info->flags.abort;
1938 }
1939 
1940 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1941 				    size_t size, unsigned int width)
1942 {
1943 	static const char *out = "N/A";
1944 
1945 	if (he->branch_info) {
1946 		if (he->branch_info->flags.abort)
1947 			out = "A";
1948 		else
1949 			out = ".";
1950 	}
1951 
1952 	return repsep_snprintf(bf, size, "%-*s", width, out);
1953 }
1954 
1955 struct sort_entry sort_abort = {
1956 	.se_header	= "Transaction abort",
1957 	.se_cmp		= sort__abort_cmp,
1958 	.se_snprintf	= hist_entry__abort_snprintf,
1959 	.se_width_idx	= HISTC_ABORT,
1960 };
1961 
1962 static int64_t
1963 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1964 {
1965 	if (!left->branch_info || !right->branch_info)
1966 		return cmp_null(left->branch_info, right->branch_info);
1967 
1968 	return left->branch_info->flags.in_tx !=
1969 		right->branch_info->flags.in_tx;
1970 }
1971 
1972 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1973 				    size_t size, unsigned int width)
1974 {
1975 	static const char *out = "N/A";
1976 
1977 	if (he->branch_info) {
1978 		if (he->branch_info->flags.in_tx)
1979 			out = "T";
1980 		else
1981 			out = ".";
1982 	}
1983 
1984 	return repsep_snprintf(bf, size, "%-*s", width, out);
1985 }
1986 
1987 struct sort_entry sort_in_tx = {
1988 	.se_header	= "Branch in transaction",
1989 	.se_cmp		= sort__in_tx_cmp,
1990 	.se_snprintf	= hist_entry__in_tx_snprintf,
1991 	.se_width_idx	= HISTC_IN_TX,
1992 };
1993 
1994 static int64_t
1995 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1996 {
1997 	return left->transaction - right->transaction;
1998 }
1999 
2000 static inline char *add_str(char *p, const char *str)
2001 {
2002 	strcpy(p, str);
2003 	return p + strlen(str);
2004 }
2005 
2006 static struct txbit {
2007 	unsigned flag;
2008 	const char *name;
2009 	int skip_for_len;
2010 } txbits[] = {
2011 	{ PERF_TXN_ELISION,        "EL ",        0 },
2012 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
2013 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
2014 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
2015 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
2016 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
2017 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2018 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
2019 	{ 0, NULL, 0 }
2020 };
2021 
2022 int hist_entry__transaction_len(void)
2023 {
2024 	int i;
2025 	int len = 0;
2026 
2027 	for (i = 0; txbits[i].name; i++) {
2028 		if (!txbits[i].skip_for_len)
2029 			len += strlen(txbits[i].name);
2030 	}
2031 	len += 4; /* :XX<space> */
2032 	return len;
2033 }
2034 
2035 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2036 					    size_t size, unsigned int width)
2037 {
2038 	u64 t = he->transaction;
2039 	char buf[128];
2040 	char *p = buf;
2041 	int i;
2042 
2043 	buf[0] = 0;
2044 	for (i = 0; txbits[i].name; i++)
2045 		if (txbits[i].flag & t)
2046 			p = add_str(p, txbits[i].name);
2047 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2048 		p = add_str(p, "NEITHER ");
2049 	if (t & PERF_TXN_ABORT_MASK) {
2050 		sprintf(p, ":%" PRIx64,
2051 			(t & PERF_TXN_ABORT_MASK) >>
2052 			PERF_TXN_ABORT_SHIFT);
2053 		p += strlen(p);
2054 	}
2055 
2056 	return repsep_snprintf(bf, size, "%-*s", width, buf);
2057 }
2058 
2059 struct sort_entry sort_transaction = {
2060 	.se_header	= "Transaction                ",
2061 	.se_cmp		= sort__transaction_cmp,
2062 	.se_snprintf	= hist_entry__transaction_snprintf,
2063 	.se_width_idx	= HISTC_TRANSACTION,
2064 };
2065 
2066 /* --sort symbol_size */
2067 
2068 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2069 {
2070 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2071 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2072 
2073 	return size_l < size_r ? -1 :
2074 		size_l == size_r ? 0 : 1;
2075 }
2076 
2077 static int64_t
2078 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2079 {
2080 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2081 }
2082 
2083 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2084 					  size_t bf_size, unsigned int width)
2085 {
2086 	if (sym)
2087 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2088 
2089 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2090 }
2091 
2092 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2093 					 size_t size, unsigned int width)
2094 {
2095 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2096 }
2097 
2098 struct sort_entry sort_sym_size = {
2099 	.se_header	= "Symbol size",
2100 	.se_cmp		= sort__sym_size_cmp,
2101 	.se_snprintf	= hist_entry__sym_size_snprintf,
2102 	.se_width_idx	= HISTC_SYM_SIZE,
2103 };
2104 
2105 /* --sort dso_size */
2106 
2107 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2108 {
2109 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2110 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2111 
2112 	return size_l < size_r ? -1 :
2113 		size_l == size_r ? 0 : 1;
2114 }
2115 
2116 static int64_t
2117 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2118 {
2119 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2120 }
2121 
2122 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2123 					  size_t bf_size, unsigned int width)
2124 {
2125 	if (map && map__dso(map))
2126 		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2127 
2128 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2129 }
2130 
2131 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2132 					 size_t size, unsigned int width)
2133 {
2134 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2135 }
2136 
2137 struct sort_entry sort_dso_size = {
2138 	.se_header	= "DSO size",
2139 	.se_cmp		= sort__dso_size_cmp,
2140 	.se_snprintf	= hist_entry__dso_size_snprintf,
2141 	.se_width_idx	= HISTC_DSO_SIZE,
2142 };
2143 
2144 /* --sort addr */
2145 
2146 static int64_t
2147 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2148 {
2149 	u64 left_ip = left->ip;
2150 	u64 right_ip = right->ip;
2151 	struct map *left_map = left->ms.map;
2152 	struct map *right_map = right->ms.map;
2153 
2154 	if (left_map)
2155 		left_ip = map__unmap_ip(left_map, left_ip);
2156 	if (right_map)
2157 		right_ip = map__unmap_ip(right_map, right_ip);
2158 
2159 	return _sort__addr_cmp(left_ip, right_ip);
2160 }
2161 
2162 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2163 				     size_t size, unsigned int width)
2164 {
2165 	u64 ip = he->ip;
2166 	struct map *map = he->ms.map;
2167 
2168 	if (map)
2169 		ip = map__unmap_ip(map, ip);
2170 
2171 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2172 }
2173 
2174 struct sort_entry sort_addr = {
2175 	.se_header	= "Address",
2176 	.se_cmp		= sort__addr_cmp,
2177 	.se_snprintf	= hist_entry__addr_snprintf,
2178 	.se_width_idx	= HISTC_ADDR,
2179 };
2180 
2181 /* --sort type */
2182 
2183 struct annotated_data_type unknown_type = {
2184 	.self = {
2185 		.type_name = (char *)"(unknown)",
2186 		.children = LIST_HEAD_INIT(unknown_type.self.children),
2187 	},
2188 };
2189 
2190 static int64_t
2191 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2192 {
2193 	return sort__addr_cmp(left, right);
2194 }
2195 
2196 static void sort__type_init(struct hist_entry *he)
2197 {
2198 	if (he->mem_type)
2199 		return;
2200 
2201 	he->mem_type = hist_entry__get_data_type(he);
2202 	if (he->mem_type == NULL) {
2203 		he->mem_type = &unknown_type;
2204 		he->mem_type_off = 0;
2205 	}
2206 }
2207 
2208 static int64_t
2209 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2210 {
2211 	struct annotated_data_type *left_type = left->mem_type;
2212 	struct annotated_data_type *right_type = right->mem_type;
2213 
2214 	if (!left_type) {
2215 		sort__type_init(left);
2216 		left_type = left->mem_type;
2217 	}
2218 
2219 	if (!right_type) {
2220 		sort__type_init(right);
2221 		right_type = right->mem_type;
2222 	}
2223 
2224 	return strcmp(left_type->self.type_name, right_type->self.type_name);
2225 }
2226 
2227 static int64_t
2228 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2229 {
2230 	return sort__type_collapse(left, right);
2231 }
2232 
2233 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2234 				     size_t size, unsigned int width)
2235 {
2236 	return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2237 }
2238 
2239 struct sort_entry sort_type = {
2240 	.se_header	= "Data Type",
2241 	.se_cmp		= sort__type_cmp,
2242 	.se_collapse	= sort__type_collapse,
2243 	.se_sort	= sort__type_sort,
2244 	.se_init	= sort__type_init,
2245 	.se_snprintf	= hist_entry__type_snprintf,
2246 	.se_width_idx	= HISTC_TYPE,
2247 };
2248 
2249 /* --sort typeoff */
2250 
2251 static int64_t
2252 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2253 {
2254 	struct annotated_data_type *left_type = left->mem_type;
2255 	struct annotated_data_type *right_type = right->mem_type;
2256 	int64_t ret;
2257 
2258 	if (!left_type) {
2259 		sort__type_init(left);
2260 		left_type = left->mem_type;
2261 	}
2262 
2263 	if (!right_type) {
2264 		sort__type_init(right);
2265 		right_type = right->mem_type;
2266 	}
2267 
2268 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2269 	if (ret)
2270 		return ret;
2271 	return left->mem_type_off - right->mem_type_off;
2272 }
2273 
2274 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2275 			     int offset, bool first)
2276 {
2277 	struct annotated_member *child;
2278 
2279 	if (list_empty(&m->children))
2280 		return;
2281 
2282 	list_for_each_entry(child, &m->children, node) {
2283 		if (child->offset <= offset && offset < child->offset + child->size) {
2284 			int len = 0;
2285 
2286 			/* It can have anonymous struct/union members */
2287 			if (child->var_name) {
2288 				len = scnprintf(buf, sz, "%s%s",
2289 						first ? "" : ".", child->var_name);
2290 				first = false;
2291 			}
2292 
2293 			fill_member_name(buf + len, sz - len, child, offset, first);
2294 			return;
2295 		}
2296 	}
2297 }
2298 
2299 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2300 				     size_t size, unsigned int width __maybe_unused)
2301 {
2302 	struct annotated_data_type *he_type = he->mem_type;
2303 	char buf[4096];
2304 
2305 	buf[0] = '\0';
2306 	if (list_empty(&he_type->self.children))
2307 		snprintf(buf, sizeof(buf), "no field");
2308 	else
2309 		fill_member_name(buf, sizeof(buf), &he_type->self,
2310 				 he->mem_type_off, true);
2311 	buf[4095] = '\0';
2312 
2313 	return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name,
2314 			       he->mem_type_off, buf);
2315 }
2316 
2317 struct sort_entry sort_type_offset = {
2318 	.se_header	= "Data Type Offset",
2319 	.se_cmp		= sort__type_cmp,
2320 	.se_collapse	= sort__typeoff_sort,
2321 	.se_sort	= sort__typeoff_sort,
2322 	.se_init	= sort__type_init,
2323 	.se_snprintf	= hist_entry__typeoff_snprintf,
2324 	.se_width_idx	= HISTC_TYPE_OFFSET,
2325 };
2326 
2327 
2328 struct sort_dimension {
2329 	const char		*name;
2330 	struct sort_entry	*entry;
2331 	int			taken;
2332 };
2333 
2334 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2335 {
2336 	return 0;
2337 }
2338 
2339 const char * __weak arch_perf_header_entry(const char *se_header)
2340 {
2341 	return se_header;
2342 }
2343 
2344 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2345 {
2346 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2347 }
2348 
2349 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2350 
2351 static struct sort_dimension common_sort_dimensions[] = {
2352 	DIM(SORT_PID, "pid", sort_thread),
2353 	DIM(SORT_COMM, "comm", sort_comm),
2354 	DIM(SORT_DSO, "dso", sort_dso),
2355 	DIM(SORT_SYM, "symbol", sort_sym),
2356 	DIM(SORT_PARENT, "parent", sort_parent),
2357 	DIM(SORT_CPU, "cpu", sort_cpu),
2358 	DIM(SORT_SOCKET, "socket", sort_socket),
2359 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2360 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2361 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2362 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2363 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2364 #ifdef HAVE_LIBTRACEEVENT
2365 	DIM(SORT_TRACE, "trace", sort_trace),
2366 #endif
2367 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2368 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2369 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2370 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2371 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2372 	DIM(SORT_TIME, "time", sort_time),
2373 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2374 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2375 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2376 	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2377 	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2378 	DIM(SORT_ADDR, "addr", sort_addr),
2379 	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2380 	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2381 	DIM(SORT_SIMD, "simd", sort_simd),
2382 	DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2383 	DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2384 	DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2385 };
2386 
2387 #undef DIM
2388 
2389 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2390 
2391 static struct sort_dimension bstack_sort_dimensions[] = {
2392 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2393 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2394 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2395 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2396 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2397 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2398 	DIM(SORT_ABORT, "abort", sort_abort),
2399 	DIM(SORT_CYCLES, "cycles", sort_cycles),
2400 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2401 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2402 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2403 	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2404 	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2405 };
2406 
2407 #undef DIM
2408 
2409 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2410 
2411 static struct sort_dimension memory_sort_dimensions[] = {
2412 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2413 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2414 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2415 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2416 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2417 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2418 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2419 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2420 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2421 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2422 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2423 };
2424 
2425 #undef DIM
2426 
2427 struct hpp_dimension {
2428 	const char		*name;
2429 	struct perf_hpp_fmt	*fmt;
2430 	int			taken;
2431 };
2432 
2433 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2434 
2435 static struct hpp_dimension hpp_sort_dimensions[] = {
2436 	DIM(PERF_HPP__OVERHEAD, "overhead"),
2437 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2438 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2439 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2440 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2441 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2442 	DIM(PERF_HPP__SAMPLES, "sample"),
2443 	DIM(PERF_HPP__PERIOD, "period"),
2444 	DIM(PERF_HPP__WEIGHT1, "weight1"),
2445 	DIM(PERF_HPP__WEIGHT2, "weight2"),
2446 	DIM(PERF_HPP__WEIGHT3, "weight3"),
2447 	/* aliases for weight_struct */
2448 	DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2449 	DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2450 	DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2451 };
2452 
2453 #undef DIM
2454 
2455 struct hpp_sort_entry {
2456 	struct perf_hpp_fmt hpp;
2457 	struct sort_entry *se;
2458 };
2459 
2460 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2461 {
2462 	struct hpp_sort_entry *hse;
2463 
2464 	if (!perf_hpp__is_sort_entry(fmt))
2465 		return;
2466 
2467 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2468 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2469 }
2470 
2471 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2472 			      struct hists *hists, int line __maybe_unused,
2473 			      int *span __maybe_unused)
2474 {
2475 	struct hpp_sort_entry *hse;
2476 	size_t len = fmt->user_len;
2477 
2478 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2479 
2480 	if (!len)
2481 		len = hists__col_len(hists, hse->se->se_width_idx);
2482 
2483 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2484 }
2485 
2486 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2487 			     struct perf_hpp *hpp __maybe_unused,
2488 			     struct hists *hists)
2489 {
2490 	struct hpp_sort_entry *hse;
2491 	size_t len = fmt->user_len;
2492 
2493 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2494 
2495 	if (!len)
2496 		len = hists__col_len(hists, hse->se->se_width_idx);
2497 
2498 	return len;
2499 }
2500 
2501 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2502 			     struct hist_entry *he)
2503 {
2504 	struct hpp_sort_entry *hse;
2505 	size_t len = fmt->user_len;
2506 
2507 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2508 
2509 	if (!len)
2510 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2511 
2512 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2513 }
2514 
2515 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2516 			       struct hist_entry *a, struct hist_entry *b)
2517 {
2518 	struct hpp_sort_entry *hse;
2519 
2520 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2521 	return hse->se->se_cmp(a, b);
2522 }
2523 
2524 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2525 				    struct hist_entry *a, struct hist_entry *b)
2526 {
2527 	struct hpp_sort_entry *hse;
2528 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2529 
2530 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2531 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2532 	return collapse_fn(a, b);
2533 }
2534 
2535 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2536 				struct hist_entry *a, struct hist_entry *b)
2537 {
2538 	struct hpp_sort_entry *hse;
2539 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2540 
2541 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2542 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2543 	return sort_fn(a, b);
2544 }
2545 
2546 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2547 {
2548 	return format->header == __sort__hpp_header;
2549 }
2550 
2551 #define MK_SORT_ENTRY_CHK(key)					\
2552 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2553 {								\
2554 	struct hpp_sort_entry *hse;				\
2555 								\
2556 	if (!perf_hpp__is_sort_entry(fmt))			\
2557 		return false;					\
2558 								\
2559 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2560 	return hse->se == &sort_ ## key ;			\
2561 }
2562 
2563 #ifdef HAVE_LIBTRACEEVENT
2564 MK_SORT_ENTRY_CHK(trace)
2565 #else
2566 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2567 {
2568 	return false;
2569 }
2570 #endif
2571 MK_SORT_ENTRY_CHK(srcline)
2572 MK_SORT_ENTRY_CHK(srcfile)
2573 MK_SORT_ENTRY_CHK(thread)
2574 MK_SORT_ENTRY_CHK(comm)
2575 MK_SORT_ENTRY_CHK(dso)
2576 MK_SORT_ENTRY_CHK(sym)
2577 
2578 
2579 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2580 {
2581 	struct hpp_sort_entry *hse_a;
2582 	struct hpp_sort_entry *hse_b;
2583 
2584 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2585 		return false;
2586 
2587 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2588 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2589 
2590 	return hse_a->se == hse_b->se;
2591 }
2592 
2593 static void hse_free(struct perf_hpp_fmt *fmt)
2594 {
2595 	struct hpp_sort_entry *hse;
2596 
2597 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2598 	free(hse);
2599 }
2600 
2601 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2602 {
2603 	struct hpp_sort_entry *hse;
2604 
2605 	if (!perf_hpp__is_sort_entry(fmt))
2606 		return;
2607 
2608 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2609 
2610 	if (hse->se->se_init)
2611 		hse->se->se_init(he);
2612 }
2613 
2614 static struct hpp_sort_entry *
2615 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2616 {
2617 	struct hpp_sort_entry *hse;
2618 
2619 	hse = malloc(sizeof(*hse));
2620 	if (hse == NULL) {
2621 		pr_err("Memory allocation failed\n");
2622 		return NULL;
2623 	}
2624 
2625 	hse->se = sd->entry;
2626 	hse->hpp.name = sd->entry->se_header;
2627 	hse->hpp.header = __sort__hpp_header;
2628 	hse->hpp.width = __sort__hpp_width;
2629 	hse->hpp.entry = __sort__hpp_entry;
2630 	hse->hpp.color = NULL;
2631 
2632 	hse->hpp.cmp = __sort__hpp_cmp;
2633 	hse->hpp.collapse = __sort__hpp_collapse;
2634 	hse->hpp.sort = __sort__hpp_sort;
2635 	hse->hpp.equal = __sort__hpp_equal;
2636 	hse->hpp.free = hse_free;
2637 	hse->hpp.init = hse_init;
2638 
2639 	INIT_LIST_HEAD(&hse->hpp.list);
2640 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2641 	hse->hpp.elide = false;
2642 	hse->hpp.len = 0;
2643 	hse->hpp.user_len = 0;
2644 	hse->hpp.level = level;
2645 
2646 	return hse;
2647 }
2648 
2649 static void hpp_free(struct perf_hpp_fmt *fmt)
2650 {
2651 	free(fmt);
2652 }
2653 
2654 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2655 						       int level)
2656 {
2657 	struct perf_hpp_fmt *fmt;
2658 
2659 	fmt = memdup(hd->fmt, sizeof(*fmt));
2660 	if (fmt) {
2661 		INIT_LIST_HEAD(&fmt->list);
2662 		INIT_LIST_HEAD(&fmt->sort_list);
2663 		fmt->free = hpp_free;
2664 		fmt->level = level;
2665 	}
2666 
2667 	return fmt;
2668 }
2669 
2670 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2671 {
2672 	struct perf_hpp_fmt *fmt;
2673 	struct hpp_sort_entry *hse;
2674 	int ret = -1;
2675 	int r;
2676 
2677 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2678 		if (!perf_hpp__is_sort_entry(fmt))
2679 			continue;
2680 
2681 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2682 		if (hse->se->se_filter == NULL)
2683 			continue;
2684 
2685 		/*
2686 		 * hist entry is filtered if any of sort key in the hpp list
2687 		 * is applied.  But it should skip non-matched filter types.
2688 		 */
2689 		r = hse->se->se_filter(he, type, arg);
2690 		if (r >= 0) {
2691 			if (ret < 0)
2692 				ret = 0;
2693 			ret |= r;
2694 		}
2695 	}
2696 
2697 	return ret;
2698 }
2699 
2700 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2701 					  struct perf_hpp_list *list,
2702 					  int level)
2703 {
2704 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2705 
2706 	if (hse == NULL)
2707 		return -1;
2708 
2709 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2710 	return 0;
2711 }
2712 
2713 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2714 					    struct perf_hpp_list *list)
2715 {
2716 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2717 
2718 	if (hse == NULL)
2719 		return -1;
2720 
2721 	perf_hpp_list__column_register(list, &hse->hpp);
2722 	return 0;
2723 }
2724 
2725 #ifndef HAVE_LIBTRACEEVENT
2726 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2727 {
2728 	return false;
2729 }
2730 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2731 				     struct hists *hists __maybe_unused)
2732 {
2733 	return false;
2734 }
2735 #else
2736 struct hpp_dynamic_entry {
2737 	struct perf_hpp_fmt hpp;
2738 	struct evsel *evsel;
2739 	struct tep_format_field *field;
2740 	unsigned dynamic_len;
2741 	bool raw_trace;
2742 };
2743 
2744 static int hde_width(struct hpp_dynamic_entry *hde)
2745 {
2746 	if (!hde->hpp.len) {
2747 		int len = hde->dynamic_len;
2748 		int namelen = strlen(hde->field->name);
2749 		int fieldlen = hde->field->size;
2750 
2751 		if (namelen > len)
2752 			len = namelen;
2753 
2754 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2755 			/* length for print hex numbers */
2756 			fieldlen = hde->field->size * 2 + 2;
2757 		}
2758 		if (fieldlen > len)
2759 			len = fieldlen;
2760 
2761 		hde->hpp.len = len;
2762 	}
2763 	return hde->hpp.len;
2764 }
2765 
2766 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2767 			       struct hist_entry *he)
2768 {
2769 	char *str, *pos;
2770 	struct tep_format_field *field = hde->field;
2771 	size_t namelen;
2772 	bool last = false;
2773 
2774 	if (hde->raw_trace)
2775 		return;
2776 
2777 	/* parse pretty print result and update max length */
2778 	if (!he->trace_output)
2779 		he->trace_output = get_trace_output(he);
2780 
2781 	namelen = strlen(field->name);
2782 	str = he->trace_output;
2783 
2784 	while (str) {
2785 		pos = strchr(str, ' ');
2786 		if (pos == NULL) {
2787 			last = true;
2788 			pos = str + strlen(str);
2789 		}
2790 
2791 		if (!strncmp(str, field->name, namelen)) {
2792 			size_t len;
2793 
2794 			str += namelen + 1;
2795 			len = pos - str;
2796 
2797 			if (len > hde->dynamic_len)
2798 				hde->dynamic_len = len;
2799 			break;
2800 		}
2801 
2802 		if (last)
2803 			str = NULL;
2804 		else
2805 			str = pos + 1;
2806 	}
2807 }
2808 
2809 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2810 			      struct hists *hists __maybe_unused,
2811 			      int line __maybe_unused,
2812 			      int *span __maybe_unused)
2813 {
2814 	struct hpp_dynamic_entry *hde;
2815 	size_t len = fmt->user_len;
2816 
2817 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2818 
2819 	if (!len)
2820 		len = hde_width(hde);
2821 
2822 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2823 }
2824 
2825 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2826 			     struct perf_hpp *hpp __maybe_unused,
2827 			     struct hists *hists __maybe_unused)
2828 {
2829 	struct hpp_dynamic_entry *hde;
2830 	size_t len = fmt->user_len;
2831 
2832 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2833 
2834 	if (!len)
2835 		len = hde_width(hde);
2836 
2837 	return len;
2838 }
2839 
2840 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2841 {
2842 	struct hpp_dynamic_entry *hde;
2843 
2844 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2845 
2846 	return hists_to_evsel(hists) == hde->evsel;
2847 }
2848 
2849 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2850 			     struct hist_entry *he)
2851 {
2852 	struct hpp_dynamic_entry *hde;
2853 	size_t len = fmt->user_len;
2854 	char *str, *pos;
2855 	struct tep_format_field *field;
2856 	size_t namelen;
2857 	bool last = false;
2858 	int ret;
2859 
2860 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2861 
2862 	if (!len)
2863 		len = hde_width(hde);
2864 
2865 	if (hde->raw_trace)
2866 		goto raw_field;
2867 
2868 	if (!he->trace_output)
2869 		he->trace_output = get_trace_output(he);
2870 
2871 	field = hde->field;
2872 	namelen = strlen(field->name);
2873 	str = he->trace_output;
2874 
2875 	while (str) {
2876 		pos = strchr(str, ' ');
2877 		if (pos == NULL) {
2878 			last = true;
2879 			pos = str + strlen(str);
2880 		}
2881 
2882 		if (!strncmp(str, field->name, namelen)) {
2883 			str += namelen + 1;
2884 			str = strndup(str, pos - str);
2885 
2886 			if (str == NULL)
2887 				return scnprintf(hpp->buf, hpp->size,
2888 						 "%*.*s", len, len, "ERROR");
2889 			break;
2890 		}
2891 
2892 		if (last)
2893 			str = NULL;
2894 		else
2895 			str = pos + 1;
2896 	}
2897 
2898 	if (str == NULL) {
2899 		struct trace_seq seq;
2900 raw_field:
2901 		trace_seq_init(&seq);
2902 		tep_print_field(&seq, he->raw_data, hde->field);
2903 		str = seq.buffer;
2904 	}
2905 
2906 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2907 	free(str);
2908 	return ret;
2909 }
2910 
2911 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2912 			       struct hist_entry *a, struct hist_entry *b)
2913 {
2914 	struct hpp_dynamic_entry *hde;
2915 	struct tep_format_field *field;
2916 	unsigned offset, size;
2917 
2918 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2919 
2920 	field = hde->field;
2921 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2922 		unsigned long long dyn;
2923 
2924 		tep_read_number_field(field, a->raw_data, &dyn);
2925 		offset = dyn & 0xffff;
2926 		size = (dyn >> 16) & 0xffff;
2927 		if (tep_field_is_relative(field->flags))
2928 			offset += field->offset + field->size;
2929 		/* record max width for output */
2930 		if (size > hde->dynamic_len)
2931 			hde->dynamic_len = size;
2932 	} else {
2933 		offset = field->offset;
2934 		size = field->size;
2935 	}
2936 
2937 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2938 }
2939 
2940 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2941 {
2942 	return fmt->cmp == __sort__hde_cmp;
2943 }
2944 
2945 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2946 {
2947 	struct hpp_dynamic_entry *hde_a;
2948 	struct hpp_dynamic_entry *hde_b;
2949 
2950 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2951 		return false;
2952 
2953 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2954 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2955 
2956 	return hde_a->field == hde_b->field;
2957 }
2958 
2959 static void hde_free(struct perf_hpp_fmt *fmt)
2960 {
2961 	struct hpp_dynamic_entry *hde;
2962 
2963 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2964 	free(hde);
2965 }
2966 
2967 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2968 {
2969 	struct hpp_dynamic_entry *hde;
2970 
2971 	if (!perf_hpp__is_dynamic_entry(fmt))
2972 		return;
2973 
2974 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2975 	update_dynamic_len(hde, he);
2976 }
2977 
2978 static struct hpp_dynamic_entry *
2979 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2980 		      int level)
2981 {
2982 	struct hpp_dynamic_entry *hde;
2983 
2984 	hde = malloc(sizeof(*hde));
2985 	if (hde == NULL) {
2986 		pr_debug("Memory allocation failed\n");
2987 		return NULL;
2988 	}
2989 
2990 	hde->evsel = evsel;
2991 	hde->field = field;
2992 	hde->dynamic_len = 0;
2993 
2994 	hde->hpp.name = field->name;
2995 	hde->hpp.header = __sort__hde_header;
2996 	hde->hpp.width  = __sort__hde_width;
2997 	hde->hpp.entry  = __sort__hde_entry;
2998 	hde->hpp.color  = NULL;
2999 
3000 	hde->hpp.init = __sort__hde_init;
3001 	hde->hpp.cmp = __sort__hde_cmp;
3002 	hde->hpp.collapse = __sort__hde_cmp;
3003 	hde->hpp.sort = __sort__hde_cmp;
3004 	hde->hpp.equal = __sort__hde_equal;
3005 	hde->hpp.free = hde_free;
3006 
3007 	INIT_LIST_HEAD(&hde->hpp.list);
3008 	INIT_LIST_HEAD(&hde->hpp.sort_list);
3009 	hde->hpp.elide = false;
3010 	hde->hpp.len = 0;
3011 	hde->hpp.user_len = 0;
3012 	hde->hpp.level = level;
3013 
3014 	return hde;
3015 }
3016 #endif /* HAVE_LIBTRACEEVENT */
3017 
3018 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3019 {
3020 	struct perf_hpp_fmt *new_fmt = NULL;
3021 
3022 	if (perf_hpp__is_sort_entry(fmt)) {
3023 		struct hpp_sort_entry *hse, *new_hse;
3024 
3025 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3026 		new_hse = memdup(hse, sizeof(*hse));
3027 		if (new_hse)
3028 			new_fmt = &new_hse->hpp;
3029 #ifdef HAVE_LIBTRACEEVENT
3030 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
3031 		struct hpp_dynamic_entry *hde, *new_hde;
3032 
3033 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3034 		new_hde = memdup(hde, sizeof(*hde));
3035 		if (new_hde)
3036 			new_fmt = &new_hde->hpp;
3037 #endif
3038 	} else {
3039 		new_fmt = memdup(fmt, sizeof(*fmt));
3040 	}
3041 
3042 	INIT_LIST_HEAD(&new_fmt->list);
3043 	INIT_LIST_HEAD(&new_fmt->sort_list);
3044 
3045 	return new_fmt;
3046 }
3047 
3048 static int parse_field_name(char *str, char **event, char **field, char **opt)
3049 {
3050 	char *event_name, *field_name, *opt_name;
3051 
3052 	event_name = str;
3053 	field_name = strchr(str, '.');
3054 
3055 	if (field_name) {
3056 		*field_name++ = '\0';
3057 	} else {
3058 		event_name = NULL;
3059 		field_name = str;
3060 	}
3061 
3062 	opt_name = strchr(field_name, '/');
3063 	if (opt_name)
3064 		*opt_name++ = '\0';
3065 
3066 	*event = event_name;
3067 	*field = field_name;
3068 	*opt   = opt_name;
3069 
3070 	return 0;
3071 }
3072 
3073 /* find match evsel using a given event name.  The event name can be:
3074  *   1. '%' + event index (e.g. '%1' for first event)
3075  *   2. full event name (e.g. sched:sched_switch)
3076  *   3. partial event name (should not contain ':')
3077  */
3078 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3079 {
3080 	struct evsel *evsel = NULL;
3081 	struct evsel *pos;
3082 	bool full_name;
3083 
3084 	/* case 1 */
3085 	if (event_name[0] == '%') {
3086 		int nr = strtol(event_name+1, NULL, 0);
3087 
3088 		if (nr > evlist->core.nr_entries)
3089 			return NULL;
3090 
3091 		evsel = evlist__first(evlist);
3092 		while (--nr > 0)
3093 			evsel = evsel__next(evsel);
3094 
3095 		return evsel;
3096 	}
3097 
3098 	full_name = !!strchr(event_name, ':');
3099 	evlist__for_each_entry(evlist, pos) {
3100 		/* case 2 */
3101 		if (full_name && evsel__name_is(pos, event_name))
3102 			return pos;
3103 		/* case 3 */
3104 		if (!full_name && strstr(pos->name, event_name)) {
3105 			if (evsel) {
3106 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3107 					 event_name, evsel->name, pos->name);
3108 				return NULL;
3109 			}
3110 			evsel = pos;
3111 		}
3112 	}
3113 
3114 	return evsel;
3115 }
3116 
3117 #ifdef HAVE_LIBTRACEEVENT
3118 static int __dynamic_dimension__add(struct evsel *evsel,
3119 				    struct tep_format_field *field,
3120 				    bool raw_trace, int level)
3121 {
3122 	struct hpp_dynamic_entry *hde;
3123 
3124 	hde = __alloc_dynamic_entry(evsel, field, level);
3125 	if (hde == NULL)
3126 		return -ENOMEM;
3127 
3128 	hde->raw_trace = raw_trace;
3129 
3130 	perf_hpp__register_sort_field(&hde->hpp);
3131 	return 0;
3132 }
3133 
3134 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3135 {
3136 	int ret;
3137 	struct tep_format_field *field;
3138 
3139 	field = evsel->tp_format->format.fields;
3140 	while (field) {
3141 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3142 		if (ret < 0)
3143 			return ret;
3144 
3145 		field = field->next;
3146 	}
3147 	return 0;
3148 }
3149 
3150 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3151 				  int level)
3152 {
3153 	int ret;
3154 	struct evsel *evsel;
3155 
3156 	evlist__for_each_entry(evlist, evsel) {
3157 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3158 			continue;
3159 
3160 		ret = add_evsel_fields(evsel, raw_trace, level);
3161 		if (ret < 0)
3162 			return ret;
3163 	}
3164 	return 0;
3165 }
3166 
3167 static int add_all_matching_fields(struct evlist *evlist,
3168 				   char *field_name, bool raw_trace, int level)
3169 {
3170 	int ret = -ESRCH;
3171 	struct evsel *evsel;
3172 	struct tep_format_field *field;
3173 
3174 	evlist__for_each_entry(evlist, evsel) {
3175 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3176 			continue;
3177 
3178 		field = tep_find_any_field(evsel->tp_format, field_name);
3179 		if (field == NULL)
3180 			continue;
3181 
3182 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3183 		if (ret < 0)
3184 			break;
3185 	}
3186 	return ret;
3187 }
3188 #endif /* HAVE_LIBTRACEEVENT */
3189 
3190 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3191 			     int level)
3192 {
3193 	char *str, *event_name, *field_name, *opt_name;
3194 	struct evsel *evsel;
3195 	bool raw_trace = symbol_conf.raw_trace;
3196 	int ret = 0;
3197 
3198 	if (evlist == NULL)
3199 		return -ENOENT;
3200 
3201 	str = strdup(tok);
3202 	if (str == NULL)
3203 		return -ENOMEM;
3204 
3205 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3206 		ret = -EINVAL;
3207 		goto out;
3208 	}
3209 
3210 	if (opt_name) {
3211 		if (strcmp(opt_name, "raw")) {
3212 			pr_debug("unsupported field option %s\n", opt_name);
3213 			ret = -EINVAL;
3214 			goto out;
3215 		}
3216 		raw_trace = true;
3217 	}
3218 
3219 #ifdef HAVE_LIBTRACEEVENT
3220 	if (!strcmp(field_name, "trace_fields")) {
3221 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
3222 		goto out;
3223 	}
3224 
3225 	if (event_name == NULL) {
3226 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3227 		goto out;
3228 	}
3229 #else
3230 	evlist__for_each_entry(evlist, evsel) {
3231 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3232 			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3233 			ret = -ENOTSUP;
3234 		}
3235 	}
3236 
3237 	if (ret) {
3238 		pr_err("\n");
3239 		goto out;
3240 	}
3241 #endif
3242 
3243 	evsel = find_evsel(evlist, event_name);
3244 	if (evsel == NULL) {
3245 		pr_debug("Cannot find event: %s\n", event_name);
3246 		ret = -ENOENT;
3247 		goto out;
3248 	}
3249 
3250 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3251 		pr_debug("%s is not a tracepoint event\n", event_name);
3252 		ret = -EINVAL;
3253 		goto out;
3254 	}
3255 
3256 #ifdef HAVE_LIBTRACEEVENT
3257 	if (!strcmp(field_name, "*")) {
3258 		ret = add_evsel_fields(evsel, raw_trace, level);
3259 	} else {
3260 		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3261 
3262 		if (field == NULL) {
3263 			pr_debug("Cannot find event field for %s.%s\n",
3264 				 event_name, field_name);
3265 			return -ENOENT;
3266 		}
3267 
3268 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3269 	}
3270 #else
3271 	(void)level;
3272 	(void)raw_trace;
3273 #endif /* HAVE_LIBTRACEEVENT */
3274 
3275 out:
3276 	free(str);
3277 	return ret;
3278 }
3279 
3280 static int __sort_dimension__add(struct sort_dimension *sd,
3281 				 struct perf_hpp_list *list,
3282 				 int level)
3283 {
3284 	if (sd->taken)
3285 		return 0;
3286 
3287 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3288 		return -1;
3289 
3290 	if (sd->entry->se_collapse)
3291 		list->need_collapse = 1;
3292 
3293 	sd->taken = 1;
3294 
3295 	return 0;
3296 }
3297 
3298 static int __hpp_dimension__add(struct hpp_dimension *hd,
3299 				struct perf_hpp_list *list,
3300 				int level)
3301 {
3302 	struct perf_hpp_fmt *fmt;
3303 
3304 	if (hd->taken)
3305 		return 0;
3306 
3307 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3308 	if (!fmt)
3309 		return -1;
3310 
3311 	hd->taken = 1;
3312 	perf_hpp_list__register_sort_field(list, fmt);
3313 	return 0;
3314 }
3315 
3316 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3317 					struct sort_dimension *sd)
3318 {
3319 	if (sd->taken)
3320 		return 0;
3321 
3322 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3323 		return -1;
3324 
3325 	sd->taken = 1;
3326 	return 0;
3327 }
3328 
3329 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3330 				       struct hpp_dimension *hd)
3331 {
3332 	struct perf_hpp_fmt *fmt;
3333 
3334 	if (hd->taken)
3335 		return 0;
3336 
3337 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3338 	if (!fmt)
3339 		return -1;
3340 
3341 	hd->taken = 1;
3342 	perf_hpp_list__column_register(list, fmt);
3343 	return 0;
3344 }
3345 
3346 int hpp_dimension__add_output(unsigned col)
3347 {
3348 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3349 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3350 }
3351 
3352 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3353 			struct evlist *evlist,
3354 			int level)
3355 {
3356 	unsigned int i, j;
3357 
3358 	/*
3359 	 * Check to see if there are any arch specific
3360 	 * sort dimensions not applicable for the current
3361 	 * architecture. If so, Skip that sort key since
3362 	 * we don't want to display it in the output fields.
3363 	 */
3364 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3365 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3366 				!arch_support_sort_key(tok)) {
3367 			return 0;
3368 		}
3369 	}
3370 
3371 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3372 		struct sort_dimension *sd = &common_sort_dimensions[i];
3373 
3374 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3375 			continue;
3376 
3377 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3378 			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3379 				sort_dimension_add_dynamic_header(sd);
3380 		}
3381 
3382 		if (sd->entry == &sort_parent && parent_pattern) {
3383 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3384 			if (ret) {
3385 				char err[BUFSIZ];
3386 
3387 				regerror(ret, &parent_regex, err, sizeof(err));
3388 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3389 				return -EINVAL;
3390 			}
3391 			list->parent = 1;
3392 		} else if (sd->entry == &sort_sym) {
3393 			list->sym = 1;
3394 			/*
3395 			 * perf diff displays the performance difference amongst
3396 			 * two or more perf.data files. Those files could come
3397 			 * from different binaries. So we should not compare
3398 			 * their ips, but the name of symbol.
3399 			 */
3400 			if (sort__mode == SORT_MODE__DIFF)
3401 				sd->entry->se_collapse = sort__sym_sort;
3402 
3403 		} else if (sd->entry == &sort_dso) {
3404 			list->dso = 1;
3405 		} else if (sd->entry == &sort_socket) {
3406 			list->socket = 1;
3407 		} else if (sd->entry == &sort_thread) {
3408 			list->thread = 1;
3409 		} else if (sd->entry == &sort_comm) {
3410 			list->comm = 1;
3411 		} else if (sd->entry == &sort_type_offset) {
3412 			symbol_conf.annotate_data_member = true;
3413 		}
3414 
3415 		return __sort_dimension__add(sd, list, level);
3416 	}
3417 
3418 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3419 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3420 
3421 		if (strncasecmp(tok, hd->name, strlen(tok)))
3422 			continue;
3423 
3424 		return __hpp_dimension__add(hd, list, level);
3425 	}
3426 
3427 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3428 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3429 
3430 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3431 			continue;
3432 
3433 		if (sort__mode != SORT_MODE__BRANCH)
3434 			return -EINVAL;
3435 
3436 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3437 			list->sym = 1;
3438 
3439 		__sort_dimension__add(sd, list, level);
3440 		return 0;
3441 	}
3442 
3443 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3444 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3445 
3446 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3447 			continue;
3448 
3449 		if (sort__mode != SORT_MODE__MEMORY)
3450 			return -EINVAL;
3451 
3452 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3453 			return -EINVAL;
3454 
3455 		if (sd->entry == &sort_mem_daddr_sym)
3456 			list->sym = 1;
3457 
3458 		__sort_dimension__add(sd, list, level);
3459 		return 0;
3460 	}
3461 
3462 	if (!add_dynamic_entry(evlist, tok, level))
3463 		return 0;
3464 
3465 	return -ESRCH;
3466 }
3467 
3468 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3469 			   struct evlist *evlist)
3470 {
3471 	char *tmp, *tok;
3472 	int ret = 0;
3473 	int level = 0;
3474 	int next_level = 1;
3475 	bool in_group = false;
3476 
3477 	do {
3478 		tok = str;
3479 		tmp = strpbrk(str, "{}, ");
3480 		if (tmp) {
3481 			if (in_group)
3482 				next_level = level;
3483 			else
3484 				next_level = level + 1;
3485 
3486 			if (*tmp == '{')
3487 				in_group = true;
3488 			else if (*tmp == '}')
3489 				in_group = false;
3490 
3491 			*tmp = '\0';
3492 			str = tmp + 1;
3493 		}
3494 
3495 		if (*tok) {
3496 			ret = sort_dimension__add(list, tok, evlist, level);
3497 			if (ret == -EINVAL) {
3498 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3499 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3500 				else
3501 					ui__error("Invalid --sort key: `%s'", tok);
3502 				break;
3503 			} else if (ret == -ESRCH) {
3504 				ui__error("Unknown --sort key: `%s'", tok);
3505 				break;
3506 			}
3507 		}
3508 
3509 		level = next_level;
3510 	} while (tmp);
3511 
3512 	return ret;
3513 }
3514 
3515 static const char *get_default_sort_order(struct evlist *evlist)
3516 {
3517 	const char *default_sort_orders[] = {
3518 		default_sort_order,
3519 		default_branch_sort_order,
3520 		default_mem_sort_order,
3521 		default_top_sort_order,
3522 		default_diff_sort_order,
3523 		default_tracepoint_sort_order,
3524 	};
3525 	bool use_trace = true;
3526 	struct evsel *evsel;
3527 
3528 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3529 
3530 	if (evlist == NULL || evlist__empty(evlist))
3531 		goto out_no_evlist;
3532 
3533 	evlist__for_each_entry(evlist, evsel) {
3534 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3535 			use_trace = false;
3536 			break;
3537 		}
3538 	}
3539 
3540 	if (use_trace) {
3541 		sort__mode = SORT_MODE__TRACEPOINT;
3542 		if (symbol_conf.raw_trace)
3543 			return "trace_fields";
3544 	}
3545 out_no_evlist:
3546 	return default_sort_orders[sort__mode];
3547 }
3548 
3549 static int setup_sort_order(struct evlist *evlist)
3550 {
3551 	char *new_sort_order;
3552 
3553 	/*
3554 	 * Append '+'-prefixed sort order to the default sort
3555 	 * order string.
3556 	 */
3557 	if (!sort_order || is_strict_order(sort_order))
3558 		return 0;
3559 
3560 	if (sort_order[1] == '\0') {
3561 		ui__error("Invalid --sort key: `+'");
3562 		return -EINVAL;
3563 	}
3564 
3565 	/*
3566 	 * We allocate new sort_order string, but we never free it,
3567 	 * because it's checked over the rest of the code.
3568 	 */
3569 	if (asprintf(&new_sort_order, "%s,%s",
3570 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3571 		pr_err("Not enough memory to set up --sort");
3572 		return -ENOMEM;
3573 	}
3574 
3575 	sort_order = new_sort_order;
3576 	return 0;
3577 }
3578 
3579 /*
3580  * Adds 'pre,' prefix into 'str' is 'pre' is
3581  * not already part of 'str'.
3582  */
3583 static char *prefix_if_not_in(const char *pre, char *str)
3584 {
3585 	char *n;
3586 
3587 	if (!str || strstr(str, pre))
3588 		return str;
3589 
3590 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3591 		n = NULL;
3592 
3593 	free(str);
3594 	return n;
3595 }
3596 
3597 static char *setup_overhead(char *keys)
3598 {
3599 	if (sort__mode == SORT_MODE__DIFF)
3600 		return keys;
3601 
3602 	keys = prefix_if_not_in("overhead", keys);
3603 
3604 	if (symbol_conf.cumulate_callchain)
3605 		keys = prefix_if_not_in("overhead_children", keys);
3606 
3607 	return keys;
3608 }
3609 
3610 static int __setup_sorting(struct evlist *evlist)
3611 {
3612 	char *str;
3613 	const char *sort_keys;
3614 	int ret = 0;
3615 
3616 	ret = setup_sort_order(evlist);
3617 	if (ret)
3618 		return ret;
3619 
3620 	sort_keys = sort_order;
3621 	if (sort_keys == NULL) {
3622 		if (is_strict_order(field_order)) {
3623 			/*
3624 			 * If user specified field order but no sort order,
3625 			 * we'll honor it and not add default sort orders.
3626 			 */
3627 			return 0;
3628 		}
3629 
3630 		sort_keys = get_default_sort_order(evlist);
3631 	}
3632 
3633 	str = strdup(sort_keys);
3634 	if (str == NULL) {
3635 		pr_err("Not enough memory to setup sort keys");
3636 		return -ENOMEM;
3637 	}
3638 
3639 	/*
3640 	 * Prepend overhead fields for backward compatibility.
3641 	 */
3642 	if (!is_strict_order(field_order)) {
3643 		str = setup_overhead(str);
3644 		if (str == NULL) {
3645 			pr_err("Not enough memory to setup overhead keys");
3646 			return -ENOMEM;
3647 		}
3648 	}
3649 
3650 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3651 
3652 	free(str);
3653 	return ret;
3654 }
3655 
3656 void perf_hpp__set_elide(int idx, bool elide)
3657 {
3658 	struct perf_hpp_fmt *fmt;
3659 	struct hpp_sort_entry *hse;
3660 
3661 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3662 		if (!perf_hpp__is_sort_entry(fmt))
3663 			continue;
3664 
3665 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3666 		if (hse->se->se_width_idx == idx) {
3667 			fmt->elide = elide;
3668 			break;
3669 		}
3670 	}
3671 }
3672 
3673 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3674 {
3675 	if (list && strlist__nr_entries(list) == 1) {
3676 		if (fp != NULL)
3677 			fprintf(fp, "# %s: %s\n", list_name,
3678 				strlist__entry(list, 0)->s);
3679 		return true;
3680 	}
3681 	return false;
3682 }
3683 
3684 static bool get_elide(int idx, FILE *output)
3685 {
3686 	switch (idx) {
3687 	case HISTC_SYMBOL:
3688 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3689 	case HISTC_DSO:
3690 		return __get_elide(symbol_conf.dso_list, "dso", output);
3691 	case HISTC_COMM:
3692 		return __get_elide(symbol_conf.comm_list, "comm", output);
3693 	default:
3694 		break;
3695 	}
3696 
3697 	if (sort__mode != SORT_MODE__BRANCH)
3698 		return false;
3699 
3700 	switch (idx) {
3701 	case HISTC_SYMBOL_FROM:
3702 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3703 	case HISTC_SYMBOL_TO:
3704 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3705 	case HISTC_DSO_FROM:
3706 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3707 	case HISTC_DSO_TO:
3708 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3709 	case HISTC_ADDR_FROM:
3710 		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3711 	case HISTC_ADDR_TO:
3712 		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3713 	default:
3714 		break;
3715 	}
3716 
3717 	return false;
3718 }
3719 
3720 void sort__setup_elide(FILE *output)
3721 {
3722 	struct perf_hpp_fmt *fmt;
3723 	struct hpp_sort_entry *hse;
3724 
3725 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3726 		if (!perf_hpp__is_sort_entry(fmt))
3727 			continue;
3728 
3729 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3730 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3731 	}
3732 
3733 	/*
3734 	 * It makes no sense to elide all of sort entries.
3735 	 * Just revert them to show up again.
3736 	 */
3737 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3738 		if (!perf_hpp__is_sort_entry(fmt))
3739 			continue;
3740 
3741 		if (!fmt->elide)
3742 			return;
3743 	}
3744 
3745 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3746 		if (!perf_hpp__is_sort_entry(fmt))
3747 			continue;
3748 
3749 		fmt->elide = false;
3750 	}
3751 }
3752 
3753 int output_field_add(struct perf_hpp_list *list, const char *tok)
3754 {
3755 	unsigned int i;
3756 
3757 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3758 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3759 
3760 		if (strncasecmp(tok, hd->name, strlen(tok)))
3761 			continue;
3762 
3763 		if (!strcasecmp(tok, "weight"))
3764 			ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
3765 
3766 		return __hpp_dimension__add_output(list, hd);
3767 	}
3768 
3769 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3770 		struct sort_dimension *sd = &common_sort_dimensions[i];
3771 
3772 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3773 			continue;
3774 
3775 		return __sort_dimension__add_output(list, sd);
3776 	}
3777 
3778 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3779 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3780 
3781 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3782 			continue;
3783 
3784 		if (sort__mode != SORT_MODE__BRANCH)
3785 			return -EINVAL;
3786 
3787 		return __sort_dimension__add_output(list, sd);
3788 	}
3789 
3790 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3791 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3792 
3793 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3794 			continue;
3795 
3796 		if (sort__mode != SORT_MODE__MEMORY)
3797 			return -EINVAL;
3798 
3799 		return __sort_dimension__add_output(list, sd);
3800 	}
3801 
3802 	return -ESRCH;
3803 }
3804 
3805 static int setup_output_list(struct perf_hpp_list *list, char *str)
3806 {
3807 	char *tmp, *tok;
3808 	int ret = 0;
3809 
3810 	for (tok = strtok_r(str, ", ", &tmp);
3811 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3812 		ret = output_field_add(list, tok);
3813 		if (ret == -EINVAL) {
3814 			ui__error("Invalid --fields key: `%s'", tok);
3815 			break;
3816 		} else if (ret == -ESRCH) {
3817 			ui__error("Unknown --fields key: `%s'", tok);
3818 			break;
3819 		}
3820 	}
3821 
3822 	return ret;
3823 }
3824 
3825 void reset_dimensions(void)
3826 {
3827 	unsigned int i;
3828 
3829 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3830 		common_sort_dimensions[i].taken = 0;
3831 
3832 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3833 		hpp_sort_dimensions[i].taken = 0;
3834 
3835 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3836 		bstack_sort_dimensions[i].taken = 0;
3837 
3838 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3839 		memory_sort_dimensions[i].taken = 0;
3840 }
3841 
3842 bool is_strict_order(const char *order)
3843 {
3844 	return order && (*order != '+');
3845 }
3846 
3847 static int __setup_output_field(void)
3848 {
3849 	char *str, *strp;
3850 	int ret = -EINVAL;
3851 
3852 	if (field_order == NULL)
3853 		return 0;
3854 
3855 	strp = str = strdup(field_order);
3856 	if (str == NULL) {
3857 		pr_err("Not enough memory to setup output fields");
3858 		return -ENOMEM;
3859 	}
3860 
3861 	if (!is_strict_order(field_order))
3862 		strp++;
3863 
3864 	if (!strlen(strp)) {
3865 		ui__error("Invalid --fields key: `+'");
3866 		goto out;
3867 	}
3868 
3869 	ret = setup_output_list(&perf_hpp_list, strp);
3870 
3871 out:
3872 	free(str);
3873 	return ret;
3874 }
3875 
3876 int setup_sorting(struct evlist *evlist)
3877 {
3878 	int err;
3879 
3880 	err = __setup_sorting(evlist);
3881 	if (err < 0)
3882 		return err;
3883 
3884 	if (parent_pattern != default_parent_pattern) {
3885 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3886 		if (err < 0)
3887 			return err;
3888 	}
3889 
3890 	reset_dimensions();
3891 
3892 	/*
3893 	 * perf diff doesn't use default hpp output fields.
3894 	 */
3895 	if (sort__mode != SORT_MODE__DIFF)
3896 		perf_hpp__init();
3897 
3898 	err = __setup_output_field();
3899 	if (err < 0)
3900 		return err;
3901 
3902 	/* copy sort keys to output fields */
3903 	perf_hpp__setup_output_field(&perf_hpp_list);
3904 	/* and then copy output fields to sort keys */
3905 	perf_hpp__append_sort_keys(&perf_hpp_list);
3906 
3907 	/* setup hists-specific output fields */
3908 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3909 		return -1;
3910 
3911 	return 0;
3912 }
3913 
3914 void reset_output_field(void)
3915 {
3916 	perf_hpp_list.need_collapse = 0;
3917 	perf_hpp_list.parent = 0;
3918 	perf_hpp_list.sym = 0;
3919 	perf_hpp_list.dso = 0;
3920 
3921 	field_order = NULL;
3922 	sort_order = NULL;
3923 
3924 	reset_dimensions();
3925 	perf_hpp__reset_output_field(&perf_hpp_list);
3926 }
3927 
3928 #define INDENT (3*8 + 1)
3929 
3930 static void add_key(struct strbuf *sb, const char *str, int *llen)
3931 {
3932 	if (!str)
3933 		return;
3934 
3935 	if (*llen >= 75) {
3936 		strbuf_addstr(sb, "\n\t\t\t ");
3937 		*llen = INDENT;
3938 	}
3939 	strbuf_addf(sb, " %s", str);
3940 	*llen += strlen(str) + 1;
3941 }
3942 
3943 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3944 			    int *llen)
3945 {
3946 	int i;
3947 
3948 	for (i = 0; i < n; i++)
3949 		add_key(sb, s[i].name, llen);
3950 }
3951 
3952 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3953 				int *llen)
3954 {
3955 	int i;
3956 
3957 	for (i = 0; i < n; i++)
3958 		add_key(sb, s[i].name, llen);
3959 }
3960 
3961 char *sort_help(const char *prefix)
3962 {
3963 	struct strbuf sb;
3964 	char *s;
3965 	int len = strlen(prefix) + INDENT;
3966 
3967 	strbuf_init(&sb, 300);
3968 	strbuf_addstr(&sb, prefix);
3969 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3970 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3971 	add_sort_string(&sb, common_sort_dimensions,
3972 			    ARRAY_SIZE(common_sort_dimensions), &len);
3973 	add_sort_string(&sb, bstack_sort_dimensions,
3974 			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3975 	add_sort_string(&sb, memory_sort_dimensions,
3976 			    ARRAY_SIZE(memory_sort_dimensions), &len);
3977 	s = strbuf_detach(&sb, NULL);
3978 	strbuf_release(&sb);
3979 	return s;
3980 }
3981