1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "mem-info.h"
27 #include "annotate.h"
28 #include "annotate-data.h"
29 #include "event.h"
30 #include "time-utils.h"
31 #include "cgroup.h"
32 #include "machine.h"
33 #include "trace-event.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
36 
37 #ifdef HAVE_LIBTRACEEVENT
38 #include <event-parse.h>
39 #endif
40 
41 regex_t		parent_regex;
42 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
43 const char	*parent_pattern = default_parent_pattern;
44 const char	*default_sort_order = "comm,dso,symbol";
45 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
46 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
47 const char	default_top_sort_order[] = "dso,symbol";
48 const char	default_diff_sort_order[] = "dso,symbol";
49 const char	default_tracepoint_sort_order[] = "trace";
50 const char	*sort_order;
51 const char	*field_order;
52 regex_t		ignore_callees_regex;
53 int		have_ignore_callees = 0;
54 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
55 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
56 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
57 
58 /*
59  * Some architectures have an Adjacent Cacheline Prefetch feature, which
60  * behaves as if the cacheline size were doubled. Enable this flag to
61  * check things at double cacheline granularity.
62  */
63 bool chk_double_cl;
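
/*
 * Editor's illustration (not part of the original source), assuming
 * cl_address() in cacheline.h rounds an address down to the start of its
 * (optionally doubled) cacheline: with 64-byte lines, 0x1000 and 0x1040 are
 * normally distinct cachelines, but with chk_double_cl set both fall into
 * the same 128-byte "double cacheline" starting at 0x1000.
 */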
64 
65 /*
66  * Replaces all occurrences of the character given via the:
67  *
68  * -t, --field-separator
69  *
70  * option, which selects a special separator character and disables padding
71  * with spaces: every occurrence of this separator in symbol names (and other
72  * output) is replaced with a '.' character, making it the only invalid separator.
73  */
74 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
75 {
76 	int n;
77 	va_list ap;
78 
79 	va_start(ap, fmt);
80 	n = vsnprintf(bf, size, fmt, ap);
81 	if (symbol_conf.field_sep && n > 0) {
82 		char *sep = bf;
83 
84 		while (1) {
85 			sep = strchr(sep, *symbol_conf.field_sep);
86 			if (sep == NULL)
87 				break;
88 			*sep = '.';
89 		}
90 	}
91 	va_end(ap);
92 
93 	if (n >= (int)size)
94 		return size - 1;
95 	return n;
96 }
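
/*
 * Editor's usage sketch (not part of the original source), assuming
 * symbol_conf.field_sep was set via -t/--field-separator:
 *
 *	char buf[32];
 *
 *	symbol_conf.field_sep = ",";
 *	repsep_snprintf(buf, sizeof(buf), "%s", "some,name");
 *	// buf now holds "some.name", so the separator only ever
 *	// appears between output fields.
 */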
97 
98 static int64_t cmp_null(const void *l, const void *r)
99 {
100 	if (!l && !r)
101 		return 0;
102 	else if (!l)
103 		return -1;
104 	else
105 		return 1;
106 }
107 
108 /* --sort pid */
109 
110 static int64_t
111 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
112 {
113 	return thread__tid(right->thread) - thread__tid(left->thread);
114 }
115 
116 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
117 				       size_t size, unsigned int width)
118 {
119 	const char *comm = thread__comm_str(he->thread);
120 
121 	width = max(7U, width) - 8;
122 	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
123 			       width, width, comm ?: "");
124 }
125 
126 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
127 {
128 	const struct thread *th = arg;
129 
130 	if (type != HIST_FILTER__THREAD)
131 		return -1;
132 
133 	return th && !RC_CHK_EQUAL(he->thread, th);
134 }
135 
136 struct sort_entry sort_thread = {
137 	.se_header	= "    Pid:Command",
138 	.se_cmp		= sort__thread_cmp,
139 	.se_snprintf	= hist_entry__thread_snprintf,
140 	.se_filter	= hist_entry__thread_filter,
141 	.se_width_idx	= HISTC_THREAD,
142 };
143 
144 /* --sort simd */
145 
146 static int64_t
147 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
148 {
149 	if (left->simd_flags.arch != right->simd_flags.arch)
150 		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
151 
152 	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
153 }
154 
155 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
156 {
157 	u64 arch = simd_flags->arch;
158 
159 	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
160 		return "SVE";
161 	else
162 		return "n/a";
163 }
164 
165 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
166 				     size_t size, unsigned int width __maybe_unused)
167 {
168 	const char *name;
169 
170 	if (!he->simd_flags.arch)
171 		return repsep_snprintf(bf, size, "");
172 
173 	name = hist_entry__get_simd_name(&he->simd_flags);
174 
175 	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
176 		return repsep_snprintf(bf, size, "[e] %s", name);
177 	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
178 		return repsep_snprintf(bf, size, "[p] %s", name);
179 
180 	return repsep_snprintf(bf, size, "[.] %s", name);
181 }
182 
183 struct sort_entry sort_simd = {
184 	.se_header	= "Simd   ",
185 	.se_cmp		= sort__simd_cmp,
186 	.se_snprintf	= hist_entry__simd_snprintf,
187 	.se_width_idx	= HISTC_SIMD,
188 };
189 
190 /* --sort comm */
191 
192 /*
193  * We can't use pointer comparison in the functions below,
194  * because the result would depend on arbitrary pointer
195  * values and could break sorting assumptions.
196  */
197 static int64_t
198 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
199 {
200 	return strcmp(comm__str(right->comm), comm__str(left->comm));
201 }
202 
203 static int64_t
204 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
205 {
206 	return strcmp(comm__str(right->comm), comm__str(left->comm));
207 }
208 
209 static int64_t
210 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
211 {
212 	return strcmp(comm__str(right->comm), comm__str(left->comm));
213 }
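
/*
 * Editor's note (not part of the original source): comm__str() yields the
 * command name, so two entries for the same command compare equal here even
 * when their struct comm pointers differ; ordering by the raw pointers would
 * change from run to run with allocation order, which is exactly what the
 * comment above warns against.
 */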
214 
215 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
216 				     size_t size, unsigned int width)
217 {
218 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
219 }
220 
221 struct sort_entry sort_comm = {
222 	.se_header	= "Command",
223 	.se_cmp		= sort__comm_cmp,
224 	.se_collapse	= sort__comm_collapse,
225 	.se_sort	= sort__comm_sort,
226 	.se_snprintf	= hist_entry__comm_snprintf,
227 	.se_filter	= hist_entry__thread_filter,
228 	.se_width_idx	= HISTC_COMM,
229 };
230 
231 /* --sort dso */
232 
233 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
234 {
235 	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
236 	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
237 	const char *dso_name_l, *dso_name_r;
238 
239 	if (!dso_l || !dso_r)
240 		return cmp_null(dso_r, dso_l);
241 
242 	if (verbose > 0) {
243 		dso_name_l = dso__long_name(dso_l);
244 		dso_name_r = dso__long_name(dso_r);
245 	} else {
246 		dso_name_l = dso__short_name(dso_l);
247 		dso_name_r = dso__short_name(dso_r);
248 	}
249 
250 	return strcmp(dso_name_l, dso_name_r);
251 }
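
/*
 * Editor's example (not part of the original source): with verbose > 0 the
 * long DSO names are compared, e.g. "/usr/lib/libc.so.6" vs
 * "/usr/lib/libfoo.so", otherwise the short names "libc.so.6" vs
 * "libfoo.so", matching what the dso column prints.
 */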
252 
253 static int64_t
254 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
255 {
256 	return _sort__dso_cmp(right->ms.map, left->ms.map);
257 }
258 
259 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
260 				     size_t size, unsigned int width)
261 {
262 	const struct dso *dso = map ? map__dso(map) : NULL;
263 	const char *dso_name = "[unknown]";
264 
265 	if (dso)
266 		dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
267 
268 	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
269 }
270 
271 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
272 				    size_t size, unsigned int width)
273 {
274 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
275 }
276 
277 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
278 {
279 	const struct dso *dso = arg;
280 
281 	if (type != HIST_FILTER__DSO)
282 		return -1;
283 
284 	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
285 }
286 
287 struct sort_entry sort_dso = {
288 	.se_header	= "Shared Object",
289 	.se_cmp		= sort__dso_cmp,
290 	.se_snprintf	= hist_entry__dso_snprintf,
291 	.se_filter	= hist_entry__dso_filter,
292 	.se_width_idx	= HISTC_DSO,
293 };
294 
295 /* --sort symbol */
296 
297 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
298 {
299 	return (int64_t)(right_ip - left_ip);
300 }
301 
302 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
303 {
304 	if (!sym_l || !sym_r)
305 		return cmp_null(sym_l, sym_r);
306 
307 	if (sym_l == sym_r)
308 		return 0;
309 
310 	if (sym_l->inlined || sym_r->inlined) {
311 		int ret = strcmp(sym_l->name, sym_r->name);
312 
313 		if (ret)
314 			return ret;
315 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
316 			return 0;
317 	}
318 
319 	if (sym_l->start != sym_r->start)
320 		return (int64_t)(sym_r->start - sym_l->start);
321 
322 	return (int64_t)(sym_r->end - sym_l->end);
323 }
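
/*
 * Editor's note on the inlined case above (not part of the original source):
 * two inline instances of the same function may be represented by distinct
 * symbol objects with overlapping address ranges; when the names match and
 * the ranges overlap they compare equal here, so their samples can collapse
 * into a single hist entry.
 */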
324 
325 static int64_t
326 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
327 {
328 	int64_t ret;
329 
330 	if (!left->ms.sym && !right->ms.sym)
331 		return _sort__addr_cmp(left->ip, right->ip);
332 
333 	/*
334 	 * comparing symbol address alone is not enough since it's a
335 	 * relative address within a dso.
336 	 */
337 	if (!hists__has(left->hists, dso)) {
338 		ret = sort__dso_cmp(left, right);
339 		if (ret != 0)
340 			return ret;
341 	}
342 
343 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
344 }
345 
346 static int64_t
347 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
348 {
349 	if (!left->ms.sym || !right->ms.sym)
350 		return cmp_null(left->ms.sym, right->ms.sym);
351 
352 	return strcmp(right->ms.sym->name, left->ms.sym->name);
353 }
354 
355 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
356 				     u64 ip, char level, char *bf, size_t size,
357 				     unsigned int width)
358 {
359 	struct symbol *sym = ms->sym;
360 	struct map *map = ms->map;
361 	size_t ret = 0;
362 
363 	if (verbose > 0) {
364 		struct dso *dso = map ? map__dso(map) : NULL;
365 		char o = dso ? dso__symtab_origin(dso) : '!';
366 		u64 rip = ip;
367 
368 		if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
369 			rip = map__unmap_ip(map, ip);
370 
371 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
372 				       BITS_PER_LONG / 4 + 2, rip, o);
373 	}
374 
375 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
376 	if (sym && map) {
377 		if (sym->type == STT_OBJECT) {
378 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
379 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
380 					ip - map__unmap_ip(map, sym->start));
381 		} else {
382 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
383 					       width - ret,
384 					       sym->name);
385 			if (sym->inlined)
386 				ret += repsep_snprintf(bf + ret, size - ret,
387 						       " (inlined)");
388 		}
389 	} else {
390 		size_t len = BITS_PER_LONG / 4;
391 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
392 				       len, ip);
393 	}
394 
395 	return ret;
396 }
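
/*
 * Editor's example of the resulting column (not part of the original
 * source): a user-space sample typically renders as "[.] main" and a kernel
 * sample as "[k] do_page_fault"; with verbose > 0 the unmapped address and a
 * one-character symtab origin are prefixed, and "(inlined)" is appended for
 * inlined symbols.
 */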
397 
398 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
399 {
400 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
401 					 he->level, bf, size, width);
402 }
403 
404 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
405 {
406 	const char *sym = arg;
407 
408 	if (type != HIST_FILTER__SYMBOL)
409 		return -1;
410 
411 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
412 }
413 
414 struct sort_entry sort_sym = {
415 	.se_header	= "Symbol",
416 	.se_cmp		= sort__sym_cmp,
417 	.se_sort	= sort__sym_sort,
418 	.se_snprintf	= hist_entry__sym_snprintf,
419 	.se_filter	= hist_entry__sym_filter,
420 	.se_width_idx	= HISTC_SYMBOL,
421 };
422 
423 /* --sort symoff */
424 
425 static int64_t
426 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
427 {
428 	int64_t ret;
429 
430 	ret = sort__sym_cmp(left, right);
431 	if (ret)
432 		return ret;
433 
434 	return left->ip - right->ip;
435 }
436 
437 static int64_t
438 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
439 {
440 	int64_t ret;
441 
442 	ret = sort__sym_sort(left, right);
443 	if (ret)
444 		return ret;
445 
446 	return left->ip - right->ip;
447 }
448 
449 static int
450 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
451 {
452 	struct symbol *sym = he->ms.sym;
453 
454 	if (sym == NULL)
455 		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
456 
457 	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
458 }
459 
460 struct sort_entry sort_sym_offset = {
461 	.se_header	= "Symbol Offset",
462 	.se_cmp		= sort__symoff_cmp,
463 	.se_sort	= sort__symoff_sort,
464 	.se_snprintf	= hist_entry__symoff_snprintf,
465 	.se_filter	= hist_entry__sym_filter,
466 	.se_width_idx	= HISTC_SYMBOL_OFFSET,
467 };
468 
469 /* --sort srcline */
470 
471 char *hist_entry__srcline(struct hist_entry *he)
472 {
473 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
474 }
475 
476 static int64_t
477 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
478 {
479 	int64_t ret;
480 
481 	ret = _sort__addr_cmp(left->ip, right->ip);
482 	if (ret)
483 		return ret;
484 
485 	return sort__dso_cmp(left, right);
486 }
487 
488 static int64_t
489 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
490 {
491 	if (!left->srcline)
492 		left->srcline = hist_entry__srcline(left);
493 	if (!right->srcline)
494 		right->srcline = hist_entry__srcline(right);
495 
496 	return strcmp(right->srcline, left->srcline);
497 }
498 
499 static int64_t
500 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
501 {
502 	return sort__srcline_collapse(left, right);
503 }
504 
505 static void
506 sort__srcline_init(struct hist_entry *he)
507 {
508 	if (!he->srcline)
509 		he->srcline = hist_entry__srcline(he);
510 }
511 
512 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
513 					size_t size, unsigned int width)
514 {
515 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
516 }
517 
518 struct sort_entry sort_srcline = {
519 	.se_header	= "Source:Line",
520 	.se_cmp		= sort__srcline_cmp,
521 	.se_collapse	= sort__srcline_collapse,
522 	.se_sort	= sort__srcline_sort,
523 	.se_init	= sort__srcline_init,
524 	.se_snprintf	= hist_entry__srcline_snprintf,
525 	.se_width_idx	= HISTC_SRCLINE,
526 };
527 
528 /* --sort srcline_from */
529 
530 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
531 {
532 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
533 }
534 
535 static int64_t
536 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
537 {
538 	return left->branch_info->from.addr - right->branch_info->from.addr;
539 }
540 
541 static int64_t
542 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
543 {
544 	if (!left->branch_info->srcline_from)
545 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
546 
547 	if (!right->branch_info->srcline_from)
548 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
549 
550 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
551 }
552 
553 static int64_t
554 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
555 {
556 	return sort__srcline_from_collapse(left, right);
557 }
558 
559 static void sort__srcline_from_init(struct hist_entry *he)
560 {
561 	if (!he->branch_info->srcline_from)
562 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
563 }
564 
565 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
566 					size_t size, unsigned int width)
567 {
568 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
569 }
570 
571 struct sort_entry sort_srcline_from = {
572 	.se_header	= "From Source:Line",
573 	.se_cmp		= sort__srcline_from_cmp,
574 	.se_collapse	= sort__srcline_from_collapse,
575 	.se_sort	= sort__srcline_from_sort,
576 	.se_init	= sort__srcline_from_init,
577 	.se_snprintf	= hist_entry__srcline_from_snprintf,
578 	.se_width_idx	= HISTC_SRCLINE_FROM,
579 };
580 
581 /* --sort srcline_to */
582 
583 static int64_t
584 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
585 {
586 	return left->branch_info->to.addr - right->branch_info->to.addr;
587 }
588 
589 static int64_t
590 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
591 {
592 	if (!left->branch_info->srcline_to)
593 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
594 
595 	if (!right->branch_info->srcline_to)
596 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
597 
598 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
599 }
600 
601 static int64_t
602 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
603 {
604 	return sort__srcline_to_collapse(left, right);
605 }
606 
607 static void sort__srcline_to_init(struct hist_entry *he)
608 {
609 	if (!he->branch_info->srcline_to)
610 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
611 }
612 
613 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
614 					size_t size, unsigned int width)
615 {
616 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
617 }
618 
619 struct sort_entry sort_srcline_to = {
620 	.se_header	= "To Source:Line",
621 	.se_cmp		= sort__srcline_to_cmp,
622 	.se_collapse	= sort__srcline_to_collapse,
623 	.se_sort	= sort__srcline_to_sort,
624 	.se_init	= sort__srcline_to_init,
625 	.se_snprintf	= hist_entry__srcline_to_snprintf,
626 	.se_width_idx	= HISTC_SRCLINE_TO,
627 };
628 
629 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
630 					size_t size, unsigned int width)
631 {
632 
633 	struct symbol *sym = he->ms.sym;
634 	struct annotated_branch *branch;
635 	double ipc = 0.0, coverage = 0.0;
636 	char tmp[64];
637 
638 	if (!sym)
639 		return repsep_snprintf(bf, size, "%-*s", width, "-");
640 
641 	branch = symbol__annotation(sym)->branch;
642 
643 	if (branch && branch->hit_cycles)
644 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
645 
646 	if (branch && branch->total_insn) {
647 		coverage = branch->cover_insn * 100.0 /
648 			((double)branch->total_insn);
649 	}
650 
651 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
652 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
653 }
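
/*
 * Editor's worked example (not part of the original source): with
 * branch->hit_insn = 1200, hit_cycles = 600, cover_insn = 30 and
 * total_insn = 40, IPC is 1200 / 600 = 2.00 and coverage is
 * 30 * 100.0 / 40 = 75.0, so the column prints "2.00  [ 75.0%]".
 */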
654 
655 struct sort_entry sort_sym_ipc = {
656 	.se_header	= "IPC   [IPC Coverage]",
657 	.se_cmp		= sort__sym_cmp,
658 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
659 	.se_width_idx	= HISTC_SYMBOL_IPC,
660 };
661 
662 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
663 					     __maybe_unused,
664 					     char *bf, size_t size,
665 					     unsigned int width)
666 {
667 	char tmp[64];
668 
669 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
670 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
671 }
672 
673 struct sort_entry sort_sym_ipc_null = {
674 	.se_header	= "IPC   [IPC Coverage]",
675 	.se_cmp		= sort__sym_cmp,
676 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
677 	.se_width_idx	= HISTC_SYMBOL_IPC,
678 };
679 
680 /* --sort callchain_branch_predicted */
681 
682 static int64_t
683 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
684 				     struct hist_entry *right __maybe_unused)
685 {
686 	return 0;
687 }
688 
689 static int hist_entry__callchain_branch_predicted_snprintf(
690 	struct hist_entry *he, char *bf, size_t size, unsigned int width)
691 {
692 	u64 branch_count, predicted_count;
693 	double percent = 0.0;
694 	char str[32];
695 
696 	callchain_branch_counts(he->callchain, &branch_count,
697 				&predicted_count, NULL, NULL);
698 
699 	if (branch_count)
700 		percent = predicted_count * 100.0 / branch_count;
701 
702 	snprintf(str, sizeof(str), "%.1f%%", percent);
703 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
704 }
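
/*
 * Editor's example (not part of the original source): 90 predicted branches
 * out of 100 counted in the callchain print as "90.0%"; if no branches were
 * recorded the column stays at "0.0%".
 */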
705 
706 struct sort_entry sort_callchain_branch_predicted = {
707 	.se_header	= "Predicted",
708 	.se_cmp		= sort__callchain_branch_predicted_cmp,
709 	.se_snprintf	= hist_entry__callchain_branch_predicted_snprintf,
710 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_PREDICTED,
711 };
712 
713 /* --sort callchain_branch_abort */
714 
715 static int64_t
716 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
717 				 struct hist_entry *right __maybe_unused)
718 {
719 	return 0;
720 }
721 
722 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
723 						       char *bf, size_t size,
724 						       unsigned int width)
725 {
726 	u64 branch_count, abort_count;
727 	char str[32];
728 
729 	callchain_branch_counts(he->callchain, &branch_count,
730 				NULL, &abort_count, NULL);
731 
732 	snprintf(str, sizeof(str), "%" PRId64, abort_count);
733 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
734 }
735 
736 struct sort_entry sort_callchain_branch_abort = {
737 	.se_header	= "Abort",
738 	.se_cmp		= sort__callchain_branch_abort_cmp,
739 	.se_snprintf	= hist_entry__callchain_branch_abort_snprintf,
740 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_ABORT,
741 };
742 
743 /* --sort callchain_branch_cycles */
744 
745 static int64_t
746 sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
747 				  struct hist_entry *right __maybe_unused)
748 {
749 	return 0;
750 }
751 
752 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
753 							char *bf, size_t size,
754 							unsigned int width)
755 {
756 	u64 branch_count, cycles_count, cycles = 0;
757 	char str[32];
758 
759 	callchain_branch_counts(he->callchain, &branch_count,
760 				NULL, NULL, &cycles_count);
761 
762 	if (branch_count)
763 		cycles = cycles_count / branch_count;
764 
765 	snprintf(str, sizeof(str), "%" PRId64 "", cycles);
766 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
767 }
768 
769 struct sort_entry sort_callchain_branch_cycles = {
770 	.se_header	= "Cycles",
771 	.se_cmp		= sort__callchain_branch_cycles_cmp,
772 	.se_snprintf	= hist_entry__callchain_branch_cycles_snprintf,
773 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_CYCLES,
774 };
775 
776 /* --sort srcfile */
777 
778 static char no_srcfile[1];
779 
780 static char *hist_entry__get_srcfile(struct hist_entry *e)
781 {
782 	char *sf, *p;
783 	struct map *map = e->ms.map;
784 
785 	if (!map)
786 		return no_srcfile;
787 
788 	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
789 			 e->ms.sym, false, true, true, e->ip);
790 	if (sf == SRCLINE_UNKNOWN)
791 		return no_srcfile;
792 	p = strchr(sf, ':');
793 	if (p && *sf) {
794 		*p = 0;
795 		return sf;
796 	}
797 	free(sf);
798 	return no_srcfile;
799 }
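
/*
 * Editor's note (not part of the original source): the helper above
 * truncates the resolved srcline at the first ':', so "sort.c:123" becomes
 * the srcfile "sort.c"; unresolved locations map to the shared empty
 * no_srcfile string rather than NULL.
 */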
800 
801 static int64_t
802 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
803 {
804 	return sort__srcline_cmp(left, right);
805 }
806 
807 static int64_t
808 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
809 {
810 	if (!left->srcfile)
811 		left->srcfile = hist_entry__get_srcfile(left);
812 	if (!right->srcfile)
813 		right->srcfile = hist_entry__get_srcfile(right);
814 
815 	return strcmp(right->srcfile, left->srcfile);
816 }
817 
818 static int64_t
819 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
820 {
821 	return sort__srcfile_collapse(left, right);
822 }
823 
824 static void sort__srcfile_init(struct hist_entry *he)
825 {
826 	if (!he->srcfile)
827 		he->srcfile = hist_entry__get_srcfile(he);
828 }
829 
830 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
831 					size_t size, unsigned int width)
832 {
833 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
834 }
835 
836 struct sort_entry sort_srcfile = {
837 	.se_header	= "Source File",
838 	.se_cmp		= sort__srcfile_cmp,
839 	.se_collapse	= sort__srcfile_collapse,
840 	.se_sort	= sort__srcfile_sort,
841 	.se_init	= sort__srcfile_init,
842 	.se_snprintf	= hist_entry__srcfile_snprintf,
843 	.se_width_idx	= HISTC_SRCFILE,
844 };
845 
846 /* --sort parent */
847 
848 static int64_t
849 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
850 {
851 	struct symbol *sym_l = left->parent;
852 	struct symbol *sym_r = right->parent;
853 
854 	if (!sym_l || !sym_r)
855 		return cmp_null(sym_l, sym_r);
856 
857 	return strcmp(sym_r->name, sym_l->name);
858 }
859 
860 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
861 				       size_t size, unsigned int width)
862 {
863 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
864 			      he->parent ? he->parent->name : "[other]");
865 }
866 
867 struct sort_entry sort_parent = {
868 	.se_header	= "Parent symbol",
869 	.se_cmp		= sort__parent_cmp,
870 	.se_snprintf	= hist_entry__parent_snprintf,
871 	.se_width_idx	= HISTC_PARENT,
872 };
873 
874 /* --sort cpu */
875 
876 static int64_t
877 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
878 {
879 	return right->cpu - left->cpu;
880 }
881 
882 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
883 				    size_t size, unsigned int width)
884 {
885 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
886 }
887 
888 struct sort_entry sort_cpu = {
889 	.se_header      = "CPU",
890 	.se_cmp	        = sort__cpu_cmp,
891 	.se_snprintf    = hist_entry__cpu_snprintf,
892 	.se_width_idx	= HISTC_CPU,
893 };
894 
895 /* --sort cgroup_id */
896 
897 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
898 {
899 	return (int64_t)(right_dev - left_dev);
900 }
901 
902 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
903 {
904 	return (int64_t)(right_ino - left_ino);
905 }
906 
907 static int64_t
908 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
909 {
910 	int64_t ret;
911 
912 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
913 	if (ret != 0)
914 		return ret;
915 
916 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
917 				       left->cgroup_id.ino);
918 }
919 
920 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
921 					  char *bf, size_t size,
922 					  unsigned int width __maybe_unused)
923 {
924 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
925 			       he->cgroup_id.ino);
926 }
927 
928 struct sort_entry sort_cgroup_id = {
929 	.se_header      = "cgroup id (dev/inode)",
930 	.se_cmp	        = sort__cgroup_id_cmp,
931 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
932 	.se_width_idx	= HISTC_CGROUP_ID,
933 };
934 
935 /* --sort cgroup */
936 
937 static int64_t
938 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
939 {
940 	return right->cgroup - left->cgroup;
941 }
942 
943 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
944 				       char *bf, size_t size,
945 				       unsigned int width __maybe_unused)
946 {
947 	const char *cgrp_name = "N/A";
948 
949 	if (he->cgroup) {
950 		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
951 						   he->cgroup);
952 		if (cgrp != NULL)
953 			cgrp_name = cgrp->name;
954 		else
955 			cgrp_name = "unknown";
956 	}
957 
958 	return repsep_snprintf(bf, size, "%s", cgrp_name);
959 }
960 
961 struct sort_entry sort_cgroup = {
962 	.se_header      = "Cgroup",
963 	.se_cmp	        = sort__cgroup_cmp,
964 	.se_snprintf    = hist_entry__cgroup_snprintf,
965 	.se_width_idx	= HISTC_CGROUP,
966 };
967 
968 /* --sort socket */
969 
970 static int64_t
971 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
972 {
973 	return right->socket - left->socket;
974 }
975 
976 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
977 				    size_t size, unsigned int width)
978 {
979 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
980 }
981 
982 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
983 {
984 	int sk = *(const int *)arg;
985 
986 	if (type != HIST_FILTER__SOCKET)
987 		return -1;
988 
989 	return sk >= 0 && he->socket != sk;
990 }
991 
992 struct sort_entry sort_socket = {
993 	.se_header      = "Socket",
994 	.se_cmp	        = sort__socket_cmp,
995 	.se_snprintf    = hist_entry__socket_snprintf,
996 	.se_filter      = hist_entry__socket_filter,
997 	.se_width_idx	= HISTC_SOCKET,
998 };
999 
1000 /* --sort time */
1001 
1002 static int64_t
1003 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1004 {
1005 	return right->time - left->time;
1006 }
1007 
1008 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1009 				    size_t size, unsigned int width)
1010 {
1011 	char he_time[32];
1012 
1013 	if (symbol_conf.nanosecs)
1014 		timestamp__scnprintf_nsec(he->time, he_time,
1015 					  sizeof(he_time));
1016 	else
1017 		timestamp__scnprintf_usec(he->time, he_time,
1018 					  sizeof(he_time));
1019 
1020 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1021 }
1022 
1023 struct sort_entry sort_time = {
1024 	.se_header      = "Time",
1025 	.se_cmp	        = sort__time_cmp,
1026 	.se_snprintf    = hist_entry__time_snprintf,
1027 	.se_width_idx	= HISTC_TIME,
1028 };
1029 
1030 /* --sort trace */
1031 
1032 #ifdef HAVE_LIBTRACEEVENT
1033 static char *get_trace_output(struct hist_entry *he)
1034 {
1035 	struct trace_seq seq;
1036 	struct evsel *evsel;
1037 	struct tep_record rec = {
1038 		.data = he->raw_data,
1039 		.size = he->raw_size,
1040 	};
1041 
1042 	evsel = hists_to_evsel(he->hists);
1043 
1044 	trace_seq_init(&seq);
1045 	if (symbol_conf.raw_trace) {
1046 		tep_print_fields(&seq, he->raw_data, he->raw_size,
1047 				 evsel->tp_format);
1048 	} else {
1049 		tep_print_event(evsel->tp_format->tep,
1050 				&seq, &rec, "%s", TEP_PRINT_INFO);
1051 	}
1052 	/*
1053 	 * Trim the buffer: it starts at 4KB and we're not going to
1054 	 * add anything more to it.
1055 	 */
1056 	return realloc(seq.buffer, seq.len + 1);
1057 }
1058 
1059 static int64_t
1060 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1061 {
1062 	struct evsel *evsel;
1063 
1064 	evsel = hists_to_evsel(left->hists);
1065 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1066 		return 0;
1067 
1068 	if (left->trace_output == NULL)
1069 		left->trace_output = get_trace_output(left);
1070 	if (right->trace_output == NULL)
1071 		right->trace_output = get_trace_output(right);
1072 
1073 	return strcmp(right->trace_output, left->trace_output);
1074 }
1075 
1076 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1077 				    size_t size, unsigned int width)
1078 {
1079 	struct evsel *evsel;
1080 
1081 	evsel = hists_to_evsel(he->hists);
1082 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1083 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1084 
1085 	if (he->trace_output == NULL)
1086 		he->trace_output = get_trace_output(he);
1087 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1088 }
1089 
1090 struct sort_entry sort_trace = {
1091 	.se_header      = "Trace output",
1092 	.se_cmp	        = sort__trace_cmp,
1093 	.se_snprintf    = hist_entry__trace_snprintf,
1094 	.se_width_idx	= HISTC_TRACE,
1095 };
1096 #endif /* HAVE_LIBTRACEEVENT */
1097 
1098 /* sort keys for branch stacks */
1099 
1100 static int64_t
1101 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1102 {
1103 	if (!left->branch_info || !right->branch_info)
1104 		return cmp_null(left->branch_info, right->branch_info);
1105 
1106 	return _sort__dso_cmp(left->branch_info->from.ms.map,
1107 			      right->branch_info->from.ms.map);
1108 }
1109 
1110 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1111 				    size_t size, unsigned int width)
1112 {
1113 	if (he->branch_info)
1114 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1115 						 bf, size, width);
1116 	else
1117 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1118 }
1119 
1120 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1121 				       const void *arg)
1122 {
1123 	const struct dso *dso = arg;
1124 
1125 	if (type != HIST_FILTER__DSO)
1126 		return -1;
1127 
1128 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1129 		map__dso(he->branch_info->from.ms.map) != dso);
1130 }
1131 
1132 static int64_t
1133 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1134 {
1135 	if (!left->branch_info || !right->branch_info)
1136 		return cmp_null(left->branch_info, right->branch_info);
1137 
1138 	return _sort__dso_cmp(left->branch_info->to.ms.map,
1139 			      right->branch_info->to.ms.map);
1140 }
1141 
1142 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1143 				       size_t size, unsigned int width)
1144 {
1145 	if (he->branch_info)
1146 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1147 						 bf, size, width);
1148 	else
1149 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1150 }
1151 
1152 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1153 				     const void *arg)
1154 {
1155 	const struct dso *dso = arg;
1156 
1157 	if (type != HIST_FILTER__DSO)
1158 		return -1;
1159 
1160 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1161 		map__dso(he->branch_info->to.ms.map) != dso);
1162 }
1163 
1164 static int64_t
1165 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1166 {
1167 	struct addr_map_symbol *from_l, *from_r;
1168 
1169 	if (!left->branch_info || !right->branch_info)
1170 		return cmp_null(left->branch_info, right->branch_info);
1171 
1172 	from_l = &left->branch_info->from;
1173 	from_r = &right->branch_info->from;
1174 
1175 	if (!from_l->ms.sym && !from_r->ms.sym)
1176 		return _sort__addr_cmp(from_l->addr, from_r->addr);
1177 
1178 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1179 }
1180 
1181 static int64_t
1182 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1183 {
1184 	struct addr_map_symbol *to_l, *to_r;
1185 
1186 	if (!left->branch_info || !right->branch_info)
1187 		return cmp_null(left->branch_info, right->branch_info);
1188 
1189 	to_l = &left->branch_info->to;
1190 	to_r = &right->branch_info->to;
1191 
1192 	if (!to_l->ms.sym && !to_r->ms.sym)
1193 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1194 
1195 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1196 }
1197 
1198 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1199 					 size_t size, unsigned int width)
1200 {
1201 	if (he->branch_info) {
1202 		struct addr_map_symbol *from = &he->branch_info->from;
1203 
1204 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1205 						 from->al_level, bf, size, width);
1206 	}
1207 
1208 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1209 }
1210 
1211 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1212 				       size_t size, unsigned int width)
1213 {
1214 	if (he->branch_info) {
1215 		struct addr_map_symbol *to = &he->branch_info->to;
1216 
1217 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1218 						 to->al_level, bf, size, width);
1219 	}
1220 
1221 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1222 }
1223 
1224 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1225 				       const void *arg)
1226 {
1227 	const char *sym = arg;
1228 
1229 	if (type != HIST_FILTER__SYMBOL)
1230 		return -1;
1231 
1232 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1233 			strstr(he->branch_info->from.ms.sym->name, sym));
1234 }
1235 
1236 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1237 				       const void *arg)
1238 {
1239 	const char *sym = arg;
1240 
1241 	if (type != HIST_FILTER__SYMBOL)
1242 		return -1;
1243 
1244 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1245 		        strstr(he->branch_info->to.ms.sym->name, sym));
1246 }
1247 
1248 struct sort_entry sort_dso_from = {
1249 	.se_header	= "Source Shared Object",
1250 	.se_cmp		= sort__dso_from_cmp,
1251 	.se_snprintf	= hist_entry__dso_from_snprintf,
1252 	.se_filter	= hist_entry__dso_from_filter,
1253 	.se_width_idx	= HISTC_DSO_FROM,
1254 };
1255 
1256 struct sort_entry sort_dso_to = {
1257 	.se_header	= "Target Shared Object",
1258 	.se_cmp		= sort__dso_to_cmp,
1259 	.se_snprintf	= hist_entry__dso_to_snprintf,
1260 	.se_filter	= hist_entry__dso_to_filter,
1261 	.se_width_idx	= HISTC_DSO_TO,
1262 };
1263 
1264 struct sort_entry sort_sym_from = {
1265 	.se_header	= "Source Symbol",
1266 	.se_cmp		= sort__sym_from_cmp,
1267 	.se_snprintf	= hist_entry__sym_from_snprintf,
1268 	.se_filter	= hist_entry__sym_from_filter,
1269 	.se_width_idx	= HISTC_SYMBOL_FROM,
1270 };
1271 
1272 struct sort_entry sort_sym_to = {
1273 	.se_header	= "Target Symbol",
1274 	.se_cmp		= sort__sym_to_cmp,
1275 	.se_snprintf	= hist_entry__sym_to_snprintf,
1276 	.se_filter	= hist_entry__sym_to_filter,
1277 	.se_width_idx	= HISTC_SYMBOL_TO,
1278 };
1279 
1280 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1281 				     u64 ip, char level, char *bf, size_t size,
1282 				     unsigned int width)
1283 {
1284 	struct symbol *sym = ms->sym;
1285 	struct map *map = ms->map;
1286 	size_t ret = 0, offs;
1287 
1288 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1289 	if (sym && map) {
1290 		if (sym->type == STT_OBJECT) {
1291 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1292 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1293 					ip - map__unmap_ip(map, sym->start));
1294 		} else {
1295 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1296 					       width - ret,
1297 					       sym->name);
1298 			offs = ip - sym->start;
1299 			if (offs)
1300 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1301 		}
1302 	} else {
1303 		size_t len = BITS_PER_LONG / 4;
1304 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1305 				       len, ip);
1306 	}
1307 
1308 	return ret;
1309 }
1310 
1311 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1312 					 size_t size, unsigned int width)
1313 {
1314 	if (he->branch_info) {
1315 		struct addr_map_symbol *from = &he->branch_info->from;
1316 
1317 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1318 						 he->level, bf, size, width);
1319 	}
1320 
1321 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1322 }
1323 
1324 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1325 				       size_t size, unsigned int width)
1326 {
1327 	if (he->branch_info) {
1328 		struct addr_map_symbol *to = &he->branch_info->to;
1329 
1330 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1331 						 he->level, bf, size, width);
1332 	}
1333 
1334 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1335 }
1336 
1337 static int64_t
1338 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1339 {
1340 	struct addr_map_symbol *from_l;
1341 	struct addr_map_symbol *from_r;
1342 	int64_t ret;
1343 
1344 	if (!left->branch_info || !right->branch_info)
1345 		return cmp_null(left->branch_info, right->branch_info);
1346 
1347 	from_l = &left->branch_info->from;
1348 	from_r = &right->branch_info->from;
1349 
1350 	/*
1351 	 * comparing symbol address alone is not enough since it's a
1352 	 * relative address within a dso.
1353 	 */
1354 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1355 	if (ret != 0)
1356 		return ret;
1357 
1358 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1359 }
1360 
1361 static int64_t
1362 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1363 {
1364 	struct addr_map_symbol *to_l;
1365 	struct addr_map_symbol *to_r;
1366 	int64_t ret;
1367 
1368 	if (!left->branch_info || !right->branch_info)
1369 		return cmp_null(left->branch_info, right->branch_info);
1370 
1371 	to_l = &left->branch_info->to;
1372 	to_r = &right->branch_info->to;
1373 
1374 	/*
1375 	 * comparing symbol address alone is not enough since it's a
1376 	 * relative address within a dso.
1377 	 */
1378 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1379 	if (ret != 0)
1380 		return ret;
1381 
1382 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1383 }
1384 
1385 struct sort_entry sort_addr_from = {
1386 	.se_header	= "Source Address",
1387 	.se_cmp		= sort__addr_from_cmp,
1388 	.se_snprintf	= hist_entry__addr_from_snprintf,
1389 	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1390 	.se_width_idx	= HISTC_ADDR_FROM,
1391 };
1392 
1393 struct sort_entry sort_addr_to = {
1394 	.se_header	= "Target Address",
1395 	.se_cmp		= sort__addr_to_cmp,
1396 	.se_snprintf	= hist_entry__addr_to_snprintf,
1397 	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1398 	.se_width_idx	= HISTC_ADDR_TO,
1399 };
1400 
1401 
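/*
 * Editor's note (not part of the original source): the comparator below only
 * distinguishes "same prediction outcome" from "different": it returns
 * non-zero whenever the mispred or predicted flag differs and 0 when both
 * match, i.e. it separates entries rather than defining a total order.
 */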
1402 static int64_t
1403 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1404 {
1405 	unsigned char mp, p;
1406 
1407 	if (!left->branch_info || !right->branch_info)
1408 		return cmp_null(left->branch_info, right->branch_info);
1409 
1410 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1411 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1412 	return mp || p;
1413 }
1414 
1415 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1416 				    size_t size, unsigned int width)
{
1417 	static const char *out = "N/A";
1418 
1419 	if (he->branch_info) {
1420 		if (he->branch_info->flags.predicted)
1421 			out = "N";
1422 		else if (he->branch_info->flags.mispred)
1423 			out = "Y";
1424 	}
1425 
1426 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1427 }
1428 
1429 static int64_t
1430 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1431 {
1432 	if (!left->branch_info || !right->branch_info)
1433 		return cmp_null(left->branch_info, right->branch_info);
1434 
1435 	return left->branch_info->flags.cycles -
1436 		right->branch_info->flags.cycles;
1437 }
1438 
1439 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1440 				    size_t size, unsigned int width)
1441 {
1442 	if (!he->branch_info)
1443 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1444 	if (he->branch_info->flags.cycles == 0)
1445 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1446 	return repsep_snprintf(bf, size, "%-*hd", width,
1447 			       he->branch_info->flags.cycles);
1448 }
1449 
1450 struct sort_entry sort_cycles = {
1451 	.se_header	= "Basic Block Cycles",
1452 	.se_cmp		= sort__cycles_cmp,
1453 	.se_snprintf	= hist_entry__cycles_snprintf,
1454 	.se_width_idx	= HISTC_CYCLES,
1455 };
1456 
1457 /* --sort daddr_sym */
1458 int64_t
1459 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1460 {
1461 	uint64_t l = 0, r = 0;
1462 
1463 	if (left->mem_info)
1464 		l = mem_info__daddr(left->mem_info)->addr;
1465 	if (right->mem_info)
1466 		r = mem_info__daddr(right->mem_info)->addr;
1467 
1468 	return (int64_t)(r - l);
1469 }
1470 
1471 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1472 				    size_t size, unsigned int width)
1473 {
1474 	uint64_t addr = 0;
1475 	struct map_symbol *ms = NULL;
1476 
1477 	if (he->mem_info) {
1478 		addr = mem_info__daddr(he->mem_info)->addr;
1479 		ms = &mem_info__daddr(he->mem_info)->ms;
1480 	}
1481 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1482 }
1483 
1484 int64_t
1485 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1486 {
1487 	uint64_t l = 0, r = 0;
1488 
1489 	if (left->mem_info)
1490 		l = mem_info__iaddr(left->mem_info)->addr;
1491 	if (right->mem_info)
1492 		r = mem_info__iaddr(right->mem_info)->addr;
1493 
1494 	return (int64_t)(r - l);
1495 }
1496 
1497 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1498 				    size_t size, unsigned int width)
1499 {
1500 	uint64_t addr = 0;
1501 	struct map_symbol *ms = NULL;
1502 
1503 	if (he->mem_info) {
1504 		addr = mem_info__iaddr(he->mem_info)->addr;
1505 		ms   = &mem_info__iaddr(he->mem_info)->ms;
1506 	}
1507 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1508 }
1509 
1510 static int64_t
1511 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1512 {
1513 	struct map *map_l = NULL;
1514 	struct map *map_r = NULL;
1515 
1516 	if (left->mem_info)
1517 		map_l = mem_info__daddr(left->mem_info)->ms.map;
1518 	if (right->mem_info)
1519 		map_r = mem_info__daddr(right->mem_info)->ms.map;
1520 
1521 	return _sort__dso_cmp(map_l, map_r);
1522 }
1523 
1524 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1525 				    size_t size, unsigned int width)
1526 {
1527 	struct map *map = NULL;
1528 
1529 	if (he->mem_info)
1530 		map = mem_info__daddr(he->mem_info)->ms.map;
1531 
1532 	return _hist_entry__dso_snprintf(map, bf, size, width);
1533 }
1534 
1535 static int64_t
1536 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1537 {
1538 	union perf_mem_data_src data_src_l;
1539 	union perf_mem_data_src data_src_r;
1540 
1541 	if (left->mem_info)
1542 		data_src_l = *mem_info__data_src(left->mem_info);
1543 	else
1544 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1545 
1546 	if (right->mem_info)
1547 		data_src_r = *mem_info__data_src(right->mem_info);
1548 	else
1549 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1550 
1551 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1552 }
1553 
1554 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1555 				    size_t size, unsigned int width)
1556 {
1557 	char out[10];
1558 
1559 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1560 	return repsep_snprintf(bf, size, "%.*s", width, out);
1561 }
1562 
1563 static int64_t
1564 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1565 {
1566 	union perf_mem_data_src data_src_l;
1567 	union perf_mem_data_src data_src_r;
1568 
1569 	if (left->mem_info)
1570 		data_src_l = *mem_info__data_src(left->mem_info);
1571 	else
1572 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1573 
1574 	if (right->mem_info)
1575 		data_src_r = *mem_info__data_src(right->mem_info);
1576 	else
1577 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1578 
1579 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1580 }
1581 
1582 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1583 				    size_t size, unsigned int width)
1584 {
1585 	char out[64];
1586 
1587 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1588 	return repsep_snprintf(bf, size, "%-*s", width, out);
1589 }
1590 
1591 static int64_t
1592 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1593 {
1594 	union perf_mem_data_src data_src_l;
1595 	union perf_mem_data_src data_src_r;
1596 
1597 	if (left->mem_info)
1598 		data_src_l = *mem_info__data_src(left->mem_info);
1599 	else
1600 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1601 
1602 	if (right->mem_info)
1603 		data_src_r = *mem_info__data_src(right->mem_info);
1604 	else
1605 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1606 
1607 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1608 }
1609 
1610 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1611 				    size_t size, unsigned int width)
1612 {
1613 	char out[64];
1614 
1615 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1616 	return repsep_snprintf(bf, size, "%-*s", width, out);
1617 }
1618 
1619 static int64_t
1620 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1621 {
1622 	union perf_mem_data_src data_src_l;
1623 	union perf_mem_data_src data_src_r;
1624 
1625 	if (left->mem_info)
1626 		data_src_l = *mem_info__data_src(left->mem_info);
1627 	else
1628 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1629 
1630 	if (right->mem_info)
1631 		data_src_r = *mem_info__data_src(right->mem_info);
1632 	else
1633 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1634 
1635 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1636 }
1637 
1638 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1639 				    size_t size, unsigned int width)
1640 {
1641 	char out[64];
1642 
1643 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1644 	return repsep_snprintf(bf, size, "%-*s", width, out);
1645 }
1646 
1647 int64_t
1648 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1649 {
1650 	u64 l, r;
1651 	struct map *l_map, *r_map;
1652 	struct dso *l_dso, *r_dso;
1653 	int rc;
1654 
1655 	if (!left->mem_info)  return -1;
1656 	if (!right->mem_info) return 1;
1657 
1658 	/* group event types together */
1659 	if (left->cpumode > right->cpumode) return -1;
1660 	if (left->cpumode < right->cpumode) return 1;
1661 
1662 	l_map = mem_info__daddr(left->mem_info)->ms.map;
1663 	r_map = mem_info__daddr(right->mem_info)->ms.map;
1664 
1665 	/* if both are NULL, jump to sort on al_addr instead */
1666 	if (!l_map && !r_map)
1667 		goto addr;
1668 
1669 	if (!l_map) return -1;
1670 	if (!r_map) return 1;
1671 
1672 	l_dso = map__dso(l_map);
1673 	r_dso = map__dso(r_map);
1674 	rc = dso__cmp_id(l_dso, r_dso);
1675 	if (rc)
1676 		return rc;
1677 	/*
1678 	 * Addresses with no major/minor numbers are assumed to be
1679 	 * anonymous in userspace.  Sort those on pid then address.
1680 	 *
1681 	 * The kernel and non-zero major/minor mapped areas are
1682 	 * assumed to be unity mapped.  Sort those on address.
1683 	 */
1684 
1685 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1686 	    (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1687 	     !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1688 		/* userspace anonymous */
1689 
1690 		if (thread__pid(left->thread) > thread__pid(right->thread))
1691 			return -1;
1692 		if (thread__pid(left->thread) < thread__pid(right->thread))
1693 			return 1;
1694 	}
1695 
1696 addr:
1697 	/* al_addr does all the right addr - start + offset calculations */
1698 	l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1699 	r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1700 
1701 	if (l > r) return -1;
1702 	if (l < r) return 1;
1703 
1704 	return 0;
1705 }
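
/*
 * Editor's illustration (not part of the original source), assuming
 * cl_address() rounds the adjusted address down to its cacheline: with
 * 64-byte lines, al_addr values 0x7f1234567808 and 0x7f1234567830 both map
 * to cacheline 0x7f1234567800 and therefore compare equal here, so their
 * samples are aggregated into a single "Data Cacheline" entry.
 */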
1706 
1707 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1708 					  size_t size, unsigned int width)
1709 {
1710 
1711 	uint64_t addr = 0;
1712 	struct map_symbol *ms = NULL;
1713 	char level = he->level;
1714 
1715 	if (he->mem_info) {
1716 		struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1717 		struct dso *dso = map ? map__dso(map) : NULL;
1718 
1719 		addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1720 		ms = &mem_info__daddr(he->mem_info)->ms;
1721 
1722 		/* print [s] for shared data mmaps */
1723 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1724 		     map && !(map__prot(map) & PROT_EXEC) &&
1725 		     (map__flags(map) & MAP_SHARED) &&
1726 		     (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1727 		      dso__id(dso)->ino_generation))
1728 			level = 's';
1729 		else if (!map)
1730 			level = 'X';
1731 	}
1732 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1733 }
1734 
1735 struct sort_entry sort_mispredict = {
1736 	.se_header	= "Branch Mispredicted",
1737 	.se_cmp		= sort__mispredict_cmp,
1738 	.se_snprintf	= hist_entry__mispredict_snprintf,
1739 	.se_width_idx	= HISTC_MISPREDICT,
1740 };
1741 
1742 static int64_t
1743 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1744 {
1745 	return left->weight - right->weight;
1746 }
1747 
1748 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1749 				    size_t size, unsigned int width)
1750 {
1751 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1752 }
1753 
1754 struct sort_entry sort_local_weight = {
1755 	.se_header	= "Local Weight",
1756 	.se_cmp		= sort__weight_cmp,
1757 	.se_snprintf	= hist_entry__local_weight_snprintf,
1758 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1759 };
1760 
1761 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1762 					      size_t size, unsigned int width)
1763 {
1764 	return repsep_snprintf(bf, size, "%-*llu", width,
1765 			       he->weight * he->stat.nr_events);
1766 }
1767 
1768 struct sort_entry sort_global_weight = {
1769 	.se_header	= "Weight",
1770 	.se_cmp		= sort__weight_cmp,
1771 	.se_snprintf	= hist_entry__global_weight_snprintf,
1772 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1773 };
1774 
1775 static int64_t
1776 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1777 {
1778 	return left->ins_lat - right->ins_lat;
1779 }
1780 
1781 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1782 					      size_t size, unsigned int width)
1783 {
1784 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1785 }
1786 
1787 struct sort_entry sort_local_ins_lat = {
1788 	.se_header	= "Local INSTR Latency",
1789 	.se_cmp		= sort__ins_lat_cmp,
1790 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1791 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1792 };
1793 
1794 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1795 					       size_t size, unsigned int width)
1796 {
1797 	return repsep_snprintf(bf, size, "%-*u", width,
1798 			       he->ins_lat * he->stat.nr_events);
1799 }
1800 
1801 struct sort_entry sort_global_ins_lat = {
1802 	.se_header	= "INSTR Latency",
1803 	.se_cmp		= sort__ins_lat_cmp,
1804 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1805 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1806 };
1807 
1808 static int64_t
1809 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1810 {
1811 	return left->p_stage_cyc - right->p_stage_cyc;
1812 }
1813 
1814 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1815 					size_t size, unsigned int width)
1816 {
1817 	return repsep_snprintf(bf, size, "%-*u", width,
1818 			he->p_stage_cyc * he->stat.nr_events);
1819 }
1820 
1821 
1822 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1823 					size_t size, unsigned int width)
1824 {
1825 	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1826 }
1827 
1828 struct sort_entry sort_local_p_stage_cyc = {
1829 	.se_header      = "Local Pipeline Stage Cycle",
1830 	.se_cmp         = sort__p_stage_cyc_cmp,
1831 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1832 	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1833 };
1834 
1835 struct sort_entry sort_global_p_stage_cyc = {
1836 	.se_header      = "Pipeline Stage Cycle",
1837 	.se_cmp         = sort__p_stage_cyc_cmp,
1838 	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1839 	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1840 };
1841 
1842 struct sort_entry sort_mem_daddr_sym = {
1843 	.se_header	= "Data Symbol",
1844 	.se_cmp		= sort__daddr_cmp,
1845 	.se_snprintf	= hist_entry__daddr_snprintf,
1846 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1847 };
1848 
1849 struct sort_entry sort_mem_iaddr_sym = {
1850 	.se_header	= "Code Symbol",
1851 	.se_cmp		= sort__iaddr_cmp,
1852 	.se_snprintf	= hist_entry__iaddr_snprintf,
1853 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1854 };
1855 
1856 struct sort_entry sort_mem_daddr_dso = {
1857 	.se_header	= "Data Object",
1858 	.se_cmp		= sort__dso_daddr_cmp,
1859 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1860 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1861 };
1862 
1863 struct sort_entry sort_mem_locked = {
1864 	.se_header	= "Locked",
1865 	.se_cmp		= sort__locked_cmp,
1866 	.se_snprintf	= hist_entry__locked_snprintf,
1867 	.se_width_idx	= HISTC_MEM_LOCKED,
1868 };
1869 
1870 struct sort_entry sort_mem_tlb = {
1871 	.se_header	= "TLB access",
1872 	.se_cmp		= sort__tlb_cmp,
1873 	.se_snprintf	= hist_entry__tlb_snprintf,
1874 	.se_width_idx	= HISTC_MEM_TLB,
1875 };
1876 
1877 struct sort_entry sort_mem_lvl = {
1878 	.se_header	= "Memory access",
1879 	.se_cmp		= sort__lvl_cmp,
1880 	.se_snprintf	= hist_entry__lvl_snprintf,
1881 	.se_width_idx	= HISTC_MEM_LVL,
1882 };
1883 
1884 struct sort_entry sort_mem_snoop = {
1885 	.se_header	= "Snoop",
1886 	.se_cmp		= sort__snoop_cmp,
1887 	.se_snprintf	= hist_entry__snoop_snprintf,
1888 	.se_width_idx	= HISTC_MEM_SNOOP,
1889 };
1890 
1891 struct sort_entry sort_mem_dcacheline = {
1892 	.se_header	= "Data Cacheline",
1893 	.se_cmp		= sort__dcacheline_cmp,
1894 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1895 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1896 };
1897 
1898 static int64_t
1899 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1900 {
1901 	union perf_mem_data_src data_src_l;
1902 	union perf_mem_data_src data_src_r;
1903 
1904 	if (left->mem_info)
1905 		data_src_l = *mem_info__data_src(left->mem_info);
1906 	else
1907 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1908 
1909 	if (right->mem_info)
1910 		data_src_r = *mem_info__data_src(right->mem_info);
1911 	else
1912 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1913 
1914 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1915 }
1916 
1917 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1918 					size_t size, unsigned int width)
1919 {
1920 	char out[16];
1921 
1922 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1923 	return repsep_snprintf(bf, size, "%.*s", width, out);
1924 }
1925 
1926 struct sort_entry sort_mem_blocked = {
1927 	.se_header	= "Blocked",
1928 	.se_cmp		= sort__blocked_cmp,
1929 	.se_snprintf	= hist_entry__blocked_snprintf,
1930 	.se_width_idx	= HISTC_MEM_BLOCKED,
1931 };
1932 
1933 static int64_t
1934 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1935 {
1936 	uint64_t l = 0, r = 0;
1937 
1938 	if (left->mem_info)
1939 		l = mem_info__daddr(left->mem_info)->phys_addr;
1940 	if (right->mem_info)
1941 		r = mem_info__daddr(right->mem_info)->phys_addr;
1942 
1943 	return (int64_t)(r - l);
1944 }
1945 
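/*
 * Print the sample's privilege level character and the physical data
 * address, padded out to the column width.
 */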
1946 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1947 					   size_t size, unsigned int width)
1948 {
1949 	uint64_t addr = 0;
1950 	size_t ret = 0;
1951 	size_t len = BITS_PER_LONG / 4;
1952 
1953 	addr = mem_info__daddr(he->mem_info)->phys_addr;
1954 
1955 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1956 
1957 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1958 
1959 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1960 
1961 	if (ret > width)
1962 		bf[width] = '\0';
1963 
1964 	return width;
1965 }
1966 
1967 struct sort_entry sort_mem_phys_daddr = {
1968 	.se_header	= "Data Physical Address",
1969 	.se_cmp		= sort__phys_daddr_cmp,
1970 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1971 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1972 };
1973 
1974 static int64_t
1975 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1976 {
1977 	uint64_t l = 0, r = 0;
1978 
1979 	if (left->mem_info)
1980 		l = mem_info__daddr(left->mem_info)->data_page_size;
1981 	if (right->mem_info)
1982 		r = mem_info__daddr(right->mem_info)->data_page_size;
1983 
1984 	return (int64_t)(r - l);
1985 }
1986 
1987 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1988 					  size_t size, unsigned int width)
1989 {
1990 	char str[PAGE_SIZE_NAME_LEN];
1991 
1992 	return repsep_snprintf(bf, size, "%-*s", width,
1993 			get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
1994 }
1995 
1996 struct sort_entry sort_mem_data_page_size = {
1997 	.se_header	= "Data Page Size",
1998 	.se_cmp		= sort__data_page_size_cmp,
1999 	.se_snprintf	= hist_entry__data_page_size_snprintf,
2000 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
2001 };
2002 
2003 static int64_t
2004 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2005 {
2006 	uint64_t l = left->code_page_size;
2007 	uint64_t r = right->code_page_size;
2008 
2009 	return (int64_t)(r - l);
2010 }
2011 
2012 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2013 					  size_t size, unsigned int width)
2014 {
2015 	char str[PAGE_SIZE_NAME_LEN];
2016 
2017 	return repsep_snprintf(bf, size, "%-*s", width,
2018 			       get_page_size_name(he->code_page_size, str));
2019 }
2020 
2021 struct sort_entry sort_code_page_size = {
2022 	.se_header	= "Code Page Size",
2023 	.se_cmp		= sort__code_page_size_cmp,
2024 	.se_snprintf	= hist_entry__code_page_size_snprintf,
2025 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
2026 };
2027 
2028 static int64_t
2029 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2030 {
2031 	if (!left->branch_info || !right->branch_info)
2032 		return cmp_null(left->branch_info, right->branch_info);
2033 
2034 	return left->branch_info->flags.abort !=
2035 		right->branch_info->flags.abort;
2036 }
2037 
2038 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2039 				    size_t size, unsigned int width)
2040 {
2041 	const char *out = "N/A";
2042 
2043 	if (he->branch_info) {
2044 		if (he->branch_info->flags.abort)
2045 			out = "A";
2046 		else
2047 			out = ".";
2048 	}
2049 
2050 	return repsep_snprintf(bf, size, "%-*s", width, out);
2051 }
2052 
2053 struct sort_entry sort_abort = {
2054 	.se_header	= "Transaction abort",
2055 	.se_cmp		= sort__abort_cmp,
2056 	.se_snprintf	= hist_entry__abort_snprintf,
2057 	.se_width_idx	= HISTC_ABORT,
2058 };
2059 
2060 static int64_t
2061 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2062 {
2063 	if (!left->branch_info || !right->branch_info)
2064 		return cmp_null(left->branch_info, right->branch_info);
2065 
2066 	return left->branch_info->flags.in_tx !=
2067 		right->branch_info->flags.in_tx;
2068 }
2069 
2070 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2071 				    size_t size, unsigned int width)
2072 {
2073 	const char *out = "N/A";
2074 
2075 	if (he->branch_info) {
2076 		if (he->branch_info->flags.in_tx)
2077 			out = "T";
2078 		else
2079 			out = ".";
2080 	}
2081 
2082 	return repsep_snprintf(bf, size, "%-*s", width, out);
2083 }
2084 
2085 struct sort_entry sort_in_tx = {
2086 	.se_header	= "Branch in transaction",
2087 	.se_cmp		= sort__in_tx_cmp,
2088 	.se_snprintf	= hist_entry__in_tx_snprintf,
2089 	.se_width_idx	= HISTC_IN_TX,
2090 };
2091 
2092 static int64_t
2093 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2094 {
2095 	return left->transaction - right->transaction;
2096 }
2097 
2098 static inline char *add_str(char *p, const char *str)
2099 {
2100 	strcpy(p, str);
2101 	return p + strlen(str);
2102 }
2103 
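/*
 * Transaction flag bits and the strings shown for them in the
 * Transaction column.  Entries with skip_for_len set are not counted
 * when computing the default column width.
 */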
2104 static struct txbit {
2105 	unsigned flag;
2106 	const char *name;
2107 	int skip_for_len;
2108 } txbits[] = {
2109 	{ PERF_TXN_ELISION,        "EL ",        0 },
2110 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
2111 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
2112 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
2113 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
2114 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
2115 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2116 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
2117 	{ 0, NULL, 0 }
2118 };
2119 
2120 int hist_entry__transaction_len(void)
2121 {
2122 	int i;
2123 	int len = 0;
2124 
2125 	for (i = 0; txbits[i].name; i++) {
2126 		if (!txbits[i].skip_for_len)
2127 			len += strlen(txbits[i].name);
2128 	}
2129 	len += 4; /* :XX<space> */
2130 	return len;
2131 }
2132 
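/*
 * Pretty print the transaction flags: the flag names, "NEITHER " when
 * transaction bits are set without SYNC or ASYNC, and the abort code
 * in hex after a ':'.
 */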
2133 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2134 					    size_t size, unsigned int width)
2135 {
2136 	u64 t = he->transaction;
2137 	char buf[128];
2138 	char *p = buf;
2139 	int i;
2140 
2141 	buf[0] = 0;
2142 	for (i = 0; txbits[i].name; i++)
2143 		if (txbits[i].flag & t)
2144 			p = add_str(p, txbits[i].name);
2145 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2146 		p = add_str(p, "NEITHER ");
2147 	if (t & PERF_TXN_ABORT_MASK) {
2148 		sprintf(p, ":%" PRIx64,
2149 			(t & PERF_TXN_ABORT_MASK) >>
2150 			PERF_TXN_ABORT_SHIFT);
2151 		p += strlen(p);
2152 	}
2153 
2154 	return repsep_snprintf(bf, size, "%-*s", width, buf);
2155 }
2156 
2157 struct sort_entry sort_transaction = {
2158 	.se_header	= "Transaction                ",
2159 	.se_cmp		= sort__transaction_cmp,
2160 	.se_snprintf	= hist_entry__transaction_snprintf,
2161 	.se_width_idx	= HISTC_TRANSACTION,
2162 };
2163 
2164 /* --sort symbol_size */
2165 
2166 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2167 {
2168 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2169 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2170 
2171 	return size_l < size_r ? -1 :
2172 		size_l == size_r ? 0 : 1;
2173 }
2174 
2175 static int64_t
2176 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2177 {
2178 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2179 }
2180 
2181 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2182 					  size_t bf_size, unsigned int width)
2183 {
2184 	if (sym)
2185 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2186 
2187 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2188 }
2189 
2190 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2191 					 size_t size, unsigned int width)
2192 {
2193 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2194 }
2195 
2196 struct sort_entry sort_sym_size = {
2197 	.se_header	= "Symbol size",
2198 	.se_cmp		= sort__sym_size_cmp,
2199 	.se_snprintf	= hist_entry__sym_size_snprintf,
2200 	.se_width_idx	= HISTC_SYM_SIZE,
2201 };
2202 
2203 /* --sort dso_size */
2204 
2205 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2206 {
2207 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2208 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2209 
2210 	return size_l < size_r ? -1 :
2211 		size_l == size_r ? 0 : 1;
2212 }
2213 
2214 static int64_t
2215 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2216 {
2217 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2218 }
2219 
2220 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2221 					  size_t bf_size, unsigned int width)
2222 {
2223 	if (map && map__dso(map))
2224 		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2225 
2226 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2227 }
2228 
2229 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2230 					 size_t size, unsigned int width)
2231 {
2232 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2233 }
2234 
2235 struct sort_entry sort_dso_size = {
2236 	.se_header	= "DSO size",
2237 	.se_cmp		= sort__dso_size_cmp,
2238 	.se_snprintf	= hist_entry__dso_size_snprintf,
2239 	.se_width_idx	= HISTC_DSO_SIZE,
2240 };
2241 
2242 /* --sort addr */
2243 
2244 static int64_t
2245 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2246 {
2247 	u64 left_ip = left->ip;
2248 	u64 right_ip = right->ip;
2249 	struct map *left_map = left->ms.map;
2250 	struct map *right_map = right->ms.map;
2251 
2252 	if (left_map)
2253 		left_ip = map__unmap_ip(left_map, left_ip);
2254 	if (right_map)
2255 		right_ip = map__unmap_ip(right_map, right_ip);
2256 
2257 	return _sort__addr_cmp(left_ip, right_ip);
2258 }
2259 
2260 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2261 				     size_t size, unsigned int width)
2262 {
2263 	u64 ip = he->ip;
2264 	struct map *map = he->ms.map;
2265 
2266 	if (map)
2267 		ip = map__unmap_ip(map, ip);
2268 
2269 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2270 }
2271 
2272 struct sort_entry sort_addr = {
2273 	.se_header	= "Address",
2274 	.se_cmp		= sort__addr_cmp,
2275 	.se_snprintf	= hist_entry__addr_snprintf,
2276 	.se_width_idx	= HISTC_ADDR,
2277 };
2278 
2279 /* --sort type */
2280 
2281 struct annotated_data_type unknown_type = {
2282 	.self = {
2283 		.type_name = (char *)"(unknown)",
2284 		.children = LIST_HEAD_INIT(unknown_type.self.children),
2285 	},
2286 };
2287 
2288 static int64_t
2289 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2290 {
2291 	return sort__addr_cmp(left, right);
2292 }
2293 
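/*
 * Resolve the data type lazily on first use; fall back to unknown_type
 * when it cannot be determined.
 */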
2294 static void sort__type_init(struct hist_entry *he)
2295 {
2296 	if (he->mem_type)
2297 		return;
2298 
2299 	he->mem_type = hist_entry__get_data_type(he);
2300 	if (he->mem_type == NULL) {
2301 		he->mem_type = &unknown_type;
2302 		he->mem_type_off = 0;
2303 	}
2304 }
2305 
2306 static int64_t
2307 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2308 {
2309 	struct annotated_data_type *left_type = left->mem_type;
2310 	struct annotated_data_type *right_type = right->mem_type;
2311 
2312 	if (!left_type) {
2313 		sort__type_init(left);
2314 		left_type = left->mem_type;
2315 	}
2316 
2317 	if (!right_type) {
2318 		sort__type_init(right);
2319 		right_type = right->mem_type;
2320 	}
2321 
2322 	return strcmp(left_type->self.type_name, right_type->self.type_name);
2323 }
2324 
2325 static int64_t
2326 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2327 {
2328 	return sort__type_collapse(left, right);
2329 }
2330 
2331 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2332 				     size_t size, unsigned int width)
2333 {
2334 	return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2335 }
2336 
2337 struct sort_entry sort_type = {
2338 	.se_header	= "Data Type",
2339 	.se_cmp		= sort__type_cmp,
2340 	.se_collapse	= sort__type_collapse,
2341 	.se_sort	= sort__type_sort,
2342 	.se_init	= sort__type_init,
2343 	.se_snprintf	= hist_entry__type_snprintf,
2344 	.se_width_idx	= HISTC_TYPE,
2345 };
2346 
2347 /* --sort typeoff */
2348 
2349 static int64_t
2350 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2351 {
2352 	struct annotated_data_type *left_type = left->mem_type;
2353 	struct annotated_data_type *right_type = right->mem_type;
2354 	int64_t ret;
2355 
2356 	if (!left_type) {
2357 		sort__type_init(left);
2358 		left_type = left->mem_type;
2359 	}
2360 
2361 	if (!right_type) {
2362 		sort__type_init(right);
2363 		right_type = right->mem_type;
2364 	}
2365 
2366 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2367 	if (ret)
2368 		return ret;
2369 	return left->mem_type_off - right->mem_type_off;
2370 }
2371 
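/*
 * Recursively walk the members of the data type and build a dotted
 * field name (e.g. "a.b.c") for the member containing the given offset.
 * Anonymous struct/union members contribute no name component.
 */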
2372 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2373 			     int offset, bool first)
2374 {
2375 	struct annotated_member *child;
2376 
2377 	if (list_empty(&m->children))
2378 		return;
2379 
2380 	list_for_each_entry(child, &m->children, node) {
2381 		if (child->offset <= offset && offset < child->offset + child->size) {
2382 			int len = 0;
2383 
2384 			/* It can have anonymous struct/union members */
2385 			if (child->var_name) {
2386 				len = scnprintf(buf, sz, "%s%s",
2387 						first ? "" : ".", child->var_name);
2388 				first = false;
2389 			}
2390 
2391 			fill_member_name(buf + len, sz - len, child, offset, first);
2392 			return;
2393 		}
2394 	}
2395 }
2396 
2397 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2398 				     size_t size, unsigned int width __maybe_unused)
2399 {
2400 	struct annotated_data_type *he_type = he->mem_type;
2401 	char buf[4096];
2402 
2403 	buf[0] = '\0';
2404 	if (list_empty(&he_type->self.children))
2405 		snprintf(buf, sizeof(buf), "no field");
2406 	else
2407 		fill_member_name(buf, sizeof(buf), &he_type->self,
2408 				 he->mem_type_off, true);
2409 	buf[4095] = '\0';
2410 
2411 	return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2412 			       he->mem_type_off, buf);
2413 }
2414 
2415 struct sort_entry sort_type_offset = {
2416 	.se_header	= "Data Type Offset",
2417 	.se_cmp		= sort__type_cmp,
2418 	.se_collapse	= sort__typeoff_sort,
2419 	.se_sort	= sort__typeoff_sort,
2420 	.se_init	= sort__type_init,
2421 	.se_snprintf	= hist_entry__typeoff_snprintf,
2422 	.se_width_idx	= HISTC_TYPE_OFFSET,
2423 };
2424 
2425 /* --sort typecln */
2426 
2427 /* TODO: use actual value in the system */
2428 #define TYPE_CACHELINE_SIZE  64
2429 
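/*
 * Group entries of the same data type by the cacheline that the
 * accessed offset falls into, at TYPE_CACHELINE_SIZE granularity.
 */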
2430 static int64_t
2431 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2432 {
2433 	struct annotated_data_type *left_type = left->mem_type;
2434 	struct annotated_data_type *right_type = right->mem_type;
2435 	int64_t left_cln, right_cln;
2436 	int64_t ret;
2437 
2438 	if (!left_type) {
2439 		sort__type_init(left);
2440 		left_type = left->mem_type;
2441 	}
2442 
2443 	if (!right_type) {
2444 		sort__type_init(right);
2445 		right_type = right->mem_type;
2446 	}
2447 
2448 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2449 	if (ret)
2450 		return ret;
2451 
2452 	left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2453 	right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2454 	return left_cln - right_cln;
2455 }
2456 
2457 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2458 				     size_t size, unsigned int width __maybe_unused)
2459 {
2460 	struct annotated_data_type *he_type = he->mem_type;
2461 
2462 	return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2463 			       he->mem_type_off / TYPE_CACHELINE_SIZE);
2464 }
2465 
2466 struct sort_entry sort_type_cacheline = {
2467 	.se_header	= "Data Type Cacheline",
2468 	.se_cmp		= sort__type_cmp,
2469 	.se_collapse	= sort__typecln_sort,
2470 	.se_sort	= sort__typecln_sort,
2471 	.se_init	= sort__type_init,
2472 	.se_snprintf	= hist_entry__typecln_snprintf,
2473 	.se_width_idx	= HISTC_TYPE_CACHELINE,
2474 };
2475 
2476 
2477 struct sort_dimension {
2478 	const char		*name;
2479 	struct sort_entry	*entry;
2480 	int			taken;
2481 };
2482 
2483 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2484 {
2485 	return 0;
2486 }
2487 
2488 const char * __weak arch_perf_header_entry(const char *se_header)
2489 {
2490 	return se_header;
2491 }
2492 
2493 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2494 {
2495 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2496 }
2497 
2498 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2499 
2500 static struct sort_dimension common_sort_dimensions[] = {
2501 	DIM(SORT_PID, "pid", sort_thread),
2502 	DIM(SORT_COMM, "comm", sort_comm),
2503 	DIM(SORT_DSO, "dso", sort_dso),
2504 	DIM(SORT_SYM, "symbol", sort_sym),
2505 	DIM(SORT_PARENT, "parent", sort_parent),
2506 	DIM(SORT_CPU, "cpu", sort_cpu),
2507 	DIM(SORT_SOCKET, "socket", sort_socket),
2508 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2509 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2510 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2511 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2512 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2513 #ifdef HAVE_LIBTRACEEVENT
2514 	DIM(SORT_TRACE, "trace", sort_trace),
2515 #endif
2516 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2517 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2518 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2519 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2520 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2521 	DIM(SORT_TIME, "time", sort_time),
2522 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2523 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2524 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2525 	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2526 	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2527 	DIM(SORT_ADDR, "addr", sort_addr),
2528 	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2529 	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2530 	DIM(SORT_SIMD, "simd", sort_simd),
2531 	DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2532 	DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2533 	DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2534 	DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2535 };
2536 
2537 #undef DIM
2538 
2539 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2540 
2541 static struct sort_dimension bstack_sort_dimensions[] = {
2542 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2543 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2544 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2545 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2546 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2547 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2548 	DIM(SORT_ABORT, "abort", sort_abort),
2549 	DIM(SORT_CYCLES, "cycles", sort_cycles),
2550 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2551 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2552 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2553 	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2554 	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2555 	DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2556 		"callchain_branch_predicted",
2557 		sort_callchain_branch_predicted),
2558 	DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2559 		"callchain_branch_abort",
2560 		sort_callchain_branch_abort),
2561 	DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2562 		"callchain_branch_cycles",
2563 		sort_callchain_branch_cycles)
2564 };
2565 
2566 #undef DIM
2567 
2568 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2569 
2570 static struct sort_dimension memory_sort_dimensions[] = {
2571 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2572 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2573 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2574 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2575 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2576 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2577 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2578 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2579 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2580 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2581 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2582 };
2583 
2584 #undef DIM
2585 
2586 struct hpp_dimension {
2587 	const char		*name;
2588 	struct perf_hpp_fmt	*fmt;
2589 	int			taken;
2590 };
2591 
2592 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2593 
2594 static struct hpp_dimension hpp_sort_dimensions[] = {
2595 	DIM(PERF_HPP__OVERHEAD, "overhead"),
2596 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2597 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2598 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2599 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2600 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2601 	DIM(PERF_HPP__SAMPLES, "sample"),
2602 	DIM(PERF_HPP__PERIOD, "period"),
2603 	DIM(PERF_HPP__WEIGHT1, "weight1"),
2604 	DIM(PERF_HPP__WEIGHT2, "weight2"),
2605 	DIM(PERF_HPP__WEIGHT3, "weight3"),
2606 	/* aliases for weight_struct */
2607 	DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2608 	DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2609 	DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2610 };
2611 
2612 #undef DIM
2613 
2614 struct hpp_sort_entry {
2615 	struct perf_hpp_fmt hpp;
2616 	struct sort_entry *se;
2617 };
2618 
2619 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2620 {
2621 	struct hpp_sort_entry *hse;
2622 
2623 	if (!perf_hpp__is_sort_entry(fmt))
2624 		return;
2625 
2626 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2627 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2628 }
2629 
2630 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2631 			      struct hists *hists, int line __maybe_unused,
2632 			      int *span __maybe_unused)
2633 {
2634 	struct hpp_sort_entry *hse;
2635 	size_t len = fmt->user_len;
2636 
2637 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2638 
2639 	if (!len)
2640 		len = hists__col_len(hists, hse->se->se_width_idx);
2641 
2642 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2643 }
2644 
2645 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2646 			     struct perf_hpp *hpp __maybe_unused,
2647 			     struct hists *hists)
2648 {
2649 	struct hpp_sort_entry *hse;
2650 	size_t len = fmt->user_len;
2651 
2652 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2653 
2654 	if (!len)
2655 		len = hists__col_len(hists, hse->se->se_width_idx);
2656 
2657 	return len;
2658 }
2659 
2660 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2661 			     struct hist_entry *he)
2662 {
2663 	struct hpp_sort_entry *hse;
2664 	size_t len = fmt->user_len;
2665 
2666 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2667 
2668 	if (!len)
2669 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2670 
2671 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2672 }
2673 
2674 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2675 			       struct hist_entry *a, struct hist_entry *b)
2676 {
2677 	struct hpp_sort_entry *hse;
2678 
2679 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2680 	return hse->se->se_cmp(a, b);
2681 }
2682 
2683 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2684 				    struct hist_entry *a, struct hist_entry *b)
2685 {
2686 	struct hpp_sort_entry *hse;
2687 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2688 
2689 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2690 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2691 	return collapse_fn(a, b);
2692 }
2693 
2694 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2695 				struct hist_entry *a, struct hist_entry *b)
2696 {
2697 	struct hpp_sort_entry *hse;
2698 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2699 
2700 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2701 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2702 	return sort_fn(a, b);
2703 }
2704 
2705 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2706 {
2707 	return format->header == __sort__hpp_header;
2708 }
2709 
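/*
 * Generate perf_hpp__is_<key>_entry() helpers that check whether a
 * hpp format is the sort entry for the given key.
 */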
2710 #define MK_SORT_ENTRY_CHK(key)					\
2711 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2712 {								\
2713 	struct hpp_sort_entry *hse;				\
2714 								\
2715 	if (!perf_hpp__is_sort_entry(fmt))			\
2716 		return false;					\
2717 								\
2718 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2719 	return hse->se == &sort_ ## key ;			\
2720 }
2721 
2722 #ifdef HAVE_LIBTRACEEVENT
2723 MK_SORT_ENTRY_CHK(trace)
2724 #else
2725 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2726 {
2727 	return false;
2728 }
2729 #endif
2730 MK_SORT_ENTRY_CHK(srcline)
2731 MK_SORT_ENTRY_CHK(srcfile)
2732 MK_SORT_ENTRY_CHK(thread)
2733 MK_SORT_ENTRY_CHK(comm)
2734 MK_SORT_ENTRY_CHK(dso)
2735 MK_SORT_ENTRY_CHK(sym)
2736 
2737 
2738 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2739 {
2740 	struct hpp_sort_entry *hse_a;
2741 	struct hpp_sort_entry *hse_b;
2742 
2743 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2744 		return false;
2745 
2746 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2747 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2748 
2749 	return hse_a->se == hse_b->se;
2750 }
2751 
2752 static void hse_free(struct perf_hpp_fmt *fmt)
2753 {
2754 	struct hpp_sort_entry *hse;
2755 
2756 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2757 	free(hse);
2758 }
2759 
2760 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2761 {
2762 	struct hpp_sort_entry *hse;
2763 
2764 	if (!perf_hpp__is_sort_entry(fmt))
2765 		return;
2766 
2767 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2768 
2769 	if (hse->se->se_init)
2770 		hse->se->se_init(he);
2771 }
2772 
2773 static struct hpp_sort_entry *
2774 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2775 {
2776 	struct hpp_sort_entry *hse;
2777 
2778 	hse = malloc(sizeof(*hse));
2779 	if (hse == NULL) {
2780 		pr_err("Memory allocation failed\n");
2781 		return NULL;
2782 	}
2783 
2784 	hse->se = sd->entry;
2785 	hse->hpp.name = sd->entry->se_header;
2786 	hse->hpp.header = __sort__hpp_header;
2787 	hse->hpp.width = __sort__hpp_width;
2788 	hse->hpp.entry = __sort__hpp_entry;
2789 	hse->hpp.color = NULL;
2790 
2791 	hse->hpp.cmp = __sort__hpp_cmp;
2792 	hse->hpp.collapse = __sort__hpp_collapse;
2793 	hse->hpp.sort = __sort__hpp_sort;
2794 	hse->hpp.equal = __sort__hpp_equal;
2795 	hse->hpp.free = hse_free;
2796 	hse->hpp.init = hse_init;
2797 
2798 	INIT_LIST_HEAD(&hse->hpp.list);
2799 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2800 	hse->hpp.elide = false;
2801 	hse->hpp.len = 0;
2802 	hse->hpp.user_len = 0;
2803 	hse->hpp.level = level;
2804 
2805 	return hse;
2806 }
2807 
2808 static void hpp_free(struct perf_hpp_fmt *fmt)
2809 {
2810 	free(fmt);
2811 }
2812 
2813 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2814 						       int level)
2815 {
2816 	struct perf_hpp_fmt *fmt;
2817 
2818 	fmt = memdup(hd->fmt, sizeof(*fmt));
2819 	if (fmt) {
2820 		INIT_LIST_HEAD(&fmt->list);
2821 		INIT_LIST_HEAD(&fmt->sort_list);
2822 		fmt->free = hpp_free;
2823 		fmt->level = level;
2824 	}
2825 
2826 	return fmt;
2827 }
2828 
2829 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2830 {
2831 	struct perf_hpp_fmt *fmt;
2832 	struct hpp_sort_entry *hse;
2833 	int ret = -1;
2834 	int r;
2835 
2836 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2837 		if (!perf_hpp__is_sort_entry(fmt))
2838 			continue;
2839 
2840 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2841 		if (hse->se->se_filter == NULL)
2842 			continue;
2843 
2844 		/*
2845 		 * A hist entry is filtered if any of the sort keys in the hpp
2846 		 * list applies.  But non-matching filter types should be skipped.
2847 		 */
2848 		r = hse->se->se_filter(he, type, arg);
2849 		if (r >= 0) {
2850 			if (ret < 0)
2851 				ret = 0;
2852 			ret |= r;
2853 		}
2854 	}
2855 
2856 	return ret;
2857 }
2858 
2859 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2860 					  struct perf_hpp_list *list,
2861 					  int level)
2862 {
2863 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2864 
2865 	if (hse == NULL)
2866 		return -1;
2867 
2868 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2869 	return 0;
2870 }
2871 
2872 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2873 					    struct perf_hpp_list *list)
2874 {
2875 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2876 
2877 	if (hse == NULL)
2878 		return -1;
2879 
2880 	perf_hpp_list__column_register(list, &hse->hpp);
2881 	return 0;
2882 }
2883 
2884 #ifndef HAVE_LIBTRACEEVENT
2885 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2886 {
2887 	return false;
2888 }
2889 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2890 				     struct hists *hists __maybe_unused)
2891 {
2892 	return false;
2893 }
2894 #else
2895 struct hpp_dynamic_entry {
2896 	struct perf_hpp_fmt hpp;
2897 	struct evsel *evsel;
2898 	struct tep_format_field *field;
2899 	unsigned dynamic_len;
2900 	bool raw_trace;
2901 };
2902 
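/*
 * Column width for a dynamic (tracepoint) field: the longest of the
 * field name, the values seen so far and, for non-string fields, the
 * hex representation ("0x" plus two characters per byte).
 */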
2903 static int hde_width(struct hpp_dynamic_entry *hde)
2904 {
2905 	if (!hde->hpp.len) {
2906 		int len = hde->dynamic_len;
2907 		int namelen = strlen(hde->field->name);
2908 		int fieldlen = hde->field->size;
2909 
2910 		if (namelen > len)
2911 			len = namelen;
2912 
2913 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2914 			/* length needed to print the value in hex ("0x" + 2 chars per byte) */
2915 			fieldlen = hde->field->size * 2 + 2;
2916 		}
2917 		if (fieldlen > len)
2918 			len = fieldlen;
2919 
2920 		hde->hpp.len = len;
2921 	}
2922 	return hde->hpp.len;
2923 }
2924 
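/*
 * Scan the space-separated pretty-print output (typically
 * "name=value ...") for this field and record the longest value seen,
 * to size the column.
 */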
2925 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2926 			       struct hist_entry *he)
2927 {
2928 	char *str, *pos;
2929 	struct tep_format_field *field = hde->field;
2930 	size_t namelen;
2931 	bool last = false;
2932 
2933 	if (hde->raw_trace)
2934 		return;
2935 
2936 	/* parse pretty print result and update max length */
2937 	if (!he->trace_output)
2938 		he->trace_output = get_trace_output(he);
2939 
2940 	namelen = strlen(field->name);
2941 	str = he->trace_output;
2942 
2943 	while (str) {
2944 		pos = strchr(str, ' ');
2945 		if (pos == NULL) {
2946 			last = true;
2947 			pos = str + strlen(str);
2948 		}
2949 
2950 		if (!strncmp(str, field->name, namelen)) {
2951 			size_t len;
2952 
2953 			str += namelen + 1;
2954 			len = pos - str;
2955 
2956 			if (len > hde->dynamic_len)
2957 				hde->dynamic_len = len;
2958 			break;
2959 		}
2960 
2961 		if (last)
2962 			str = NULL;
2963 		else
2964 			str = pos + 1;
2965 	}
2966 }
2967 
2968 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2969 			      struct hists *hists __maybe_unused,
2970 			      int line __maybe_unused,
2971 			      int *span __maybe_unused)
2972 {
2973 	struct hpp_dynamic_entry *hde;
2974 	size_t len = fmt->user_len;
2975 
2976 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2977 
2978 	if (!len)
2979 		len = hde_width(hde);
2980 
2981 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2982 }
2983 
2984 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2985 			     struct perf_hpp *hpp __maybe_unused,
2986 			     struct hists *hists __maybe_unused)
2987 {
2988 	struct hpp_dynamic_entry *hde;
2989 	size_t len = fmt->user_len;
2990 
2991 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2992 
2993 	if (!len)
2994 		len = hde_width(hde);
2995 
2996 	return len;
2997 }
2998 
2999 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3000 {
3001 	struct hpp_dynamic_entry *hde;
3002 
3003 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3004 
3005 	return hists_to_evsel(hists) == hde->evsel;
3006 }
3007 
3008 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3009 			     struct hist_entry *he)
3010 {
3011 	struct hpp_dynamic_entry *hde;
3012 	size_t len = fmt->user_len;
3013 	char *str, *pos;
3014 	struct tep_format_field *field;
3015 	size_t namelen;
3016 	bool last = false;
3017 	int ret;
3018 
3019 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3020 
3021 	if (!len)
3022 		len = hde_width(hde);
3023 
3024 	if (hde->raw_trace)
3025 		goto raw_field;
3026 
3027 	if (!he->trace_output)
3028 		he->trace_output = get_trace_output(he);
3029 
3030 	field = hde->field;
3031 	namelen = strlen(field->name);
3032 	str = he->trace_output;
3033 
3034 	while (str) {
3035 		pos = strchr(str, ' ');
3036 		if (pos == NULL) {
3037 			last = true;
3038 			pos = str + strlen(str);
3039 		}
3040 
3041 		if (!strncmp(str, field->name, namelen)) {
3042 			str += namelen + 1;
3043 			str = strndup(str, pos - str);
3044 
3045 			if (str == NULL)
3046 				return scnprintf(hpp->buf, hpp->size,
3047 						 "%*.*s", len, len, "ERROR");
3048 			break;
3049 		}
3050 
3051 		if (last)
3052 			str = NULL;
3053 		else
3054 			str = pos + 1;
3055 	}
3056 
3057 	if (str == NULL) {
3058 		struct trace_seq seq;
3059 raw_field:
3060 		trace_seq_init(&seq);
3061 		tep_print_field(&seq, he->raw_data, hde->field);
3062 		str = seq.buffer;
3063 	}
3064 
3065 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3066 	free(str);
3067 	return ret;
3068 }
3069 
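/*
 * Compare the raw trace data of two entries for this field.  For
 * dynamic fields the recorded value packs the data offset in the low
 * 16 bits and the size in the high 16 bits; relative dynamic fields
 * are offset from the end of the field itself.
 */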
3070 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3071 			       struct hist_entry *a, struct hist_entry *b)
3072 {
3073 	struct hpp_dynamic_entry *hde;
3074 	struct tep_format_field *field;
3075 	unsigned offset, size;
3076 
3077 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3078 
3079 	field = hde->field;
3080 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3081 		unsigned long long dyn;
3082 
3083 		tep_read_number_field(field, a->raw_data, &dyn);
3084 		offset = dyn & 0xffff;
3085 		size = (dyn >> 16) & 0xffff;
3086 		if (tep_field_is_relative(field->flags))
3087 			offset += field->offset + field->size;
3088 		/* record max width for output */
3089 		if (size > hde->dynamic_len)
3090 			hde->dynamic_len = size;
3091 	} else {
3092 		offset = field->offset;
3093 		size = field->size;
3094 	}
3095 
3096 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3097 }
3098 
3099 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3100 {
3101 	return fmt->cmp == __sort__hde_cmp;
3102 }
3103 
3104 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3105 {
3106 	struct hpp_dynamic_entry *hde_a;
3107 	struct hpp_dynamic_entry *hde_b;
3108 
3109 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3110 		return false;
3111 
3112 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3113 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3114 
3115 	return hde_a->field == hde_b->field;
3116 }
3117 
3118 static void hde_free(struct perf_hpp_fmt *fmt)
3119 {
3120 	struct hpp_dynamic_entry *hde;
3121 
3122 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3123 	free(hde);
3124 }
3125 
3126 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3127 {
3128 	struct hpp_dynamic_entry *hde;
3129 
3130 	if (!perf_hpp__is_dynamic_entry(fmt))
3131 		return;
3132 
3133 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3134 	update_dynamic_len(hde, he);
3135 }
3136 
3137 static struct hpp_dynamic_entry *
3138 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3139 		      int level)
3140 {
3141 	struct hpp_dynamic_entry *hde;
3142 
3143 	hde = malloc(sizeof(*hde));
3144 	if (hde == NULL) {
3145 		pr_debug("Memory allocation failed\n");
3146 		return NULL;
3147 	}
3148 
3149 	hde->evsel = evsel;
3150 	hde->field = field;
3151 	hde->dynamic_len = 0;
3152 
3153 	hde->hpp.name = field->name;
3154 	hde->hpp.header = __sort__hde_header;
3155 	hde->hpp.width  = __sort__hde_width;
3156 	hde->hpp.entry  = __sort__hde_entry;
3157 	hde->hpp.color  = NULL;
3158 
3159 	hde->hpp.init = __sort__hde_init;
3160 	hde->hpp.cmp = __sort__hde_cmp;
3161 	hde->hpp.collapse = __sort__hde_cmp;
3162 	hde->hpp.sort = __sort__hde_cmp;
3163 	hde->hpp.equal = __sort__hde_equal;
3164 	hde->hpp.free = hde_free;
3165 
3166 	INIT_LIST_HEAD(&hde->hpp.list);
3167 	INIT_LIST_HEAD(&hde->hpp.sort_list);
3168 	hde->hpp.elide = false;
3169 	hde->hpp.len = 0;
3170 	hde->hpp.user_len = 0;
3171 	hde->hpp.level = level;
3172 
3173 	return hde;
3174 }
3175 #endif /* HAVE_LIBTRACEEVENT */
3176 
3177 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3178 {
3179 	struct perf_hpp_fmt *new_fmt = NULL;
3180 
3181 	if (perf_hpp__is_sort_entry(fmt)) {
3182 		struct hpp_sort_entry *hse, *new_hse;
3183 
3184 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3185 		new_hse = memdup(hse, sizeof(*hse));
3186 		if (new_hse)
3187 			new_fmt = &new_hse->hpp;
3188 #ifdef HAVE_LIBTRACEEVENT
3189 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
3190 		struct hpp_dynamic_entry *hde, *new_hde;
3191 
3192 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3193 		new_hde = memdup(hde, sizeof(*hde));
3194 		if (new_hde)
3195 			new_fmt = &new_hde->hpp;
3196 #endif
3197 	} else {
3198 		new_fmt = memdup(fmt, sizeof(*fmt));
3199 	}
3200 
3201 	INIT_LIST_HEAD(&new_fmt->list);
3202 	INIT_LIST_HEAD(&new_fmt->sort_list);
3203 
3204 	return new_fmt;
3205 }
3206 
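/*
 * Split a --sort/--fields token of the form [<event>.]<field>[/<option>],
 * e.g. "sched:sched_switch.prev_comm/raw".  Without a '.', the whole
 * string is taken as the field name.
 */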
3207 static int parse_field_name(char *str, char **event, char **field, char **opt)
3208 {
3209 	char *event_name, *field_name, *opt_name;
3210 
3211 	event_name = str;
3212 	field_name = strchr(str, '.');
3213 
3214 	if (field_name) {
3215 		*field_name++ = '\0';
3216 	} else {
3217 		event_name = NULL;
3218 		field_name = str;
3219 	}
3220 
3221 	opt_name = strchr(field_name, '/');
3222 	if (opt_name)
3223 		*opt_name++ = '\0';
3224 
3225 	*event = event_name;
3226 	*field = field_name;
3227 	*opt   = opt_name;
3228 
3229 	return 0;
3230 }
3231 
3232 /* find a matching evsel using a given event name.  The event name can be:
3233  *   1. '%' + event index (e.g. '%1' for first event)
3234  *   2. full event name (e.g. sched:sched_switch)
3235  *   3. partial event name (should not contain ':')
3236  */
3237 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3238 {
3239 	struct evsel *evsel = NULL;
3240 	struct evsel *pos;
3241 	bool full_name;
3242 
3243 	/* case 1 */
3244 	if (event_name[0] == '%') {
3245 		int nr = strtol(event_name+1, NULL, 0);
3246 
3247 		if (nr > evlist->core.nr_entries)
3248 			return NULL;
3249 
3250 		evsel = evlist__first(evlist);
3251 		while (--nr > 0)
3252 			evsel = evsel__next(evsel);
3253 
3254 		return evsel;
3255 	}
3256 
3257 	full_name = !!strchr(event_name, ':');
3258 	evlist__for_each_entry(evlist, pos) {
3259 		/* case 2 */
3260 		if (full_name && evsel__name_is(pos, event_name))
3261 			return pos;
3262 		/* case 3 */
3263 		if (!full_name && strstr(pos->name, event_name)) {
3264 			if (evsel) {
3265 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3266 					 event_name, evsel->name, pos->name);
3267 				return NULL;
3268 			}
3269 			evsel = pos;
3270 		}
3271 	}
3272 
3273 	return evsel;
3274 }
3275 
3276 #ifdef HAVE_LIBTRACEEVENT
3277 static int __dynamic_dimension__add(struct evsel *evsel,
3278 				    struct tep_format_field *field,
3279 				    bool raw_trace, int level)
3280 {
3281 	struct hpp_dynamic_entry *hde;
3282 
3283 	hde = __alloc_dynamic_entry(evsel, field, level);
3284 	if (hde == NULL)
3285 		return -ENOMEM;
3286 
3287 	hde->raw_trace = raw_trace;
3288 
3289 	perf_hpp__register_sort_field(&hde->hpp);
3290 	return 0;
3291 }
3292 
3293 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3294 {
3295 	int ret;
3296 	struct tep_format_field *field;
3297 
3298 	field = evsel->tp_format->format.fields;
3299 	while (field) {
3300 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3301 		if (ret < 0)
3302 			return ret;
3303 
3304 		field = field->next;
3305 	}
3306 	return 0;
3307 }
3308 
3309 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3310 				  int level)
3311 {
3312 	int ret;
3313 	struct evsel *evsel;
3314 
3315 	evlist__for_each_entry(evlist, evsel) {
3316 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3317 			continue;
3318 
3319 		ret = add_evsel_fields(evsel, raw_trace, level);
3320 		if (ret < 0)
3321 			return ret;
3322 	}
3323 	return 0;
3324 }
3325 
3326 static int add_all_matching_fields(struct evlist *evlist,
3327 				   char *field_name, bool raw_trace, int level)
3328 {
3329 	int ret = -ESRCH;
3330 	struct evsel *evsel;
3331 	struct tep_format_field *field;
3332 
3333 	evlist__for_each_entry(evlist, evsel) {
3334 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3335 			continue;
3336 
3337 		field = tep_find_any_field(evsel->tp_format, field_name);
3338 		if (field == NULL)
3339 			continue;
3340 
3341 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3342 		if (ret < 0)
3343 			break;
3344 	}
3345 	return ret;
3346 }
3347 #endif /* HAVE_LIBTRACEEVENT */
3348 
3349 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3350 			     int level)
3351 {
3352 	char *str, *event_name, *field_name, *opt_name;
3353 	struct evsel *evsel;
3354 	bool raw_trace = symbol_conf.raw_trace;
3355 	int ret = 0;
3356 
3357 	if (evlist == NULL)
3358 		return -ENOENT;
3359 
3360 	str = strdup(tok);
3361 	if (str == NULL)
3362 		return -ENOMEM;
3363 
3364 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3365 		ret = -EINVAL;
3366 		goto out;
3367 	}
3368 
3369 	if (opt_name) {
3370 		if (strcmp(opt_name, "raw")) {
3371 			pr_debug("unsupported field option %s\n", opt_name);
3372 			ret = -EINVAL;
3373 			goto out;
3374 		}
3375 		raw_trace = true;
3376 	}
3377 
3378 #ifdef HAVE_LIBTRACEEVENT
3379 	if (!strcmp(field_name, "trace_fields")) {
3380 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
3381 		goto out;
3382 	}
3383 
3384 	if (event_name == NULL) {
3385 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3386 		goto out;
3387 	}
3388 #else
3389 	evlist__for_each_entry(evlist, evsel) {
3390 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3391 			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3392 			ret = -ENOTSUP;
3393 		}
3394 	}
3395 
3396 	if (ret) {
3397 		pr_err("\n");
3398 		goto out;
3399 	}
3400 #endif
3401 
3402 	evsel = find_evsel(evlist, event_name);
3403 	if (evsel == NULL) {
3404 		pr_debug("Cannot find event: %s\n", event_name);
3405 		ret = -ENOENT;
3406 		goto out;
3407 	}
3408 
3409 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3410 		pr_debug("%s is not a tracepoint event\n", event_name);
3411 		ret = -EINVAL;
3412 		goto out;
3413 	}
3414 
3415 #ifdef HAVE_LIBTRACEEVENT
3416 	if (!strcmp(field_name, "*")) {
3417 		ret = add_evsel_fields(evsel, raw_trace, level);
3418 	} else {
3419 		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3420 
3421 		if (field == NULL) {
3422 			pr_debug("Cannot find event field for %s.%s\n",
3423 				 event_name, field_name);
3424 			return -ENOENT;
3425 		}
3426 
3427 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3428 	}
3429 #else
3430 	(void)level;
3431 	(void)raw_trace;
3432 #endif /* HAVE_LIBTRACEEVENT */
3433 
3434 out:
3435 	free(str);
3436 	return ret;
3437 }
3438 
3439 static int __sort_dimension__add(struct sort_dimension *sd,
3440 				 struct perf_hpp_list *list,
3441 				 int level)
3442 {
3443 	if (sd->taken)
3444 		return 0;
3445 
3446 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3447 		return -1;
3448 
3449 	if (sd->entry->se_collapse)
3450 		list->need_collapse = 1;
3451 
3452 	sd->taken = 1;
3453 
3454 	return 0;
3455 }
3456 
3457 static int __hpp_dimension__add(struct hpp_dimension *hd,
3458 				struct perf_hpp_list *list,
3459 				int level)
3460 {
3461 	struct perf_hpp_fmt *fmt;
3462 
3463 	if (hd->taken)
3464 		return 0;
3465 
3466 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3467 	if (!fmt)
3468 		return -1;
3469 
3470 	hd->taken = 1;
3471 	perf_hpp_list__register_sort_field(list, fmt);
3472 	return 0;
3473 }
3474 
3475 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3476 					struct sort_dimension *sd)
3477 {
3478 	if (sd->taken)
3479 		return 0;
3480 
3481 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3482 		return -1;
3483 
3484 	sd->taken = 1;
3485 	return 0;
3486 }
3487 
3488 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3489 				       struct hpp_dimension *hd)
3490 {
3491 	struct perf_hpp_fmt *fmt;
3492 
3493 	if (hd->taken)
3494 		return 0;
3495 
3496 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3497 	if (!fmt)
3498 		return -1;
3499 
3500 	hd->taken = 1;
3501 	perf_hpp_list__column_register(list, fmt);
3502 	return 0;
3503 }
3504 
3505 int hpp_dimension__add_output(unsigned col)
3506 {
3507 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3508 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3509 }
3510 
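/*
 * Add a single sort key to the hpp list, looking it up in the common,
 * hpp, branch stack and memory dimension tables in turn, and finally
 * trying it as a dynamic tracepoint field.
 */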
3511 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3512 			struct evlist *evlist,
3513 			int level)
3514 {
3515 	unsigned int i, j;
3516 
3517 	/*
3518 	 * Check whether there are any arch specific sort dimensions
3519 	 * that are not applicable to the current architecture.  If so,
3520 	 * skip that sort key since we don't want to display it in the
3521 	 * output fields.
3522 	 */
3523 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3524 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3525 				!arch_support_sort_key(tok)) {
3526 			return 0;
3527 		}
3528 	}
3529 
3530 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3531 		struct sort_dimension *sd = &common_sort_dimensions[i];
3532 
3533 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3534 			continue;
3535 
3536 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3537 			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3538 				sort_dimension_add_dynamic_header(sd);
3539 		}
3540 
3541 		if (sd->entry == &sort_parent && parent_pattern) {
3542 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3543 			if (ret) {
3544 				char err[BUFSIZ];
3545 
3546 				regerror(ret, &parent_regex, err, sizeof(err));
3547 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3548 				return -EINVAL;
3549 			}
3550 			list->parent = 1;
3551 		} else if (sd->entry == &sort_sym) {
3552 			list->sym = 1;
3553 			/*
3554 			 * perf diff displays the performance difference between
3555 			 * two or more perf.data files.  Those files could come
3556 			 * from different binaries, so we should not compare
3557 			 * their IPs, but the names of the symbols.
3558 			 */
3559 			if (sort__mode == SORT_MODE__DIFF)
3560 				sd->entry->se_collapse = sort__sym_sort;
3561 
3562 		} else if (sd->entry == &sort_dso) {
3563 			list->dso = 1;
3564 		} else if (sd->entry == &sort_socket) {
3565 			list->socket = 1;
3566 		} else if (sd->entry == &sort_thread) {
3567 			list->thread = 1;
3568 		} else if (sd->entry == &sort_comm) {
3569 			list->comm = 1;
3570 		} else if (sd->entry == &sort_type_offset) {
3571 			symbol_conf.annotate_data_member = true;
3572 		}
3573 
3574 		return __sort_dimension__add(sd, list, level);
3575 	}
3576 
3577 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3578 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3579 
3580 		if (strncasecmp(tok, hd->name, strlen(tok)))
3581 			continue;
3582 
3583 		return __hpp_dimension__add(hd, list, level);
3584 	}
3585 
3586 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3587 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3588 
3589 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3590 			continue;
3591 
3592 		if ((sort__mode != SORT_MODE__BRANCH) &&
3593 			strncasecmp(tok, "callchain_branch_predicted",
3594 				    strlen(tok)) &&
3595 			strncasecmp(tok, "callchain_branch_abort",
3596 				    strlen(tok)) &&
3597 			strncasecmp(tok, "callchain_branch_cycles",
3598 				    strlen(tok)))
3599 			return -EINVAL;
3600 
3601 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3602 			list->sym = 1;
3603 
3604 		__sort_dimension__add(sd, list, level);
3605 		return 0;
3606 	}
3607 
3608 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3609 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3610 
3611 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3612 			continue;
3613 
3614 		if (sort__mode != SORT_MODE__MEMORY)
3615 			return -EINVAL;
3616 
3617 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3618 			return -EINVAL;
3619 
3620 		if (sd->entry == &sort_mem_daddr_sym)
3621 			list->sym = 1;
3622 
3623 		__sort_dimension__add(sd, list, level);
3624 		return 0;
3625 	}
3626 
3627 	if (!add_dynamic_entry(evlist, tok, level))
3628 		return 0;
3629 
3630 	return -ESRCH;
3631 }
3632 
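/*
 * Parse the comma/space separated sort key list.  Keys grouped within
 * '{' ... '}' share the same hierarchy level; otherwise each key gets
 * the next level.
 */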
3633 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3634 			   struct evlist *evlist)
3635 {
3636 	char *tmp, *tok;
3637 	int ret = 0;
3638 	int level = 0;
3639 	int next_level = 1;
3640 	bool in_group = false;
3641 
3642 	do {
3643 		tok = str;
3644 		tmp = strpbrk(str, "{}, ");
3645 		if (tmp) {
3646 			if (in_group)
3647 				next_level = level;
3648 			else
3649 				next_level = level + 1;
3650 
3651 			if (*tmp == '{')
3652 				in_group = true;
3653 			else if (*tmp == '}')
3654 				in_group = false;
3655 
3656 			*tmp = '\0';
3657 			str = tmp + 1;
3658 		}
3659 
3660 		if (*tok) {
3661 			ret = sort_dimension__add(list, tok, evlist, level);
3662 			if (ret == -EINVAL) {
3663 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3664 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3665 				else
3666 					ui__error("Invalid --sort key: `%s'", tok);
3667 				break;
3668 			} else if (ret == -ESRCH) {
3669 				ui__error("Unknown --sort key: `%s'", tok);
3670 				break;
3671 			}
3672 		}
3673 
3674 		level = next_level;
3675 	} while (tmp);
3676 
3677 	return ret;
3678 }
3679 
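/*
 * Pick the default sort order for the current sort mode.  If every
 * event in the session is a tracepoint, switch to the tracepoint sort
 * mode (and to raw trace fields when raw trace output is requested).
 */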
3680 static const char *get_default_sort_order(struct evlist *evlist)
3681 {
3682 	const char *default_sort_orders[] = {
3683 		default_sort_order,
3684 		default_branch_sort_order,
3685 		default_mem_sort_order,
3686 		default_top_sort_order,
3687 		default_diff_sort_order,
3688 		default_tracepoint_sort_order,
3689 	};
3690 	bool use_trace = true;
3691 	struct evsel *evsel;
3692 
3693 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3694 
3695 	if (evlist == NULL || evlist__empty(evlist))
3696 		goto out_no_evlist;
3697 
3698 	evlist__for_each_entry(evlist, evsel) {
3699 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3700 			use_trace = false;
3701 			break;
3702 		}
3703 	}
3704 
3705 	if (use_trace) {
3706 		sort__mode = SORT_MODE__TRACEPOINT;
3707 		if (symbol_conf.raw_trace)
3708 			return "trace_fields";
3709 	}
3710 out_no_evlist:
3711 	return default_sort_orders[sort__mode];
3712 }
3713 
3714 static int setup_sort_order(struct evlist *evlist)
3715 {
3716 	char *new_sort_order;
3717 
3718 	/*
3719 	 * Append '+'-prefixed sort order to the default sort
3720 	 * order string.
3721 	 */
3722 	if (!sort_order || is_strict_order(sort_order))
3723 		return 0;
3724 
3725 	if (sort_order[1] == '\0') {
3726 		ui__error("Invalid --sort key: `+'");
3727 		return -EINVAL;
3728 	}
3729 
3730 	/*
3731 	 * We allocate a new sort_order string, but we never free it,
3732 	 * because it is referenced throughout the rest of the code.
3733 	 */
3734 	if (asprintf(&new_sort_order, "%s,%s",
3735 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3736 		pr_err("Not enough memory to set up --sort");
3737 		return -ENOMEM;
3738 	}
3739 
3740 	sort_order = new_sort_order;
3741 	return 0;
3742 }
3743 
3744 /*
3745  * Adds the 'pre,' prefix to 'str' if 'pre' is
3746  * not already part of 'str'.
3747  */
3748 static char *prefix_if_not_in(const char *pre, char *str)
3749 {
3750 	char *n;
3751 
3752 	if (!str || strstr(str, pre))
3753 		return str;
3754 
3755 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3756 		n = NULL;
3757 
3758 	free(str);
3759 	return n;
3760 }
3761 
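/*
 * Make sure "overhead" (and "overhead_children" when cumulating
 * callchains) leads the sort keys.  perf diff keys are left untouched.
 */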
3762 static char *setup_overhead(char *keys)
3763 {
3764 	if (sort__mode == SORT_MODE__DIFF)
3765 		return keys;
3766 
3767 	keys = prefix_if_not_in("overhead", keys);
3768 
3769 	if (symbol_conf.cumulate_callchain)
3770 		keys = prefix_if_not_in("overhead_children", keys);
3771 
3772 	return keys;
3773 }
3774 
3775 static int __setup_sorting(struct evlist *evlist)
3776 {
3777 	char *str;
3778 	const char *sort_keys;
3779 	int ret = 0;
3780 
3781 	ret = setup_sort_order(evlist);
3782 	if (ret)
3783 		return ret;
3784 
3785 	sort_keys = sort_order;
3786 	if (sort_keys == NULL) {
3787 		if (is_strict_order(field_order)) {
3788 			/*
3789 			 * If user specified field order but no sort order,
3790 			 * we'll honor it and not add default sort orders.
3791 			 */
3792 			return 0;
3793 		}
3794 
3795 		sort_keys = get_default_sort_order(evlist);
3796 	}
3797 
3798 	str = strdup(sort_keys);
3799 	if (str == NULL) {
3800 		pr_err("Not enough memory to setup sort keys");
3801 		return -ENOMEM;
3802 	}
3803 
3804 	/*
3805 	 * Prepend overhead fields for backward compatibility.
3806 	 */
3807 	if (!is_strict_order(field_order)) {
3808 		str = setup_overhead(str);
3809 		if (str == NULL) {
3810 			pr_err("Not enough memory to setup overhead keys");
3811 			return -ENOMEM;
3812 		}
3813 	}
3814 
3815 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3816 
3817 	free(str);
3818 	return ret;
3819 }
3820 
3821 void perf_hpp__set_elide(int idx, bool elide)
3822 {
3823 	struct perf_hpp_fmt *fmt;
3824 	struct hpp_sort_entry *hse;
3825 
3826 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3827 		if (!perf_hpp__is_sort_entry(fmt))
3828 			continue;
3829 
3830 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3831 		if (hse->se->se_width_idx == idx) {
3832 			fmt->elide = elide;
3833 			break;
3834 		}
3835 	}
3836 }
3837 
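/*
 * A column can be elided when its filter list contains exactly one entry,
 * since every hist entry would show the same value.  The single value is
 * reported in the header instead, e.g.:
 *
 *   # dso: libc.so.6
 */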
3838 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3839 {
3840 	if (list && strlist__nr_entries(list) == 1) {
3841 		if (fp != NULL)
3842 			fprintf(fp, "# %s: %s\n", list_name,
3843 				strlist__entry(list, 0)->s);
3844 		return true;
3845 	}
3846 	return false;
3847 }
3848 
3849 static bool get_elide(int idx, FILE *output)
3850 {
3851 	switch (idx) {
3852 	case HISTC_SYMBOL:
3853 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3854 	case HISTC_DSO:
3855 		return __get_elide(symbol_conf.dso_list, "dso", output);
3856 	case HISTC_COMM:
3857 		return __get_elide(symbol_conf.comm_list, "comm", output);
3858 	default:
3859 		break;
3860 	}
3861 
3862 	if (sort__mode != SORT_MODE__BRANCH)
3863 		return false;
3864 
3865 	switch (idx) {
3866 	case HISTC_SYMBOL_FROM:
3867 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3868 	case HISTC_SYMBOL_TO:
3869 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3870 	case HISTC_DSO_FROM:
3871 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3872 	case HISTC_DSO_TO:
3873 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3874 	case HISTC_ADDR_FROM:
3875 		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3876 	case HISTC_ADDR_TO:
3877 		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3878 	default:
3879 		break;
3880 	}
3881 
3882 	return false;
3883 }
3884 
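/*
 * Walk all sort columns and elide the ones whose filter lists pin them to a
 * single value.  If that would elide every sort column, un-elide them all so
 * that some output remains visible.
 */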
3885 void sort__setup_elide(FILE *output)
3886 {
3887 	struct perf_hpp_fmt *fmt;
3888 	struct hpp_sort_entry *hse;
3889 
3890 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3891 		if (!perf_hpp__is_sort_entry(fmt))
3892 			continue;
3893 
3894 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3895 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3896 	}
3897 
3898 	/*
3899 	 * It makes no sense to elide all of the sort entries.
3900 	 * Just revert them so they show up again.
3901 	 */
3902 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3903 		if (!perf_hpp__is_sort_entry(fmt))
3904 			continue;
3905 
3906 		if (!fmt->elide)
3907 			return;
3908 	}
3909 
3910 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3911 		if (!perf_hpp__is_sort_entry(fmt))
3912 			continue;
3913 
3914 		fmt->elide = false;
3915 	}
3916 }
3917 
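/*
 * Look up an output field key, trying the hpp dimensions first, then the
 * common, branch-stack and memory sort dimensions.  Branch and memory keys
 * are only valid in their respective sort modes (-EINVAL otherwise);
 * unknown keys yield -ESRCH.
 */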
3918 int output_field_add(struct perf_hpp_list *list, const char *tok)
3919 {
3920 	unsigned int i;
3921 
3922 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3923 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3924 
3925 		if (strncasecmp(tok, hd->name, strlen(tok)))
3926 			continue;
3927 
3928 		if (!strcasecmp(tok, "weight"))
3929 			ui__warning("--fields weight shows the average value, unlike when it is used as a --sort key.\n");
3930 
3931 		return __hpp_dimension__add_output(list, hd);
3932 	}
3933 
3934 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3935 		struct sort_dimension *sd = &common_sort_dimensions[i];
3936 
3937 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3938 			continue;
3939 
3940 		return __sort_dimension__add_output(list, sd);
3941 	}
3942 
3943 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3944 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3945 
3946 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3947 			continue;
3948 
3949 		if (sort__mode != SORT_MODE__BRANCH)
3950 			return -EINVAL;
3951 
3952 		return __sort_dimension__add_output(list, sd);
3953 	}
3954 
3955 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3956 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3957 
3958 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3959 			continue;
3960 
3961 		if (sort__mode != SORT_MODE__MEMORY)
3962 			return -EINVAL;
3963 
3964 		return __sort_dimension__add_output(list, sd);
3965 	}
3966 
3967 	return -ESRCH;
3968 }
3969 
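/*
 * Parse a --fields string, split on ',' or ' '.  Illustrative example
 * (assuming the normal sort mode):
 *
 *   perf report --fields overhead,comm,dso
 */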
3970 static int setup_output_list(struct perf_hpp_list *list, char *str)
3971 {
3972 	char *tmp, *tok;
3973 	int ret = 0;
3974 
3975 	for (tok = strtok_r(str, ", ", &tmp);
3976 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3977 		ret = output_field_add(list, tok);
3978 		if (ret == -EINVAL) {
3979 			ui__error("Invalid --fields key: `%s'", tok);
3980 			break;
3981 		} else if (ret == -ESRCH) {
3982 			ui__error("Unknown --fields key: `%s'", tok);
3983 			break;
3984 		}
3985 	}
3986 
3987 	return ret;
3988 }
3989 
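/*
 * Clear the 'taken' flag on every dimension so that the sort and output
 * keys can be parsed again from scratch (e.g. from reset_output_field()).
 */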
3990 void reset_dimensions(void)
3991 {
3992 	unsigned int i;
3993 
3994 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3995 		common_sort_dimensions[i].taken = 0;
3996 
3997 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3998 		hpp_sort_dimensions[i].taken = 0;
3999 
4000 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
4001 		bstack_sort_dimensions[i].taken = 0;
4002 
4003 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
4004 		memory_sort_dimensions[i].taken = 0;
4005 }
4006 
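/*
 * A "strict" order replaces the defaults; a '+'-prefixed order extends them.
 * E.g. is_strict_order("comm,dso") is true, while is_strict_order("+period")
 * and is_strict_order(NULL) are false.
 */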
4007 bool is_strict_order(const char *order)
4008 {
4009 	return order && (*order != '+');
4010 }
4011 
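/*
 * Parse --fields: duplicate the string, skip over a leading '+' (a
 * non-strict order), reject a bare "+", and hand the remaining keys to
 * setup_output_list().
 */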
4012 static int __setup_output_field(void)
4013 {
4014 	char *str, *strp;
4015 	int ret = -EINVAL;
4016 
4017 	if (field_order == NULL)
4018 		return 0;
4019 
4020 	strp = str = strdup(field_order);
4021 	if (str == NULL) {
4022 		pr_err("Not enough memory to set up output fields");
4023 		return -ENOMEM;
4024 	}
4025 
4026 	if (!is_strict_order(field_order))
4027 		strp++;
4028 
4029 	if (!strlen(strp)) {
4030 		ui__error("Invalid --fields key: `+'");
4031 		goto out;
4032 	}
4033 
4034 	ret = setup_output_list(&perf_hpp_list, strp);
4035 
4036 out:
4037 	free(str);
4038 	return ret;
4039 }
4040 
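/*
 * Main entry point for sort/output setup: build the sort keys, add the
 * implicit "parent" key when a non-default parent pattern is in use, set up
 * the output fields, then copy sort keys into the output fields and output
 * fields back into the sort keys so the two lists stay consistent.
 */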
4041 int setup_sorting(struct evlist *evlist)
4042 {
4043 	int err;
4044 
4045 	err = __setup_sorting(evlist);
4046 	if (err < 0)
4047 		return err;
4048 
4049 	if (parent_pattern != default_parent_pattern) {
4050 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
4051 		if (err < 0)
4052 			return err;
4053 	}
4054 
4055 	reset_dimensions();
4056 
4057 	/*
4058 	 * perf diff doesn't use default hpp output fields.
4059 	 */
4060 	if (sort__mode != SORT_MODE__DIFF)
4061 		perf_hpp__init();
4062 
4063 	err = __setup_output_field();
4064 	if (err < 0)
4065 		return err;
4066 
4067 	/* copy sort keys to output fields */
4068 	perf_hpp__setup_output_field(&perf_hpp_list);
4069 	/* and then copy output fields to sort keys */
4070 	perf_hpp__append_sort_keys(&perf_hpp_list);
4071 
4072 	/* setup hists-specific output fields */
4073 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
4074 		return -1;
4075 
4076 	return 0;
4077 }
4078 
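/*
 * Undo the sort/output setup: clear perf_hpp_list state and the global
 * sort_order/field_order strings so that sorting can be configured again.
 */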
4079 void reset_output_field(void)
4080 {
4081 	perf_hpp_list.need_collapse = 0;
4082 	perf_hpp_list.parent = 0;
4083 	perf_hpp_list.sym = 0;
4084 	perf_hpp_list.dso = 0;
4085 
4086 	field_order = NULL;
4087 	sort_order = NULL;
4088 
4089 	reset_dimensions();
4090 	perf_hpp__reset_output_field(&perf_hpp_list);
4091 }
4092 
4093 #define INDENT (3*8 + 1)
4094 
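/*
 * Append " <str>" to the help string, wrapping to a new, indented line once
 * the current line reaches 75 columns.  INDENT (3*8 + 1) matches the
 * "\n\t\t\t " continuation prefix: three tabs plus one space.
 */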
4095 static void add_key(struct strbuf *sb, const char *str, int *llen)
4096 {
4097 	if (!str)
4098 		return;
4099 
4100 	if (*llen >= 75) {
4101 		strbuf_addstr(sb, "\n\t\t\t ");
4102 		*llen = INDENT;
4103 	}
4104 	strbuf_addf(sb, " %s", str);
4105 	*llen += strlen(str) + 1;
4106 }
4107 
4108 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4109 			    int *llen)
4110 {
4111 	int i;
4112 
4113 	for (i = 0; i < n; i++)
4114 		add_key(sb, s[i].name, llen);
4115 }
4116 
4117 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4118 				int *llen)
4119 {
4120 	int i;
4121 
4122 	for (i = 0; i < n; i++)
4123 		add_key(sb, s[i].name, llen);
4124 }
4125 
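/*
 * Build a help string listing the sort keys valid for the given mode.
 * The result is detached from the strbuf, so the caller owns the string
 * and is expected to free() it.  A caller might do, roughly:
 *
 *   char *s = sort_help("sort by: ", SORT_MODE__NORMAL);
 *   ...
 *   free(s);
 */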
4126 char *sort_help(const char *prefix, enum sort_mode mode)
4127 {
4128 	struct strbuf sb;
4129 	char *s;
4130 	int len = strlen(prefix) + INDENT;
4131 
4132 	strbuf_init(&sb, 300);
4133 	strbuf_addstr(&sb, prefix);
4134 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
4135 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
4136 	add_sort_string(&sb, common_sort_dimensions,
4137 			    ARRAY_SIZE(common_sort_dimensions), &len);
4138 	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4139 		add_sort_string(&sb, bstack_sort_dimensions,
4140 				ARRAY_SIZE(bstack_sort_dimensions), &len);
4141 	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4142 		add_sort_string(&sb, memory_sort_dimensions,
4143 				ARRAY_SIZE(memory_sort_dimensions), &len);
4144 	s = strbuf_detach(&sb, NULL);
4145 	strbuf_release(&sb);
4146 	return s;
4147 }
4148