xref: /linux/tools/perf/util/sort.c (revision 0f648fc245c316d799f853d7ab97f2bfef68d7dd)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <ctype.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include <stdlib.h>
7 #include <linux/mman.h>
8 #include <linux/time64.h>
9 #include "debug.h"
10 #include "dso.h"
11 #include "sort.h"
12 #include "hist.h"
13 #include "cacheline.h"
14 #include "comm.h"
15 #include "map.h"
16 #include "maps.h"
17 #include "symbol.h"
18 #include "map_symbol.h"
19 #include "branch.h"
20 #include "thread.h"
21 #include "evsel.h"
22 #include "evlist.h"
23 #include "srcline.h"
24 #include "strlist.h"
25 #include "strbuf.h"
26 #include "mem-events.h"
27 #include "mem-info.h"
28 #include "annotate.h"
29 #include "annotate-data.h"
30 #include "event.h"
31 #include "time-utils.h"
32 #include "cgroup.h"
33 #include "machine.h"
34 #include "session.h"
35 #include "trace-event.h"
36 #include <linux/kernel.h>
37 #include <linux/string.h>
38 
39 #ifdef HAVE_LIBTRACEEVENT
40 #include <event-parse.h>
41 #endif
42 
/* Compiled form of parent_pattern, used to classify "parent" symbols. */
regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
/* Default sort-key lists for the various tool modes (report/top/diff/...). */
const char	*default_sort_order = "comm,dso,symbol";
static const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
static const char	default_top_sort_order[] = "dso,symbol";
static const char	default_diff_sort_order[] = "dso,symbol";
static const char	default_tracepoint_sort_order[] = "trace";
/* NOTE(review): appear to hold user-supplied --sort/--fields strings. */
const char	*sort_order;
const char	*field_order;
/* Compiled --ignore-callees regex, and whether one was supplied. */
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
/* Sort keys whose column headers are not fixed / are arch-dependent. */
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have Adjacent Cacheline Prefetch feature, which
 * behaves like the cacheline size is doubled. Enable this flag to
 * check things in double cacheline granularity.
 */
bool chk_double_cl;
66 
67 /*
68  * Replaces all occurrences of a char used with the:
69  *
70  * -t, --field-separator
71  *
72  * option, that uses a special separator character and don't pad with spaces,
73  * replacing all occurrences of this separator in symbol names (and other
74  * output) with a '.' character, that thus it's the only non valid separator.
75 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		/* Replace every occurrence of the separator char with '.'. */
		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	/* vsnprintf() returns the would-be length; clamp to what fit. */
	if (n >= (int)size)
		return size - 1;
	return n;
}
99 
/*
 * Order two keys when at least one may be NULL: both NULL compare
 * equal, a NULL left sorts before any non-NULL right.
 */
static int64_t cmp_null(const void *l, const void *r)
{
	if (l == NULL)
		return (r == NULL) ? 0 : -1;
	return 1;
}
109 
110 /* --sort pid */
111 
112 static int64_t
113 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
114 {
115 	return thread__tid(right->thread) - thread__tid(left->thread);
116 }
117 
118 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
119 				       size_t size, unsigned int width)
120 {
121 	const char *comm = thread__comm_str(he->thread);
122 
123 	width = max(7U, width) - 8;
124 	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
125 			       width, width, comm ?: "");
126 }
127 
128 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
129 {
130 	const struct thread *th = arg;
131 
132 	if (type != HIST_FILTER__THREAD)
133 		return -1;
134 
135 	return th && !RC_CHK_EQUAL(he->thread, th);
136 }
137 
138 struct sort_entry sort_thread = {
139 	.se_header	= "    Pid:Command",
140 	.se_cmp		= sort__thread_cmp,
141 	.se_snprintf	= hist_entry__thread_snprintf,
142 	.se_filter	= hist_entry__thread_filter,
143 	.se_width_idx	= HISTC_THREAD,
144 };
145 
146 /* --sort tgid */
147 
148 static int64_t
149 sort__tgid_cmp(struct hist_entry *left, struct hist_entry *right)
150 {
151 	return thread__pid(right->thread) - thread__pid(left->thread);
152 }
153 
154 static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
155 				       size_t size, unsigned int width)
156 {
157 	int tgid = thread__pid(he->thread);
158 	const char *comm = NULL;
159 
160 	/* display comm of the thread-group leader */
161 	if (thread__pid(he->thread) == thread__tid(he->thread)) {
162 		comm = thread__comm_str(he->thread);
163 	} else {
164 		struct maps *maps = thread__maps(he->thread);
165 		struct thread *leader = machine__find_thread(maps__machine(maps),
166 							     tgid, tgid);
167 		if (leader) {
168 			comm = thread__comm_str(leader);
169 			thread__put(leader);
170 		}
171 	}
172 	width = max(7U, width) - 8;
173 	return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
174 }
175 
176 static struct sort_entry sort_tgid = {
177 	.se_header	= "   Tgid:Command",
178 	.se_cmp		= sort__tgid_cmp,
179 	.se_snprintf	= hist_entry__tgid_snprintf,
180 	.se_width_idx	= HISTC_TGID,
181 };
182 
183 /* --sort simd */
184 
185 static int64_t
186 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
187 {
188 	if (left->simd_flags.arch != right->simd_flags.arch)
189 		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
190 
191 	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
192 }
193 
194 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
195 {
196 	u64 arch = simd_flags->arch;
197 
198 	if (arch == SIMD_OP_FLAGS_ARCH_SVE)
199 		return "SVE";
200 	else if (arch == SIMD_OP_FLAGS_ARCH_SME)
201 		return "SME";
202 	else if (arch == SIMD_OP_FLAGS_ARCH_ASE)
203 		return "ASE";
204 	else
205 		return "n/a";
206 }
207 
208 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
209 				     size_t size, unsigned int width __maybe_unused)
210 {
211 	const char *name;
212 	const char *pred_str = ".";
213 
214 	if (!he->simd_flags.arch)
215 		return repsep_snprintf(bf, size, "");
216 
217 	name = hist_entry__get_simd_name(&he->simd_flags);
218 
219 	if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_EMPTY)
220 		pred_str = "e";
221 	else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_PARTIAL)
222 		pred_str = "p";
223 	else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_DISABLED)
224 		pred_str = "d";
225 	else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_FULL)
226 		pred_str = "f";
227 
228 	return repsep_snprintf(bf, size, "[%s] %s", pred_str, name);
229 }
230 
231 static struct sort_entry sort_simd = {
232 	.se_header	= "Simd   ",
233 	.se_cmp		= sort__simd_cmp,
234 	.se_snprintf	= hist_entry__simd_snprintf,
235 	.se_width_idx	= HISTC_SIMD,
236 };
237 
238 /* --sort comm */
239 
240 /*
241  * We can't use pointer comparison in functions below,
242  * because it gives different results based on pointer
243  * values, which could break some sorting assumptions.
244  */
245 static int64_t
246 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
247 {
248 	return strcmp(comm__str(right->comm), comm__str(left->comm));
249 }
250 
251 static int64_t
252 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
253 {
254 	return strcmp(comm__str(right->comm), comm__str(left->comm));
255 }
256 
257 static int64_t
258 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
259 {
260 	return strcmp(comm__str(right->comm), comm__str(left->comm));
261 }
262 
263 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
264 				     size_t size, unsigned int width)
265 {
266 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
267 }
268 
269 struct sort_entry sort_comm = {
270 	.se_header	= "Command",
271 	.se_cmp		= sort__comm_cmp,
272 	.se_collapse	= sort__comm_collapse,
273 	.se_sort	= sort__comm_sort,
274 	.se_snprintf	= hist_entry__comm_snprintf,
275 	.se_filter	= hist_entry__thread_filter,
276 	.se_width_idx	= HISTC_COMM,
277 };
278 
279 /* --sort comm_nodigit */
280 
281 size_t sort__comm_nodigit_len(struct hist_entry *entry)
282 {
283 	const char *comm = comm__str(entry->comm);
284 	size_t index, len_nodigit = 0;
285 	bool in_number = false;
286 
287 	if (!comm)
288 		return 0;
289 
290 	for (index = 0; comm[index]; index++) {
291 		if (!isdigit((unsigned char)comm[index])) {
292 			in_number = false;
293 			len_nodigit++;
294 		} else if (!in_number) {
295 			in_number = true;
296 			len_nodigit += 3; /* <N> */
297 		}
298 	}
299 
300 	return len_nodigit;
301 }
302 
/*
 * strcmp() variant that is blind to digits: every run of digits on
 * either side is skipped, so e.g. "worker1" and "worker23" compare
 * equal.
 */
static int64_t strcmp_nodigit(const char *left, const char *right)
{
	while (1) {
		if (isdigit((unsigned char)*left)) {
			left++;
			continue;
		}
		if (isdigit((unsigned char)*right)) {
			right++;
			continue;
		}
		if (*left != *right)
			return (int64_t)((unsigned char)*left - (unsigned char)*right);
		if (!*left)
			return 0;
		left++;
		right++;
	}
}
320 
321 static int64_t
322 sort__comm_nodigit_cmp(struct hist_entry *left, struct hist_entry *right)
323 {
324 	return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
325 }
326 
327 static int64_t
328 sort__comm_nodigit_collapse(struct hist_entry *left, struct hist_entry *right)
329 {
330 	return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
331 }
332 
333 static int64_t
334 sort__comm_nodigit_sort(struct hist_entry *left, struct hist_entry *right)
335 {
336 	return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
337 }
338 
static int hist_entry__comm_nodigit_snprintf(struct hist_entry *he, char *bf,
						size_t size, unsigned int width)
{
	int ret = 0;
	unsigned int print_len, printed = 0, start = 0, end = 0;
	bool in_digit;
	const char *comm = comm__str(he->comm), *print;

	/*
	 * Emit the comm with every maximal run of digits replaced by the
	 * "<N>" placeholder, truncated to 'width' columns.
	 * NOTE(review): assumes comm__str() never returns NULL here — confirm.
	 */
	while (printed < width && printed < size && comm[start]) {
		/* Find the end of the current all-digit or no-digit run. */
		in_digit = !!isdigit((unsigned char)comm[start]);
		end = start + 1;
		while (comm[end] && !!isdigit((unsigned char)comm[end]) == in_digit)
			end++;
		if (in_digit) {
			print_len = 3; /* <N> */
			print = "<N>";
		} else {
			print_len = end - start;
			print = &comm[start];
		}
		/* Never print past the requested column width. */
		print_len = min(print_len, width - printed);
		ret = repsep_snprintf(bf + printed, size - printed, "%-.*s",
					print_len, print);
		if (ret < 0)
			return ret;
		start = end;
		printed += ret;
	}
	/* Pad to width if necessary */
	if (printed < width && printed < size) {
		ret = repsep_snprintf(bf + printed, size - printed, "%-*.*s",
				       width - printed, width - printed, "");
		if (ret < 0)
			return ret;
		printed += ret;
	}
	return printed;
}

struct sort_entry sort_comm_nodigit = {
	.se_header	= "CommandNoDigit",
	.se_cmp		= sort__comm_nodigit_cmp,
	.se_collapse	= sort__comm_nodigit_collapse,
	.se_sort	= sort__comm_nodigit_sort,
	.se_snprintf	= hist_entry__comm_nodigit_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM_NODIGIT,
};
387 
388 /* --sort dso */
389 
390 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
391 {
392 	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
393 	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
394 	const char *dso_name_l, *dso_name_r;
395 
396 	if (!dso_l || !dso_r)
397 		return cmp_null(dso_r, dso_l);
398 
399 	if (verbose > 0) {
400 		dso_name_l = dso__long_name(dso_l);
401 		dso_name_r = dso__long_name(dso_r);
402 	} else {
403 		dso_name_l = dso__short_name(dso_l);
404 		dso_name_r = dso__short_name(dso_r);
405 	}
406 
407 	return strcmp(dso_name_l, dso_name_r);
408 }
409 
410 static int64_t
411 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
412 {
413 	return _sort__dso_cmp(right->ms.map, left->ms.map);
414 }
415 
416 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
417 				     size_t size, unsigned int width)
418 {
419 	const struct dso *dso = map ? map__dso(map) : NULL;
420 	const char *dso_name = "[unknown]";
421 
422 	if (dso)
423 		dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
424 
425 	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
426 }
427 
428 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
429 				    size_t size, unsigned int width)
430 {
431 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
432 }
433 
434 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
435 {
436 	const struct dso *dso = arg;
437 
438 	if (type != HIST_FILTER__DSO)
439 		return -1;
440 
441 	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
442 }
443 
444 struct sort_entry sort_dso = {
445 	.se_header	= "Shared Object",
446 	.se_cmp		= sort__dso_cmp,
447 	.se_snprintf	= hist_entry__dso_snprintf,
448 	.se_filter	= hist_entry__dso_filter,
449 	.se_width_idx	= HISTC_DSO,
450 };
451 
452 /* --sort symbol */
453 
454 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
455 {
456 	return (int64_t)(right_ip - left_ip);
457 }
458 
int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	/*
	 * Same-named inlined symbols may cover overlapping address ranges;
	 * treat a same-name overlap as equal so those entries collapse
	 * into one.
	 */
	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	/* Otherwise order by start address, then by end address. */
	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}
481 
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	/* With no symbols on either side, fall back to raw addresses. */
	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	/* Order alphabetically by symbol name (reversed strcmp). */
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}
511 
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ? dso__symtab_origin(dso) : '!';
		u64 rip = ip;

		/* Adjusted kernel symbols are printed at the unmapped address. */
		if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
			rip = map__unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	/* Privilege level marker, e.g. [k]/[.] */
	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			/* Data objects: name plus offset into the object. */
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map__unmap_ip(map, sym->start));
		} else {
			/*
			 * NOTE(review): 'width - ret' wraps if the prefixes
			 * already exceeded 'width' — confirm callers always
			 * pass a sufficiently large width.
			 */
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		/* No symbol: print the raw address. */
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}
560 
561 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
562 {
563 	const char *sym = arg;
564 
565 	if (type != HIST_FILTER__SYMBOL)
566 		return -1;
567 
568 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
569 }
570 
571 struct sort_entry sort_sym = {
572 	.se_header	= "Symbol",
573 	.se_cmp		= sort__sym_cmp,
574 	.se_sort	= sort__sym_sort,
575 	.se_snprintf	= hist_entry__sym_snprintf,
576 	.se_filter	= hist_entry__sym_filter,
577 	.se_width_idx	= HISTC_SYMBOL,
578 };
579 
/* --sort symoff */

static int64_t
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	/* Order by symbol first, then by exact instruction address. */
	ret = sort__sym_cmp(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int64_t
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_sort(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;

	/* NOTE(review): 'width - 4' wraps for width < 4 — confirm minimum. */
	if (sym == NULL)
		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}

static struct sort_entry sort_sym_offset = {
	.se_header	= "Symbol Offset",
	.se_cmp		= sort__symoff_cmp,
	.se_sort	= sort__symoff_sort,
	.se_snprintf	= hist_entry__symoff_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL_OFFSET,
};
625 
/* --sort srcline */

/* Resolve this entry's ip to a "file:line" source location string. */
char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	/* Cheap comparison by address/dso; avoids resolving srclines. */
	ret = _sort__addr_cmp(left->ip, right->ip);
	if (ret)
		return ret;

	return sort__dso_cmp(left, right);
}

static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Resolve and cache the srcline strings lazily. */
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int64_t
sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_collapse(left, right);
}

static void
sort__srcline_init(struct hist_entry *he)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_collapse	= sort__srcline_collapse,
	.se_sort	= sort__srcline_sort,
	.se_init	= sort__srcline_init,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};
684 
685 /* --sort srcline_from */
686 
687 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
688 {
689 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
690 }
691 
692 static int64_t
693 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
694 {
695 	return left->branch_info->from.addr - right->branch_info->from.addr;
696 }
697 
698 static int64_t
699 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
700 {
701 	if (!left->branch_info->srcline_from)
702 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
703 
704 	if (!right->branch_info->srcline_from)
705 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
706 
707 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
708 }
709 
710 static int64_t
711 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
712 {
713 	return sort__srcline_from_collapse(left, right);
714 }
715 
716 static void sort__srcline_from_init(struct hist_entry *he)
717 {
718 	if (!he->branch_info->srcline_from)
719 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
720 }
721 
722 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
723 					size_t size, unsigned int width)
724 {
725 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
726 }
727 
728 static struct sort_entry sort_srcline_from = {
729 	.se_header	= "From Source:Line",
730 	.se_cmp		= sort__srcline_from_cmp,
731 	.se_collapse	= sort__srcline_from_collapse,
732 	.se_sort	= sort__srcline_from_sort,
733 	.se_init	= sort__srcline_from_init,
734 	.se_snprintf	= hist_entry__srcline_from_snprintf,
735 	.se_width_idx	= HISTC_SRCLINE_FROM,
736 };
737 
738 /* --sort srcline_to */
739 
740 static int64_t
741 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
742 {
743 	return left->branch_info->to.addr - right->branch_info->to.addr;
744 }
745 
746 static int64_t
747 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
748 {
749 	if (!left->branch_info->srcline_to)
750 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
751 
752 	if (!right->branch_info->srcline_to)
753 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
754 
755 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
756 }
757 
758 static int64_t
759 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
760 {
761 	return sort__srcline_to_collapse(left, right);
762 }
763 
764 static void sort__srcline_to_init(struct hist_entry *he)
765 {
766 	if (!he->branch_info->srcline_to)
767 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
768 }
769 
770 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
771 					size_t size, unsigned int width)
772 {
773 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
774 }
775 
776 static struct sort_entry sort_srcline_to = {
777 	.se_header	= "To Source:Line",
778 	.se_cmp		= sort__srcline_to_cmp,
779 	.se_collapse	= sort__srcline_to_collapse,
780 	.se_sort	= sort__srcline_to_sort,
781 	.se_init	= sort__srcline_to_init,
782 	.se_snprintf	= hist_entry__srcline_to_snprintf,
783 	.se_width_idx	= HISTC_SRCLINE_TO,
784 };
785 
static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	/* IPC: instructions retired per cycle over the sampled branches. */
	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	/* Coverage: percentage of the symbol's instructions that were hit. */
	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

static struct sort_entry sort_sym_ipc = {
	.se_header	= "IPC   [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

/* Placeholder variant used when no IPC data is available: prints dashes. */
static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

static struct sort_entry sort_sym_ipc_null = {
	.se_header	= "IPC   [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};
836 
837 /* --sort callchain_branch_predicted */
838 
839 static int64_t
840 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
841 				     struct hist_entry *right __maybe_unused)
842 {
843 	return 0;
844 }
845 
846 static int hist_entry__callchain_branch_predicted_snprintf(
847 	struct hist_entry *he, char *bf, size_t size, unsigned int width)
848 {
849 	u64 branch_count, predicted_count;
850 	double percent = 0.0;
851 	char str[32];
852 
853 	callchain_branch_counts(he->callchain, &branch_count,
854 				&predicted_count, NULL, NULL);
855 
856 	if (branch_count)
857 		percent = predicted_count * 100.0 / branch_count;
858 
859 	snprintf(str, sizeof(str), "%.1f%%", percent);
860 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
861 }
862 
863 static struct sort_entry sort_callchain_branch_predicted = {
864 	.se_header	= "Predicted",
865 	.se_cmp		= sort__callchain_branch_predicted_cmp,
866 	.se_snprintf	= hist_entry__callchain_branch_predicted_snprintf,
867 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_PREDICTED,
868 };
869 
870 /* --sort callchain_branch_abort */
871 
872 static int64_t
873 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
874 				 struct hist_entry *right __maybe_unused)
875 {
876 	return 0;
877 }
878 
879 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
880 						       char *bf, size_t size,
881 						       unsigned int width)
882 {
883 	u64 branch_count, abort_count;
884 	char str[32];
885 
886 	callchain_branch_counts(he->callchain, &branch_count,
887 				NULL, &abort_count, NULL);
888 
889 	snprintf(str, sizeof(str), "%" PRId64, abort_count);
890 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
891 }
892 
893 static struct sort_entry sort_callchain_branch_abort = {
894 	.se_header	= "Abort",
895 	.se_cmp		= sort__callchain_branch_abort_cmp,
896 	.se_snprintf	= hist_entry__callchain_branch_abort_snprintf,
897 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_ABORT,
898 };
899 
900 /* --sort callchain_branch_cycles */
901 
902 static int64_t
903 sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
904 				  struct hist_entry *right __maybe_unused)
905 {
906 	return 0;
907 }
908 
909 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
910 							char *bf, size_t size,
911 							unsigned int width)
912 {
913 	u64 branch_count, cycles_count, cycles = 0;
914 	char str[32];
915 
916 	callchain_branch_counts(he->callchain, &branch_count,
917 				NULL, NULL, &cycles_count);
918 
919 	if (branch_count)
920 		cycles = cycles_count / branch_count;
921 
922 	snprintf(str, sizeof(str), "%" PRId64 "", cycles);
923 	return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
924 }
925 
926 static struct sort_entry sort_callchain_branch_cycles = {
927 	.se_header	= "Cycles",
928 	.se_cmp		= sort__callchain_branch_cycles_cmp,
929 	.se_snprintf	= hist_entry__callchain_branch_cycles_snprintf,
930 	.se_width_idx	= HISTC_CALLCHAIN_BRANCH_CYCLES,
931 };
932 
/* --sort srcfile */

/* Sentinel for "no source file resolvable"; shared, never freed. */
static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true, true, e->ip);
	/* NOTE(review): pointer compare — SRCLINE_UNKNOWN is a sentinel. */
	if (sf == SRCLINE_UNKNOWN)
		return no_srcfile;
	/* Keep only the file part of "file:line". */
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_cmp(left, right);
}

static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Resolve and cache the srcfile strings lazily. */
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcfile_collapse(left, right);
}

static void sort__srcfile_init(struct hist_entry *he)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

static struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_collapse	= sort__srcfile_collapse,
	.se_sort	= sort__srcfile_sort,
	.se_init	= sort__srcfile_init,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};
1002 
1003 /* --sort parent */
1004 
1005 static int64_t
1006 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
1007 {
1008 	struct symbol *sym_l = left->parent;
1009 	struct symbol *sym_r = right->parent;
1010 
1011 	if (!sym_l || !sym_r)
1012 		return cmp_null(sym_l, sym_r);
1013 
1014 	return strcmp(sym_r->name, sym_l->name);
1015 }
1016 
1017 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
1018 				       size_t size, unsigned int width)
1019 {
1020 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
1021 			      he->parent ? he->parent->name : "[other]");
1022 }
1023 
1024 struct sort_entry sort_parent = {
1025 	.se_header	= "Parent symbol",
1026 	.se_cmp		= sort__parent_cmp,
1027 	.se_snprintf	= hist_entry__parent_snprintf,
1028 	.se_width_idx	= HISTC_PARENT,
1029 };
1030 
1031 /* --sort cpu */
1032 
1033 static int64_t
1034 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
1035 {
1036 	return right->cpu - left->cpu;
1037 }
1038 
1039 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
1040 				    size_t size, unsigned int width)
1041 {
1042 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
1043 }
1044 
1045 static struct sort_entry sort_cpu = {
1046 	.se_header      = "CPU",
1047 	.se_cmp	        = sort__cpu_cmp,
1048 	.se_snprintf    = hist_entry__cpu_snprintf,
1049 	.se_width_idx	= HISTC_CPU,
1050 };
1051 
1052 /* --sort parallelism */
1053 
1054 static int64_t
1055 sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right)
1056 {
1057 	return right->parallelism - left->parallelism;
1058 }
1059 
1060 static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg)
1061 {
1062 	const unsigned long *parallelism_filter = arg;
1063 
1064 	if (type != HIST_FILTER__PARALLELISM)
1065 		return -1;
1066 
1067 	return test_bit(he->parallelism, parallelism_filter);
1068 }
1069 
1070 static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
1071 				    size_t size, unsigned int width)
1072 {
1073 	return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
1074 }
1075 
/* Sort/display entry for the "parallelism" sort key (supports filtering). */
static struct sort_entry sort_parallelism = {
	.se_header      = "Parallelism",
	.se_cmp	        = sort__parallelism_cmp,
	.se_filter	= hist_entry__parallelism_filter,
	.se_snprintf    = hist_entry__parallelism_snprintf,
	.se_width_idx	= HISTC_PARALLELISM,
};
1083 
1084 /* --sort cgroup_id */
1085 
1086 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
1087 {
1088 	return (int64_t)(right_dev - left_dev);
1089 }
1090 
1091 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
1092 {
1093 	return (int64_t)(right_ino - left_ino);
1094 }
1095 
1096 static int64_t
1097 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
1098 {
1099 	int64_t ret;
1100 
1101 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
1102 	if (ret != 0)
1103 		return ret;
1104 
1105 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
1106 				       left->cgroup_id.ino);
1107 }
1108 
1109 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
1110 					  char *bf, size_t size,
1111 					  unsigned int width __maybe_unused)
1112 {
1113 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
1114 			       he->cgroup_id.ino);
1115 }
1116 
/* Sort/display entry for the "cgroup_id" sort key. */
static struct sort_entry sort_cgroup_id = {
	.se_header      = "cgroup id (dev/inode)",
	.se_cmp	        = sort__cgroup_id_cmp,
	.se_snprintf    = hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};
1123 
1124 /* --sort cgroup */
1125 
1126 static int64_t
1127 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
1128 {
1129 	return right->cgroup - left->cgroup;
1130 }
1131 
/*
 * Print the cgroup name for the entry's cgroup id: "N/A" when the entry has
 * no cgroup, "unknown" when the id cannot be resolved in the machine's env.
 */
static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		/*
		 * NOTE(review): assumes he->ms.thread, its maps and machine
		 * are all non-NULL here — confirm callers guarantee this.
		 */
		struct cgroup *cgrp = cgroup__find(maps__machine(thread__maps(he->ms.thread))->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}
1149 
/* Sort/display entry for the "cgroup" (name) sort key. */
static struct sort_entry sort_cgroup = {
	.se_header      = "Cgroup",
	.se_cmp	        = sort__cgroup_cmp,
	.se_snprintf    = hist_entry__cgroup_snprintf,
	.se_width_idx	= HISTC_CGROUP,
};
1156 
1157 /* --sort socket */
1158 
1159 static int64_t
1160 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
1161 {
1162 	return right->socket - left->socket;
1163 }
1164 
static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	/*
	 * Zero-pad to width-3, leaving three columns of padding.
	 * NOTE(review): a negative precision (width < 3) is treated by
	 * printf as if omitted, so this stays safe for narrow columns.
	 */
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}
1170 
1171 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
1172 {
1173 	int sk = *(const int *)arg;
1174 
1175 	if (type != HIST_FILTER__SOCKET)
1176 		return -1;
1177 
1178 	return sk >= 0 && he->socket != sk;
1179 }
1180 
/* Sort/display entry for the "socket" sort key (supports --socket-filter). */
static struct sort_entry sort_socket = {
	.se_header      = "Socket",
	.se_cmp	        = sort__socket_cmp,
	.se_snprintf    = hist_entry__socket_snprintf,
	.se_filter      = hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};
1188 
1189 /* --sort time */
1190 
1191 static int64_t
1192 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1193 {
1194 	return right->time - left->time;
1195 }
1196 
1197 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1198 				    size_t size, unsigned int width)
1199 {
1200 	char he_time[32];
1201 
1202 	if (symbol_conf.nanosecs)
1203 		timestamp__scnprintf_nsec(he->time, he_time,
1204 					  sizeof(he_time));
1205 	else
1206 		timestamp__scnprintf_usec(he->time, he_time,
1207 					  sizeof(he_time));
1208 
1209 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1210 }
1211 
/* Sort/display entry for the "time" sort key. */
static struct sort_entry sort_time = {
	.se_header      = "Time",
	.se_cmp	        = sort__time_cmp,
	.se_snprintf    = hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
};
1218 
1219 /* --sort trace */
1220 
1221 #ifdef HAVE_LIBTRACEEVENT
1222 static char *get_trace_output(struct hist_entry *he)
1223 {
1224 	struct trace_seq seq;
1225 	struct evsel *evsel;
1226 	struct tep_record rec = {
1227 		.data = he->raw_data,
1228 		.size = he->raw_size,
1229 	};
1230 	struct tep_event *tp_format;
1231 
1232 	evsel = hists_to_evsel(he->hists);
1233 
1234 	trace_seq_init(&seq);
1235 	tp_format = evsel__tp_format(evsel);
1236 	if (tp_format) {
1237 		if (symbol_conf.raw_trace)
1238 			tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
1239 		else
1240 			tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
1241 	}
1242 
1243 	/*
1244 	 * Trim the buffer, it starts at 4KB and we're not going to
1245 	 * add anything more to this buffer.
1246 	 */
1247 	return realloc(seq.buffer, seq.len + 1);
1248 }
1249 
1250 static int64_t
1251 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1252 {
1253 	struct evsel *evsel;
1254 
1255 	evsel = hists_to_evsel(left->hists);
1256 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1257 		return 0;
1258 
1259 	if (left->trace_output == NULL)
1260 		left->trace_output = get_trace_output(left);
1261 	if (right->trace_output == NULL)
1262 		right->trace_output = get_trace_output(right);
1263 
1264 	return strcmp(right->trace_output, left->trace_output);
1265 }
1266 
1267 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1268 				    size_t size, unsigned int width)
1269 {
1270 	struct evsel *evsel;
1271 
1272 	evsel = hists_to_evsel(he->hists);
1273 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1274 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1275 
1276 	if (he->trace_output == NULL)
1277 		he->trace_output = get_trace_output(he);
1278 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1279 }
1280 
/* Sort/display entry for the "trace" sort key (libtraceevent builds only). */
static struct sort_entry sort_trace = {
	.se_header      = "Trace output",
	.se_cmp	        = sort__trace_cmp,
	.se_snprintf    = hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
1287 #endif /* HAVE_LIBTRACEEVENT */
1288 
1289 /* sort keys for branch stacks */
1290 
1291 static int64_t
1292 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1293 {
1294 	if (!left->branch_info || !right->branch_info)
1295 		return cmp_null(left->branch_info, right->branch_info);
1296 
1297 	return _sort__dso_cmp(left->branch_info->from.ms.map,
1298 			      right->branch_info->from.ms.map);
1299 }
1300 
1301 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1302 				    size_t size, unsigned int width)
1303 {
1304 	if (he->branch_info)
1305 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1306 						 bf, size, width);
1307 	else
1308 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1309 }
1310 
static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	/*
	 * Filter out (non-zero) when a DSO filter is active and this entry's
	 * branch source is missing or maps to a different DSO.
	 */
	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		map__dso(he->branch_info->from.ms.map) != dso);
}
1322 
1323 static int64_t
1324 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1325 {
1326 	if (!left->branch_info || !right->branch_info)
1327 		return cmp_null(left->branch_info, right->branch_info);
1328 
1329 	return _sort__dso_cmp(left->branch_info->to.ms.map,
1330 			      right->branch_info->to.ms.map);
1331 }
1332 
1333 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1334 				       size_t size, unsigned int width)
1335 {
1336 	if (he->branch_info)
1337 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1338 						 bf, size, width);
1339 	else
1340 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1341 }
1342 
static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	/*
	 * Filter out (non-zero) when a DSO filter is active and this entry's
	 * branch target is missing or maps to a different DSO.
	 */
	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		map__dso(he->branch_info->to.ms.map) != dso);
}
1354 
1355 static int64_t
1356 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1357 {
1358 	struct addr_map_symbol *from_l, *from_r;
1359 
1360 	if (!left->branch_info || !right->branch_info)
1361 		return cmp_null(left->branch_info, right->branch_info);
1362 
1363 	from_l = &left->branch_info->from;
1364 	from_r = &right->branch_info->from;
1365 
1366 	if (!from_l->ms.sym && !from_r->ms.sym)
1367 		return _sort__addr_cmp(from_l->addr, from_r->addr);
1368 
1369 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1370 }
1371 
1372 static int64_t
1373 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1374 {
1375 	struct addr_map_symbol *to_l, *to_r;
1376 
1377 	if (!left->branch_info || !right->branch_info)
1378 		return cmp_null(left->branch_info, right->branch_info);
1379 
1380 	to_l = &left->branch_info->to;
1381 	to_r = &right->branch_info->to;
1382 
1383 	if (!to_l->ms.sym && !to_r->ms.sym)
1384 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1385 
1386 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1387 }
1388 
1389 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1390 					 size_t size, unsigned int width)
1391 {
1392 	if (he->branch_info) {
1393 		struct addr_map_symbol *from = &he->branch_info->from;
1394 
1395 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1396 						 from->al_level, bf, size, width);
1397 	}
1398 
1399 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1400 }
1401 
1402 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1403 				       size_t size, unsigned int width)
1404 {
1405 	if (he->branch_info) {
1406 		struct addr_map_symbol *to = &he->branch_info->to;
1407 
1408 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1409 						 to->al_level, bf, size, width);
1410 	}
1411 
1412 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1413 }
1414 
static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	/*
	 * Filter out (non-zero) unless the branch source symbol name
	 * contains the filter substring.
	 */
	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}
1426 
static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	/*
	 * Filter out (non-zero) unless the branch target symbol name
	 * contains the filter substring.
	 */
	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
		        strstr(he->branch_info->to.ms.sym->name, sym));
}
1438 
/* Sort/display entry for the "dso_from" branch sort key. */
struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};
1446 
/* Sort/display entry for the "dso_to" branch sort key. */
struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};
1454 
/* Sort/display entry for the "symbol_from" branch sort key. */
struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};
1462 
/* Sort/display entry for the "symbol_to" branch sort key. */
struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};
1470 
1471 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1472 				     u64 ip, char level, char *bf, size_t size,
1473 				     unsigned int width)
1474 {
1475 	struct symbol *sym = ms->sym;
1476 	struct map *map = ms->map;
1477 	size_t ret = 0, offs;
1478 
1479 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1480 	if (sym && map) {
1481 		if (sym->type == STT_OBJECT) {
1482 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1483 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1484 					ip - map__unmap_ip(map, sym->start));
1485 		} else {
1486 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1487 					       width - ret,
1488 					       sym->name);
1489 			offs = ip - sym->start;
1490 			if (offs)
1491 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1492 		}
1493 	} else {
1494 		size_t len = BITS_PER_LONG / 4;
1495 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1496 				       len, ip);
1497 	}
1498 
1499 	return ret;
1500 }
1501 
1502 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1503 					 size_t size, unsigned int width)
1504 {
1505 	if (he->branch_info) {
1506 		struct addr_map_symbol *from = &he->branch_info->from;
1507 
1508 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1509 						 he->level, bf, size, width);
1510 	}
1511 
1512 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1513 }
1514 
1515 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1516 				       size_t size, unsigned int width)
1517 {
1518 	if (he->branch_info) {
1519 		struct addr_map_symbol *to = &he->branch_info->to;
1520 
1521 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1522 						 he->level, bf, size, width);
1523 	}
1524 
1525 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1526 }
1527 
1528 static int64_t
1529 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1530 {
1531 	struct addr_map_symbol *from_l;
1532 	struct addr_map_symbol *from_r;
1533 	int64_t ret;
1534 
1535 	if (!left->branch_info || !right->branch_info)
1536 		return cmp_null(left->branch_info, right->branch_info);
1537 
1538 	from_l = &left->branch_info->from;
1539 	from_r = &right->branch_info->from;
1540 
1541 	/*
1542 	 * comparing symbol address alone is not enough since it's a
1543 	 * relative address within a dso.
1544 	 */
1545 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1546 	if (ret != 0)
1547 		return ret;
1548 
1549 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1550 }
1551 
1552 static int64_t
1553 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1554 {
1555 	struct addr_map_symbol *to_l;
1556 	struct addr_map_symbol *to_r;
1557 	int64_t ret;
1558 
1559 	if (!left->branch_info || !right->branch_info)
1560 		return cmp_null(left->branch_info, right->branch_info);
1561 
1562 	to_l = &left->branch_info->to;
1563 	to_r = &right->branch_info->to;
1564 
1565 	/*
1566 	 * comparing symbol address alone is not enough since it's a
1567 	 * relative address within a dso.
1568 	 */
1569 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1570 	if (ret != 0)
1571 		return ret;
1572 
1573 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1574 }
1575 
/* Sort/display entry for the "addr_from" branch sort key. */
static struct sort_entry sort_addr_from = {
	.se_header	= "Source Address",
	.se_cmp		= sort__addr_from_cmp,
	.se_snprintf	= hist_entry__addr_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx	= HISTC_ADDR_FROM,
};
1583 
/* Sort/display entry for the "addr_to" branch sort key. */
static struct sort_entry sort_addr_to = {
	.se_header	= "Target Address",
	.se_cmp		= sort__addr_to_cmp,
	.se_snprintf	= hist_entry__addr_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx	= HISTC_ADDR_TO,
};
1591 
1592 
/*
 * Group entries by branch prediction outcome.  Returns 0 when both the
 * mispred and predicted flags match, 1 otherwise — i.e. this is a
 * same/different grouping predicate, not a signed three-way ordering.
 */
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}
1605 
1606 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1607 				    size_t size, unsigned int width){
1608 	static const char *out = "N/A";
1609 
1610 	if (he->branch_info) {
1611 		if (he->branch_info->flags.predicted)
1612 			out = "N";
1613 		else if (he->branch_info->flags.mispred)
1614 			out = "Y";
1615 	}
1616 
1617 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1618 }
1619 
/* Order by the basic-block cycle count recorded in the branch flags. */
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}
1629 
static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	/* "N/A" without branch info, "-" when the hardware reported 0 cycles. */
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}
1640 
/* Sort/display entry for the "cycles" branch sort key. */
static struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};
1647 
1648 /* --sort daddr_sym */
1649 int64_t
1650 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1651 {
1652 	uint64_t l = 0, r = 0;
1653 
1654 	if (left->mem_info)
1655 		l = mem_info__daddr(left->mem_info)->addr;
1656 	if (right->mem_info)
1657 		r = mem_info__daddr(right->mem_info)->addr;
1658 
1659 	return (int64_t)(r - l);
1660 }
1661 
/* Print the data address with its map/symbol annotation. */
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__daddr(he->mem_info)->addr;
		ms = &mem_info__daddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
1674 
1675 int64_t
1676 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1677 {
1678 	uint64_t l = 0, r = 0;
1679 
1680 	if (left->mem_info)
1681 		l = mem_info__iaddr(left->mem_info)->addr;
1682 	if (right->mem_info)
1683 		r = mem_info__iaddr(right->mem_info)->addr;
1684 
1685 	return (int64_t)(r - l);
1686 }
1687 
/* Print the instruction address with its map/symbol annotation. */
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__iaddr(he->mem_info)->addr;
		ms   = &mem_info__iaddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
1700 
1701 static int64_t
1702 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1703 {
1704 	struct map *map_l = NULL;
1705 	struct map *map_r = NULL;
1706 
1707 	if (left->mem_info)
1708 		map_l = mem_info__daddr(left->mem_info)->ms.map;
1709 	if (right->mem_info)
1710 		map_r = mem_info__daddr(right->mem_info)->ms.map;
1711 
1712 	return _sort__dso_cmp(map_l, map_r);
1713 }
1714 
1715 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1716 				    size_t size, unsigned int width)
1717 {
1718 	struct map *map = NULL;
1719 
1720 	if (he->mem_info)
1721 		map = mem_info__daddr(he->mem_info)->ms.map;
1722 
1723 	return _hist_entry__dso_snprintf(map, bf, size, width);
1724 }
1725 
1726 static int64_t
1727 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1728 {
1729 	union perf_mem_data_src data_src_l;
1730 	union perf_mem_data_src data_src_r;
1731 
1732 	if (left->mem_info)
1733 		data_src_l = *mem_info__data_src(left->mem_info);
1734 	else
1735 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1736 
1737 	if (right->mem_info)
1738 		data_src_r = *mem_info__data_src(right->mem_info);
1739 	else
1740 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1741 
1742 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1743 }
1744 
/* Print the lock attribute of the memory access ("Yes"/"No"/"N/A"). */
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}
1753 
1754 static int64_t
1755 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1756 {
1757 	union perf_mem_data_src data_src_l;
1758 	union perf_mem_data_src data_src_r;
1759 
1760 	if (left->mem_info)
1761 		data_src_l = *mem_info__data_src(left->mem_info);
1762 	else
1763 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1764 
1765 	if (right->mem_info)
1766 		data_src_r = *mem_info__data_src(right->mem_info);
1767 	else
1768 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1769 
1770 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1771 }
1772 
/* Print the decoded TLB access attributes of the memory sample. */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1781 
1782 static int64_t
1783 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1784 {
1785 	union perf_mem_data_src data_src_l;
1786 	union perf_mem_data_src data_src_r;
1787 
1788 	if (left->mem_info)
1789 		data_src_l = *mem_info__data_src(left->mem_info);
1790 	else
1791 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1792 
1793 	if (right->mem_info)
1794 		data_src_r = *mem_info__data_src(right->mem_info);
1795 	else
1796 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1797 
1798 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1799 }
1800 
/* Print the decoded memory hierarchy level of the access. */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1809 
1810 static int64_t
1811 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1812 {
1813 	union perf_mem_data_src data_src_l;
1814 	union perf_mem_data_src data_src_r;
1815 
1816 	if (left->mem_info)
1817 		data_src_l = *mem_info__data_src(left->mem_info);
1818 	else
1819 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1820 
1821 	if (right->mem_info)
1822 		data_src_r = *mem_info__data_src(right->mem_info);
1823 	else
1824 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1825 
1826 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1827 }
1828 
/* Print the decoded cache snoop attributes of the memory sample. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1837 
/*
 * Collate samples hitting the same data cacheline.  Ordering keys, in
 * sequence: presence of mem_info, event cpumode, daddr DSO identity,
 * pid (only for anonymous private userspace mappings), and finally the
 * cacheline address.
 */
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	struct dso *l_dso, *r_dso;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = mem_info__daddr(left->mem_info)->ms.map;
	r_map = mem_info__daddr(right->mem_info)->ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	l_dso = map__dso(l_map);
	r_dso = map__dso(r_map);
	rc = dso__cmp_id(l_dso, r_dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers or build ID are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */
	if (left->cpumode != PERF_RECORD_MISC_KERNEL && (map__flags(l_map) & MAP_SHARED) == 0) {
		const struct dso_id *dso_id = dso__id_const(l_dso);

		/* Fall back to the right side's id if the left lacks mmap2 data. */
		if (!dso_id->mmap2_valid)
			dso_id = dso__id_const(r_dso);

		if (!build_id__is_defined(&dso_id->build_id) &&
		    (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0))) {
			/* userspace anonymous */

			if (thread__pid(left->thread) > thread__pid(right->thread))
				return -1;
			if (thread__pid(left->thread) < thread__pid(right->thread))
				return 1;
		}
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
	r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
1902 
/*
 * Print the data cacheline address with map/symbol annotation.  The level
 * char is overridden: 's' for shared data mmaps, 'X' when unmapped.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = mem_info__daddr(he->mem_info)->ms.map;
		struct dso *dso = map ? map__dso(map) : NULL;
		const struct dso_id *dso_id = dso ? dso__id_const(dso) : &dso_id_empty;

		addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
		ms = &mem_info__daddr(he->mem_info)->ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		     map && !(map__prot(map) & PROT_EXEC) &&
		     (map__flags(map) & MAP_SHARED) &&
		     (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0)))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
1930 
/* Sort/display entry for the "mispredict" branch sort key. */
static struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};
1937 
/* Order by sample weight (shared by the local and global weight keys). */
static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}
1943 
1944 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1945 				    size_t size, unsigned int width)
1946 {
1947 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1948 }
1949 
/* Sort/display entry for the "local_weight" sort key. */
static struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};
1956 
1957 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1958 					      size_t size, unsigned int width)
1959 {
1960 	return repsep_snprintf(bf, size, "%-*llu", width,
1961 			       he->weight * he->stat.nr_events);
1962 }
1963 
/* Sort/display entry for the "weight" (global) sort key. */
static struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};
1970 
/* Order by instruction latency (shared by local and global ins_lat keys). */
static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
}
1976 
1977 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1978 					      size_t size, unsigned int width)
1979 {
1980 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1981 }
1982 
/* Sort/display entry for the "local_ins_lat" sort key. */
static struct sort_entry sort_local_ins_lat = {
	.se_header	= "Local INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
	.se_width_idx	= HISTC_LOCAL_INS_LAT,
};
1989 
1990 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1991 					       size_t size, unsigned int width)
1992 {
1993 	return repsep_snprintf(bf, size, "%-*u", width,
1994 			       he->ins_lat * he->stat.nr_events);
1995 }
1996 
/* Sort/display entry for the "ins_lat" (global) sort key. */
static struct sort_entry sort_global_ins_lat = {
	.se_header	= "INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
};
2003 
/* Order by pipeline stage cycles (weight3), shared by both p_stage_cyc keys. */
static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight3 - right->weight3;
}
2009 
2010 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
2011 					size_t size, unsigned int width)
2012 {
2013 	return repsep_snprintf(bf, size, "%-*u", width, he->weight3 * he->stat.nr_events);
2014 }
2015 
2016 
2017 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
2018 					size_t size, unsigned int width)
2019 {
2020 	return repsep_snprintf(bf, size, "%-*u", width, he->weight3);
2021 }
2022 
/* Sort/display entry for the "local_p_stage_cyc" sort key (arch specific). */
static struct sort_entry sort_local_p_stage_cyc = {
	.se_header      = "Local Pipeline Stage Cycle",
	.se_cmp         = sort__p_stage_cyc_cmp,
	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
};
2029 
/* Sort/display entry for the "p_stage_cyc" (global) sort key (arch specific). */
static struct sort_entry sort_global_p_stage_cyc = {
	.se_header      = "Pipeline Stage Cycle",
	.se_cmp         = sort__p_stage_cyc_cmp,
	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
};
2036 
/* Sort/display entry for the "symbol_daddr" mem sort key. */
static struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};
2043 
/* Sort/display entry for the "symbol_iaddr" mem sort key. */
static struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};
2050 
/* Sort/display entry for the "dso_daddr" mem sort key. */
static struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};
2057 
/* Sort/display entry for the "locked" mem sort key. */
static struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};
2064 
/* Sort/display entry for the "tlb" mem sort key. */
static struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};
2071 
/* Sort/display entry for the "mem" (access level) sort key. */
static struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};
2078 
/* Sort/display entry for the "snoop" mem sort key. */
static struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};
2085 
2086 static struct sort_entry sort_mem_dcacheline = {
2087 	.se_header	= "Data Cacheline",
2088 	.se_cmp		= sort__dcacheline_cmp,
2089 	.se_snprintf	= hist_entry__dcacheline_snprintf,
2090 	.se_width_idx	= HISTC_MEM_DCACHELINE,
2091 };
2092 
2093 static int64_t
2094 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
2095 {
2096 	union perf_mem_data_src data_src_l;
2097 	union perf_mem_data_src data_src_r;
2098 
2099 	if (left->mem_info)
2100 		data_src_l = *mem_info__data_src(left->mem_info);
2101 	else
2102 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
2103 
2104 	if (right->mem_info)
2105 		data_src_r = *mem_info__data_src(right->mem_info);
2106 	else
2107 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
2108 
2109 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
2110 }
2111 
2112 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
2113 					size_t size, unsigned int width)
2114 {
2115 	char out[16];
2116 
2117 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
2118 	return repsep_snprintf(bf, size, "%.*s", width, out);
2119 }
2120 
/* --sort blocked: reason the memory access was blocked */
static struct sort_entry sort_mem_blocked = {
	.se_header	= "Blocked",
	.se_cmp		= sort__blocked_cmp,
	.se_snprintf	= hist_entry__blocked_snprintf,
	.se_width_idx	= HISTC_MEM_BLOCKED,
};
2127 
2128 static int64_t
2129 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
2130 {
2131 	uint64_t l = 0, r = 0;
2132 
2133 	if (left->mem_info)
2134 		l = mem_info__daddr(left->mem_info)->phys_addr;
2135 	if (right->mem_info)
2136 		r = mem_info__daddr(right->mem_info)->phys_addr;
2137 
2138 	return (int64_t)(r - l);
2139 }
2140 
2141 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
2142 					   size_t size, unsigned int width)
2143 {
2144 	uint64_t addr = 0;
2145 	size_t ret = 0;
2146 	size_t len = BITS_PER_LONG / 4;
2147 
2148 	addr = mem_info__daddr(he->mem_info)->phys_addr;
2149 
2150 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
2151 
2152 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
2153 
2154 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
2155 
2156 	if (ret > width)
2157 		bf[width] = '\0';
2158 
2159 	return width;
2160 }
2161 
/* --sort phys_daddr: physical address of the data access */
static struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};
2168 
2169 static int64_t
2170 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2171 {
2172 	uint64_t l = 0, r = 0;
2173 
2174 	if (left->mem_info)
2175 		l = mem_info__daddr(left->mem_info)->data_page_size;
2176 	if (right->mem_info)
2177 		r = mem_info__daddr(right->mem_info)->data_page_size;
2178 
2179 	return (int64_t)(r - l);
2180 }
2181 
2182 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
2183 					  size_t size, unsigned int width)
2184 {
2185 	char str[PAGE_SIZE_NAME_LEN];
2186 
2187 	return repsep_snprintf(bf, size, "%-*s", width,
2188 			get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
2189 }
2190 
/* --sort data_page_size: page size backing the data address */
static struct sort_entry sort_mem_data_page_size = {
	.se_header	= "Data Page Size",
	.se_cmp		= sort__data_page_size_cmp,
	.se_snprintf	= hist_entry__data_page_size_snprintf,
	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
};
2197 
2198 static int64_t
2199 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2200 {
2201 	uint64_t l = left->code_page_size;
2202 	uint64_t r = right->code_page_size;
2203 
2204 	return (int64_t)(r - l);
2205 }
2206 
2207 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2208 					  size_t size, unsigned int width)
2209 {
2210 	char str[PAGE_SIZE_NAME_LEN];
2211 
2212 	return repsep_snprintf(bf, size, "%-*s", width,
2213 			       get_page_size_name(he->code_page_size, str));
2214 }
2215 
/* --sort code_page_size: page size backing the instruction address */
static struct sort_entry sort_code_page_size = {
	.se_header	= "Code Page Size",
	.se_cmp		= sort__code_page_size_cmp,
	.se_snprintf	= hist_entry__code_page_size_snprintf,
	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
};
2222 
2223 static int64_t
2224 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2225 {
2226 	if (!left->branch_info || !right->branch_info)
2227 		return cmp_null(left->branch_info, right->branch_info);
2228 
2229 	return left->branch_info->flags.abort !=
2230 		right->branch_info->flags.abort;
2231 }
2232 
2233 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2234 				    size_t size, unsigned int width)
2235 {
2236 	static const char *out = "N/A";
2237 
2238 	if (he->branch_info) {
2239 		if (he->branch_info->flags.abort)
2240 			out = "A";
2241 		else
2242 			out = ".";
2243 	}
2244 
2245 	return repsep_snprintf(bf, size, "%-*s", width, out);
2246 }
2247 
/* --sort abort: whether the branch aborted a transaction */
static struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};
2254 
2255 static int64_t
2256 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2257 {
2258 	if (!left->branch_info || !right->branch_info)
2259 		return cmp_null(left->branch_info, right->branch_info);
2260 
2261 	return left->branch_info->flags.in_tx !=
2262 		right->branch_info->flags.in_tx;
2263 }
2264 
2265 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2266 				    size_t size, unsigned int width)
2267 {
2268 	static const char *out = "N/A";
2269 
2270 	if (he->branch_info) {
2271 		if (he->branch_info->flags.in_tx)
2272 			out = "T";
2273 		else
2274 			out = ".";
2275 	}
2276 
2277 	return repsep_snprintf(bf, size, "%-*s", width, out);
2278 }
2279 
/* --sort in_tx: whether the branch executed inside a transaction */
static struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};
2286 
static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Raw difference of the PERF_TXN_* flag words. */
	return left->transaction - right->transaction;
}
2292 
static inline char *add_str(char *p, const char *str)
{
	/* Append str (with its NUL) at p and return the new end of string. */
	size_t n = strlen(str);

	memcpy(p, str, n + 1);
	return p + n;
}
2298 
/*
 * Display names for the PERF_TXN_* transaction flag bits.  Entries with
 * skip_for_len set are excluded from the worst-case column width computed
 * by hist_entry__transaction_len().  The list is NULL-name terminated.
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};
2314 
2315 int hist_entry__transaction_len(void)
2316 {
2317 	int i;
2318 	int len = 0;
2319 
2320 	for (i = 0; txbits[i].name; i++) {
2321 		if (!txbits[i].skip_for_len)
2322 			len += strlen(txbits[i].name);
2323 	}
2324 	len += 4; /* :XX<space> */
2325 	return len;
2326 }
2327 
2328 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2329 					    size_t size, unsigned int width)
2330 {
2331 	u64 t = he->transaction;
2332 	char buf[128];
2333 	char *p = buf;
2334 	int i;
2335 
2336 	buf[0] = 0;
2337 	for (i = 0; txbits[i].name; i++)
2338 		if (txbits[i].flag & t)
2339 			p = add_str(p, txbits[i].name);
2340 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2341 		p = add_str(p, "NEITHER ");
2342 	if (t & PERF_TXN_ABORT_MASK) {
2343 		sprintf(p, ":%" PRIx64,
2344 			(t & PERF_TXN_ABORT_MASK) >>
2345 			PERF_TXN_ABORT_SHIFT);
2346 		p += strlen(p);
2347 	}
2348 
2349 	return repsep_snprintf(bf, size, "%-*s", width, buf);
2350 }
2351 
/* --sort transaction: decoded PERF_TXN_* flags of the sample */
static struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
2358 
2359 /* --sort symbol_size */
2360 
static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	/* NULL symbols count as size 0; three-way compare the sizes. */
	int64_t size_l = sym_l ? (int64_t)symbol__size(sym_l) : 0;
	int64_t size_r = sym_r ? (int64_t)symbol__size(sym_r) : 0;

	if (size_l == size_r)
		return 0;
	return size_l < size_r ? -1 : 1;
}
2369 
static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* arguments deliberately swapped to reverse the comparison order */
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}
2375 
static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	/*
	 * symbol__size() does not return an int; cast explicitly so the
	 * "%d" conversion matches its argument (varargs type mismatch
	 * otherwise).
	 */
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       (int)symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}
2384 
static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	/* Thin wrapper extracting the symbol from the hist entry. */
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}
2390 
/* --sort symbol_size: size of the resolved symbol */
static struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};
2397 
2398 /* --sort dso_size */
2399 
static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	/* NULL maps count as size 0; three-way compare the mapped sizes. */
	int64_t size_l = map_l ? (int64_t)map__size(map_l) : 0;
	int64_t size_r = map_r ? (int64_t)map__size(map_r) : 0;

	if (size_l == size_r)
		return 0;
	return size_l < size_r ? -1 : 1;
}
2408 
static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* arguments deliberately swapped to reverse the comparison order */
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}
2414 
static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	/*
	 * map__size() returns a 64-bit value; printing it with "%d" was a
	 * varargs type mismatch (int expected).  Use the matching 64-bit
	 * conversion instead.
	 */
	if (map && map__dso(map))
		return repsep_snprintf(bf, bf_size, "%*" PRIu64, width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}
2423 
static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	/* Thin wrapper extracting the map from the hist entry. */
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}
2429 
/* --sort dso_size: size of the mapped DSO */
static struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};
2436 
2437 /* --sort addr */
2438 
2439 static int64_t
2440 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2441 {
2442 	u64 left_ip = left->ip;
2443 	u64 right_ip = right->ip;
2444 	struct map *left_map = left->ms.map;
2445 	struct map *right_map = right->ms.map;
2446 
2447 	if (left_map)
2448 		left_ip = map__unmap_ip(left_map, left_ip);
2449 	if (right_map)
2450 		right_ip = map__unmap_ip(right_map, right_ip);
2451 
2452 	return _sort__addr_cmp(left_ip, right_ip);
2453 }
2454 
2455 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2456 				     size_t size, unsigned int width)
2457 {
2458 	u64 ip = he->ip;
2459 	struct map *map = he->ms.map;
2460 
2461 	if (map)
2462 		ip = map__unmap_ip(map, ip);
2463 
2464 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2465 }
2466 
/* --sort addr: absolute sample address */
static struct sort_entry sort_addr = {
	.se_header	= "Address",
	.se_cmp		= sort__addr_cmp,
	.se_snprintf	= hist_entry__addr_snprintf,
	.se_width_idx	= HISTC_ADDR,
};
2473 
2474 /* --sort type */
2475 
/* Placeholder data type used when type resolution fails (see sort__type_init). */
struct annotated_data_type unknown_type = {
	.self = {
		.type_name = (char *)"(unknown)",
		.children = LIST_HEAD_INIT(unknown_type.self.children),
	},
};
2482 
static int64_t
sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* initial comparison is by address; type names are compared on collapse */
	return sort__addr_cmp(left, right);
}
2488 
2489 static void sort__type_init(struct hist_entry *he)
2490 {
2491 	if (he->mem_type)
2492 		return;
2493 
2494 	he->mem_type = hist_entry__get_data_type(he);
2495 	if (he->mem_type == NULL) {
2496 		he->mem_type = &unknown_type;
2497 		he->mem_type_off = 0;
2498 	}
2499 }
2500 
2501 static int64_t
2502 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2503 {
2504 	struct annotated_data_type *left_type = left->mem_type;
2505 	struct annotated_data_type *right_type = right->mem_type;
2506 
2507 	if (!left_type) {
2508 		sort__type_init(left);
2509 		left_type = left->mem_type;
2510 	}
2511 
2512 	if (!right_type) {
2513 		sort__type_init(right);
2514 		right_type = right->mem_type;
2515 	}
2516 
2517 	return strcmp(left_type->self.type_name, right_type->self.type_name);
2518 }
2519 
static int64_t
sort__type_sort(struct hist_entry *left, struct hist_entry *right)
{
	/* final sorting uses the same type-name ordering as collapsing */
	return sort__type_collapse(left, right);
}
2525 
2526 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2527 				     size_t size, unsigned int width)
2528 {
2529 	return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2530 }
2531 
/* --sort type: annotated data type of the memory access */
struct sort_entry sort_type = {
	.se_header	= "Data Type",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__type_collapse,
	.se_sort	= sort__type_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__type_snprintf,
	.se_width_idx	= HISTC_TYPE,
};
2541 
2542 /* --sort typeoff */
2543 
2544 static int64_t
2545 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2546 {
2547 	struct annotated_data_type *left_type = left->mem_type;
2548 	struct annotated_data_type *right_type = right->mem_type;
2549 	int64_t ret;
2550 
2551 	if (!left_type) {
2552 		sort__type_init(left);
2553 		left_type = left->mem_type;
2554 	}
2555 
2556 	if (!right_type) {
2557 		sort__type_init(right);
2558 		right_type = right->mem_type;
2559 	}
2560 
2561 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2562 	if (ret)
2563 		return ret;
2564 	return left->mem_type_off - right->mem_type_off;
2565 }
2566 
2567 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2568 				     size_t size, unsigned int width __maybe_unused)
2569 {
2570 	struct annotated_data_type *he_type = he->mem_type;
2571 	char buf[4096];
2572 
2573 	if (he_type == &unknown_type || he_type == &stackop_type ||
2574 	    he_type == &canary_type)
2575 		return repsep_snprintf(bf, size, "%s", he_type->self.type_name);
2576 
2577 	if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
2578 						  he->mem_type_off))
2579 		scnprintf(buf, sizeof(buf), "no field");
2580 
2581 	return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2582 			       he->mem_type_off, buf);
2583 }
2584 
/* --sort typeoff: data type plus the accessed member offset */
static struct sort_entry sort_type_offset = {
	.se_header	= "Data Type Offset",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__typeoff_sort,
	.se_sort	= sort__typeoff_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__typeoff_snprintf,
	.se_width_idx	= HISTC_TYPE_OFFSET,
};
2594 
2595 /* --sort typecln */
2596 
2597 static int
2598 hist_entry__cln_size(struct hist_entry *he)
2599 {
2600 	int ret = 0;
2601 
2602 	if (he && he->hists) {
2603 		struct evsel *evsel = hists_to_evsel(he->hists);
2604 
2605 		if (evsel) {
2606 			struct perf_session *session = evsel__session(evsel);
2607 
2608 			ret = session->header.env.cln_size;
2609 		}
2610 	}
2611 
2612 	if (ret < 1)
2613 		ret = DEFAULT_CACHELINE_SIZE; // avoid div/0 later
2614 
2615 	return ret;
2616 }
2617 
2618 static int64_t
2619 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2620 {
2621 	struct annotated_data_type *left_type = left->mem_type;
2622 	struct annotated_data_type *right_type = right->mem_type;
2623 	int64_t left_cln, right_cln;
2624 	int64_t cln_size_left = hist_entry__cln_size(left);
2625 	int64_t cln_size_right = hist_entry__cln_size(right);
2626 	int64_t ret;
2627 
2628 	if (!left_type) {
2629 		sort__type_init(left);
2630 		left_type = left->mem_type;
2631 	}
2632 
2633 	if (!right_type) {
2634 		sort__type_init(right);
2635 		right_type = right->mem_type;
2636 	}
2637 
2638 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2639 	if (ret)
2640 		return ret;
2641 
2642 	left_cln = left->mem_type_off / cln_size_left;
2643 	right_cln = right->mem_type_off / cln_size_right;
2644 	return left_cln - right_cln;
2645 }
2646 
2647 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2648 				     size_t size, unsigned int width __maybe_unused)
2649 {
2650 	struct annotated_data_type *he_type = he->mem_type;
2651 	int cln_size = hist_entry__cln_size(he);
2652 
2653 	return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2654 			       he->mem_type_off / cln_size);
2655 }
2656 
/* --sort typecln: data type plus the accessed cache line within it */
static struct sort_entry sort_type_cacheline = {
	.se_header	= "Data Type Cacheline",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__typecln_sort,
	.se_sort	= sort__typecln_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__typecln_snprintf,
	.se_width_idx	= HISTC_TYPE_CACHELINE,
};
2666 
2667 
/* Maps a --sort key name to its sort_entry implementation. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};
2673 
static int arch_support_sort_key(const char *sort_key, struct perf_env *env)
{
	const char *arch = perf_env__arch(env);

	/* pipeline-stage-cycle keys are only meaningful on x86 and powerpc */
	if (strcmp(arch, "x86") != 0 && strcmp(arch, "powerpc") != 0)
		return 0;

	return !strcmp(sort_key, "p_stage_cyc") ||
	       !strcmp(sort_key, "local_p_stage_cyc");
}
2686 
static const char *arch_perf_header_entry(const char *se_header, struct perf_env *env)
{
	/* Per-arch renames of the generic pipeline/latency column headers. */
	static const struct {
		const char *arch;
		const char *from;
		const char *to;
	} renames[] = {
		{ "x86",     "Local Pipeline Stage Cycle", "Local Retire Latency" },
		{ "x86",     "Pipeline Stage Cycle",       "Retire Latency" },
		{ "powerpc", "Local INSTR Latency",        "Finish Cyc" },
		{ "powerpc", "INSTR Latency",              "Global Finish_cyc" },
		{ "powerpc", "Local Pipeline Stage Cycle", "Dispatch Cyc" },
		{ "powerpc", "Pipeline Stage Cycle",       "Global Dispatch_cyc" },
	};
	const char *arch = perf_env__arch(env);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(renames); i++) {
		if (!strcmp(arch, renames[i].arch) &&
		    !strcmp(se_header, renames[i].from))
			return renames[i].to;
	}

	/* no arch-specific name: keep the generic header */
	return se_header;
}
2708 
static void sort_dimension_add_dynamic_header(struct sort_dimension *sd, struct perf_env *env)
{
	/* Swap in the arch-specific column header, if one exists. */
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header, env);
}
2713 
/* Table indexed by the SORT_* enum; .name values are the --sort keys. */
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_TGID, "tgid", sort_tgid),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_COMM_NODIGIT, "comm_nodigit", sort_comm_nodigit),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
#ifdef HAVE_LIBTRACEEVENT
	DIM(SORT_TRACE, "trace", sort_trace),
#endif
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
	DIM(SORT_ADDR, "addr", sort_addr),
	/* retire_lat keys share the p_stage_cyc sort entries */
	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
	DIM(SORT_SIMD, "simd", sort_simd),
	DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
	DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
	DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
	DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
	DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
};

#undef DIM
2757 
/* Branch-stack sort keys; indices are offset by __SORT_BRANCH_STACK. */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
	DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
		"callchain_branch_predicted",
		sort_callchain_branch_predicted),
	DIM(SORT_CALLCHAIN_BRANCH_ABORT,
		"callchain_branch_abort",
		sort_callchain_branch_abort),
	DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
		"callchain_branch_cycles",
		sort_callchain_branch_cycles)
};

#undef DIM
2786 
/* Memory-mode sort keys; indices are offset by __SORT_MEMORY_MODE. */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};

#undef DIM
2804 
/* Maps a sort/output key name to a perf_hpp_fmt template. */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
	int			was_taken;
	int			mem_mode;	/* set via DIM_MEM below */
};
2812 
/* Overhead/latency/weight columns backed by the generic hpp formats. */
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
#define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__LATENCY, "latency"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
	DIM(PERF_HPP__WEIGHT1, "weight1"),
	DIM(PERF_HPP__WEIGHT2, "weight2"),
	DIM(PERF_HPP__WEIGHT3, "weight3"),
	/* aliases for weight_struct */
	DIM(PERF_HPP__WEIGHT2, "ins_lat"),
	DIM(PERF_HPP__WEIGHT3, "retire_lat"),
	DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
	/* used for output only when SORT_MODE__MEM */
	DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
	DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
	DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
	DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
	DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
};

#undef DIM_MEM
#undef DIM
2844 
/* A perf_hpp_fmt column backed by a classic sort_entry. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;	/* must be first: container_of() is used */
	struct sort_entry *se;
};
2849 
2850 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2851 {
2852 	struct hpp_sort_entry *hse;
2853 
2854 	if (!perf_hpp__is_sort_entry(fmt))
2855 		return;
2856 
2857 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2858 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2859 }
2860 
2861 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2862 			      struct hists *hists, int line,
2863 			      int *span __maybe_unused)
2864 {
2865 	struct hpp_sort_entry *hse;
2866 	size_t len = fmt->user_len;
2867 	const char *hdr = "";
2868 
2869 	if (line == hists->hpp_list->nr_header_lines - 1)
2870 		hdr = fmt->name;
2871 
2872 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2873 
2874 	if (!len)
2875 		len = hists__col_len(hists, hse->se->se_width_idx);
2876 
2877 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
2878 }
2879 
2880 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2881 			     struct perf_hpp *hpp __maybe_unused,
2882 			     struct hists *hists)
2883 {
2884 	struct hpp_sort_entry *hse;
2885 	size_t len = fmt->user_len;
2886 
2887 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2888 
2889 	if (!len)
2890 		len = hists__col_len(hists, hse->se->se_width_idx);
2891 
2892 	return len;
2893 }
2894 
2895 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2896 			     struct hist_entry *he)
2897 {
2898 	struct hpp_sort_entry *hse;
2899 	size_t len = fmt->user_len;
2900 
2901 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2902 
2903 	if (!len)
2904 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2905 
2906 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2907 }
2908 
2909 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2910 			       struct hist_entry *a, struct hist_entry *b)
2911 {
2912 	struct hpp_sort_entry *hse;
2913 
2914 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2915 	return hse->se->se_cmp(a, b);
2916 }
2917 
2918 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2919 				    struct hist_entry *a, struct hist_entry *b)
2920 {
2921 	struct hpp_sort_entry *hse;
2922 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2923 
2924 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2925 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2926 	return collapse_fn(a, b);
2927 }
2928 
2929 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2930 				struct hist_entry *a, struct hist_entry *b)
2931 {
2932 	struct hpp_sort_entry *hse;
2933 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2934 
2935 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2936 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2937 	return sort_fn(a, b);
2938 }
2939 
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	/* sort-entry formats are identified by their unique header callback */
	return format->header == __sort__hpp_header;
}
2944 
/*
 * Generate perf_hpp__is_<key>_entry() predicates that test whether a
 * format wraps the corresponding sort_<key> entry defined in this file.
 */
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

#ifdef HAVE_LIBTRACEEVENT
MK_SORT_ENTRY_CHK(trace)
#else
/* no trace sort entry without libtraceevent */
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
#endif
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
MK_SORT_ENTRY_CHK(parallelism)
2972 
2973 
2974 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2975 {
2976 	struct hpp_sort_entry *hse_a;
2977 	struct hpp_sort_entry *hse_b;
2978 
2979 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2980 		return false;
2981 
2982 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2983 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2984 
2985 	return hse_a->se == hse_b->se;
2986 }
2987 
static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	/* fmt is embedded in the hpp_sort_entry; free the whole allocation */
	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}
2995 
2996 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2997 {
2998 	struct hpp_sort_entry *hse;
2999 
3000 	if (!perf_hpp__is_sort_entry(fmt))
3001 		return;
3002 
3003 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
3004 
3005 	if (hse->se->se_init)
3006 		hse->se->se_init(he);
3007 }
3008 
/*
 * Allocate a hpp format wrapping the given sort dimension's sort_entry,
 * wiring in the generic __sort__hpp_* callbacks.  Returns NULL on OOM.
 */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;
	hse->hpp.init = hse_init;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}
3043 
static void hpp_free(struct perf_hpp_fmt *fmt)
{
	/* fmt was allocated with memdup() in __hpp_dimension__alloc_hpp() */
	free(fmt);
}
3048 
3049 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
3050 						       int level)
3051 {
3052 	struct perf_hpp_fmt *fmt;
3053 
3054 	fmt = memdup(hd->fmt, sizeof(*fmt));
3055 	if (fmt) {
3056 		INIT_LIST_HEAD(&fmt->list);
3057 		INIT_LIST_HEAD(&fmt->sort_list);
3058 		fmt->free = hpp_free;
3059 		fmt->level = level;
3060 	}
3061 
3062 	return fmt;
3063 }
3064 
3065 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
3066 {
3067 	struct perf_hpp_fmt *fmt;
3068 	struct hpp_sort_entry *hse;
3069 	int ret = -1;
3070 	int r;
3071 
3072 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
3073 		if (!perf_hpp__is_sort_entry(fmt))
3074 			continue;
3075 
3076 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3077 		if (hse->se->se_filter == NULL)
3078 			continue;
3079 
3080 		/*
3081 		 * hist entry is filtered if any of sort key in the hpp list
3082 		 * is applied.  But it should skip non-matched filter types.
3083 		 */
3084 		r = hse->se->se_filter(he, type, arg);
3085 		if (r >= 0) {
3086 			if (ret < 0)
3087 				ret = 0;
3088 			ret |= r;
3089 		}
3090 	}
3091 
3092 	return ret;
3093 }
3094 
3095 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
3096 					  struct perf_hpp_list *list,
3097 					  int level)
3098 {
3099 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
3100 
3101 	if (hse == NULL)
3102 		return -1;
3103 
3104 	perf_hpp_list__register_sort_field(list, &hse->hpp);
3105 	return 0;
3106 }
3107 
3108 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
3109 					    struct perf_hpp_list *list,
3110 					    int level)
3111 {
3112 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
3113 
3114 	if (hse == NULL)
3115 		return -1;
3116 
3117 	perf_hpp_list__column_register(list, &hse->hpp);
3118 	return 0;
3119 }
3120 
3121 #ifndef HAVE_LIBTRACEEVENT
/* Without libtraceevent there are no dynamic (tracepoint field) entries. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
/* Without libtraceevent no dynamic entry can belong to any hists. */
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
				     struct hists *hists __maybe_unused)
{
	return false;
}
3131 #else
/*
 * A sort/output entry showing the value of a single tracepoint format
 * field, created on demand from the event's format description.
 */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;	/* embedded format; recovered via container_of() */
	struct evsel *evsel;		/* tracepoint event this field belongs to */
	struct tep_format_field *field;	/* field description from libtraceevent */
	unsigned dynamic_len;		/* widest value seen so far (for column sizing) */
	bool raw_trace;			/* print the raw field instead of pretty output */
};
3139 
3140 static int hde_width(struct hpp_dynamic_entry *hde)
3141 {
3142 	if (!hde->hpp.len) {
3143 		int len = hde->dynamic_len;
3144 		int namelen = strlen(hde->field->name);
3145 		int fieldlen = hde->field->size;
3146 
3147 		if (namelen > len)
3148 			len = namelen;
3149 
3150 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
3151 			/* length for print hex numbers */
3152 			fieldlen = hde->field->size * 2 + 2;
3153 		}
3154 		if (fieldlen > len)
3155 			len = fieldlen;
3156 
3157 		hde->hpp.len = len;
3158 	}
3159 	return hde->hpp.len;
3160 }
3161 
/*
 * Scan @he's pretty-printed trace output for this entry's field and
 * record the widest value seen, so the column can be sized to fit.
 *
 * The output is treated as space-separated tokens starting with the
 * field name (NOTE(review): the single character after the name is
 * skipped blindly, presumably a '=' separator -- confirm against
 * get_trace_output()).
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	/* raw mode prints fixed-width hex; nothing to measure */
	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		/* token ends at the next space, or at end of string */
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			/* skip "<name><sep>" to get to the value */
			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
3204 
3205 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3206 			      struct hists *hists __maybe_unused,
3207 			      int line __maybe_unused,
3208 			      int *span __maybe_unused)
3209 {
3210 	struct hpp_dynamic_entry *hde;
3211 	size_t len = fmt->user_len;
3212 
3213 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3214 
3215 	if (!len)
3216 		len = hde_width(hde);
3217 
3218 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
3219 }
3220 
3221 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
3222 			     struct perf_hpp *hpp __maybe_unused,
3223 			     struct hists *hists __maybe_unused)
3224 {
3225 	struct hpp_dynamic_entry *hde;
3226 	size_t len = fmt->user_len;
3227 
3228 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3229 
3230 	if (!len)
3231 		len = hde_width(hde);
3232 
3233 	return len;
3234 }
3235 
3236 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3237 {
3238 	struct hpp_dynamic_entry *hde;
3239 
3240 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3241 
3242 	return hists_to_evsel(hists) == hde->evsel;
3243 }
3244 
3245 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3246 			     struct hist_entry *he)
3247 {
3248 	struct hpp_dynamic_entry *hde;
3249 	size_t len = fmt->user_len;
3250 	char *str, *pos;
3251 	struct tep_format_field *field;
3252 	size_t namelen;
3253 	bool last = false;
3254 	int ret;
3255 
3256 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3257 
3258 	if (!len)
3259 		len = hde_width(hde);
3260 
3261 	if (hde->raw_trace)
3262 		goto raw_field;
3263 
3264 	if (!he->trace_output)
3265 		he->trace_output = get_trace_output(he);
3266 
3267 	field = hde->field;
3268 	namelen = strlen(field->name);
3269 	str = he->trace_output;
3270 
3271 	while (str) {
3272 		pos = strchr(str, ' ');
3273 		if (pos == NULL) {
3274 			last = true;
3275 			pos = str + strlen(str);
3276 		}
3277 
3278 		if (!strncmp(str, field->name, namelen)) {
3279 			str += namelen + 1;
3280 			str = strndup(str, pos - str);
3281 
3282 			if (str == NULL)
3283 				return scnprintf(hpp->buf, hpp->size,
3284 						 "%*.*s", len, len, "ERROR");
3285 			break;
3286 		}
3287 
3288 		if (last)
3289 			str = NULL;
3290 		else
3291 			str = pos + 1;
3292 	}
3293 
3294 	if (str == NULL) {
3295 		struct trace_seq seq;
3296 raw_field:
3297 		trace_seq_init(&seq);
3298 		tep_print_field(&seq, he->raw_data, hde->field);
3299 		str = seq.buffer;
3300 	}
3301 
3302 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3303 	free(str);
3304 	return ret;
3305 }
3306 
/*
 * Sort/collapse comparator for dynamic entries: compare the raw bytes
 * of the field in both entries.
 *
 * For TEP_FIELD_IS_DYNAMIC fields the payload word holds the location:
 * low 16 bits are the offset and the next 16 bits the size.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		/*
		 * NOTE(review): offset/size are decoded from entry 'a'
		 * only; this assumes 'b' locates the field identically.
		 */
		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;
		/* relative offsets are based past the field's own header */
		if (tep_field_is_relative(field->flags))
			offset += field->offset + field->size;
		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
3335 
3336 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3337 {
3338 	return fmt->cmp == __sort__hde_cmp;
3339 }
3340 
3341 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3342 {
3343 	struct hpp_dynamic_entry *hde_a;
3344 	struct hpp_dynamic_entry *hde_b;
3345 
3346 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3347 		return false;
3348 
3349 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3350 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3351 
3352 	return hde_a->field == hde_b->field;
3353 }
3354 
3355 static void hde_free(struct perf_hpp_fmt *fmt)
3356 {
3357 	struct hpp_dynamic_entry *hde;
3358 
3359 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3360 	free(hde);
3361 }
3362 
3363 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3364 {
3365 	struct hpp_dynamic_entry *hde;
3366 
3367 	if (!perf_hpp__is_dynamic_entry(fmt))
3368 		return;
3369 
3370 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3371 	update_dynamic_len(hde, he);
3372 }
3373 
3374 static struct hpp_dynamic_entry *
3375 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3376 		      int level)
3377 {
3378 	struct hpp_dynamic_entry *hde;
3379 
3380 	hde = malloc(sizeof(*hde));
3381 	if (hde == NULL) {
3382 		pr_debug("Memory allocation failed\n");
3383 		return NULL;
3384 	}
3385 
3386 	hde->evsel = evsel;
3387 	hde->field = field;
3388 	hde->dynamic_len = 0;
3389 
3390 	hde->hpp.name = field->name;
3391 	hde->hpp.header = __sort__hde_header;
3392 	hde->hpp.width  = __sort__hde_width;
3393 	hde->hpp.entry  = __sort__hde_entry;
3394 	hde->hpp.color  = NULL;
3395 
3396 	hde->hpp.init = __sort__hde_init;
3397 	hde->hpp.cmp = __sort__hde_cmp;
3398 	hde->hpp.collapse = __sort__hde_cmp;
3399 	hde->hpp.sort = __sort__hde_cmp;
3400 	hde->hpp.equal = __sort__hde_equal;
3401 	hde->hpp.free = hde_free;
3402 
3403 	INIT_LIST_HEAD(&hde->hpp.list);
3404 	INIT_LIST_HEAD(&hde->hpp.sort_list);
3405 	hde->hpp.elide = false;
3406 	hde->hpp.len = 0;
3407 	hde->hpp.user_len = 0;
3408 	hde->hpp.level = level;
3409 
3410 	return hde;
3411 }
3412 #endif /* HAVE_LIBTRACEEVENT */
3413 
3414 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3415 {
3416 	struct perf_hpp_fmt *new_fmt = NULL;
3417 
3418 	if (perf_hpp__is_sort_entry(fmt)) {
3419 		struct hpp_sort_entry *hse, *new_hse;
3420 
3421 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3422 		new_hse = memdup(hse, sizeof(*hse));
3423 		if (new_hse)
3424 			new_fmt = &new_hse->hpp;
3425 #ifdef HAVE_LIBTRACEEVENT
3426 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
3427 		struct hpp_dynamic_entry *hde, *new_hde;
3428 
3429 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3430 		new_hde = memdup(hde, sizeof(*hde));
3431 		if (new_hde)
3432 			new_fmt = &new_hde->hpp;
3433 #endif
3434 	} else {
3435 		new_fmt = memdup(fmt, sizeof(*fmt));
3436 	}
3437 
3438 	INIT_LIST_HEAD(&new_fmt->list);
3439 	INIT_LIST_HEAD(&new_fmt->sort_list);
3440 
3441 	return new_fmt;
3442 }
3443 
/*
 * Split "[<event>.]<field>[/<opt>]" in place.  Missing parts are
 * returned as NULL ('.' and '/' are overwritten with NUL bytes).
 * Always returns 0.
 */
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *dot = strchr(str, '.');
	char *slash;

	if (dot != NULL) {
		*dot = '\0';
		*event = str;
		*field = dot + 1;
	} else {
		*event = NULL;
		*field = str;
	}

	slash = strchr(*field, '/');
	if (slash != NULL) {
		*slash = '\0';
		*opt = slash + 1;
	} else {
		*opt = NULL;
	}

	return 0;
}
3468 
3469 /* find match evsel using a given event name.  The event name can be:
3470  *   1. '%' + event index (e.g. '%1' for first event)
3471  *   2. full event name (e.g. sched:sched_switch)
3472  *   3. partial event name (should not contain ':')
3473  */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1: "%<idx>" -- 1-based index into the event list */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		/*
		 * NOTE(review): "%0" or a non-numeric suffix makes
		 * strtol() return 0, which falls through to the first
		 * event -- confirm this is intended.
		 */
		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2: exact name match */
		if (full_name && evsel__name_is(pos, event_name))
			return pos;
		/* case 3: substring match, must be unambiguous */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}
3512 
3513 #ifdef HAVE_LIBTRACEEVENT
3514 static int __dynamic_dimension__add(struct evsel *evsel,
3515 				    struct tep_format_field *field,
3516 				    bool raw_trace, int level)
3517 {
3518 	struct hpp_dynamic_entry *hde;
3519 
3520 	hde = __alloc_dynamic_entry(evsel, field, level);
3521 	if (hde == NULL)
3522 		return -ENOMEM;
3523 
3524 	hde->raw_trace = raw_trace;
3525 
3526 	perf_hpp__register_sort_field(&hde->hpp);
3527 	return 0;
3528 }
3529 
3530 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3531 {
3532 	int ret;
3533 	struct tep_event *tp_format = evsel__tp_format(evsel);
3534 	struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3535 	while (field) {
3536 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3537 		if (ret < 0)
3538 			return ret;
3539 
3540 		field = field->next;
3541 	}
3542 	return 0;
3543 }
3544 
3545 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3546 				  int level)
3547 {
3548 	int ret;
3549 	struct evsel *evsel;
3550 
3551 	evlist__for_each_entry(evlist, evsel) {
3552 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3553 			continue;
3554 
3555 		ret = add_evsel_fields(evsel, raw_trace, level);
3556 		if (ret < 0)
3557 			return ret;
3558 	}
3559 	return 0;
3560 }
3561 
/*
 * Add a dynamic sort key named @field_name for every tracepoint in
 * @evlist that has such a field.  Returns -ESRCH if no event matched,
 * 0 on success, or the first error from adding a dimension.
 */
static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;	/* stays -ESRCH when nothing matches */
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct tep_event *tp_format;
		struct tep_format_field *field;

		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		tp_format = evsel__tp_format(evsel);
		if (tp_format == NULL)
			continue;

		field = tep_find_any_field(tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
3589 #endif /* HAVE_LIBTRACEEVENT */
3590 
3591 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3592 			     int level)
3593 {
3594 	char *str, *event_name, *field_name, *opt_name;
3595 	struct evsel *evsel;
3596 	bool raw_trace = symbol_conf.raw_trace;
3597 	int ret = 0;
3598 
3599 	if (evlist == NULL)
3600 		return -ENOENT;
3601 
3602 	str = strdup(tok);
3603 	if (str == NULL)
3604 		return -ENOMEM;
3605 
3606 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3607 		ret = -EINVAL;
3608 		goto out;
3609 	}
3610 
3611 	if (opt_name) {
3612 		if (strcmp(opt_name, "raw")) {
3613 			pr_debug("unsupported field option %s\n", opt_name);
3614 			ret = -EINVAL;
3615 			goto out;
3616 		}
3617 		raw_trace = true;
3618 	}
3619 
3620 #ifdef HAVE_LIBTRACEEVENT
3621 	if (!strcmp(field_name, "trace_fields")) {
3622 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
3623 		goto out;
3624 	}
3625 
3626 	if (event_name == NULL) {
3627 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3628 		goto out;
3629 	}
3630 #else
3631 	evlist__for_each_entry(evlist, evsel) {
3632 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3633 			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3634 			ret = -ENOTSUP;
3635 		}
3636 	}
3637 
3638 	if (ret) {
3639 		pr_err("\n");
3640 		goto out;
3641 	}
3642 #endif
3643 
3644 	evsel = find_evsel(evlist, event_name);
3645 	if (evsel == NULL) {
3646 		pr_debug("Cannot find event: %s\n", event_name);
3647 		ret = -ENOENT;
3648 		goto out;
3649 	}
3650 
3651 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3652 		pr_debug("%s is not a tracepoint event\n", event_name);
3653 		ret = -EINVAL;
3654 		goto out;
3655 	}
3656 
3657 #ifdef HAVE_LIBTRACEEVENT
3658 	if (!strcmp(field_name, "*")) {
3659 		ret = add_evsel_fields(evsel, raw_trace, level);
3660 	} else {
3661 		struct tep_event *tp_format = evsel__tp_format(evsel);
3662 		struct tep_format_field *field =
3663 			tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
3664 
3665 		if (field == NULL) {
3666 			pr_debug("Cannot find event field for %s.%s\n",
3667 				 event_name, field_name);
3668 			return -ENOENT;
3669 		}
3670 
3671 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3672 	}
3673 #else
3674 	(void)level;
3675 	(void)raw_trace;
3676 #endif /* HAVE_LIBTRACEEVENT */
3677 
3678 out:
3679 	free(str);
3680 	return ret;
3681 }
3682 
/*
 * Apply the side effects a sort dimension has on the hpp list and
 * global state: compile the parent regex, flag which entity kinds
 * (sym/dso/comm/...) are in use, and request collapsing when needed.
 */
static int __sort_dimension__update(struct sort_dimension *sd,
				    struct perf_hpp_list *list)
{
	if (sd->entry == &sort_parent && parent_pattern) {
		int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
		if (ret) {
			char err[BUFSIZ];

			regerror(ret, &parent_regex, err, sizeof(err));
			pr_err("Invalid regex: %s\n%s", parent_pattern, err);
			return -EINVAL;
		}
		list->parent = 1;
	} else if (sd->entry == &sort_sym) {
		list->sym = 1;
		/*
		 * perf diff displays the performance difference amongst
		 * two or more perf.data files. Those files could come
		 * from different binaries. So we should not compare
		 * their ips, but the name of symbol.
		 */
		if (sort__mode == SORT_MODE__DIFF)
			sd->entry->se_collapse = sort__sym_sort;

	} else if (sd->entry == &sort_sym_offset) {
		list->sym = 1;
	} else if (sd->entry == &sort_dso) {
		list->dso = 1;
	} else if (sd->entry == &sort_socket) {
		list->socket = 1;
	} else if (sd->entry == &sort_thread) {
		list->thread = 1;
	} else if (sd->entry == &sort_comm) {
		list->comm = 1;
	} else if (sd->entry == &sort_comm_nodigit) {
		list->comm_nodigit = list->comm = 1;
	} else if (sd->entry == &sort_type_offset) {
		/* "typeoff" needs member-level data-type annotation */
		symbol_conf.annotate_data_member = true;
	} else if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) {
		list->sym = 1;
	} else if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) {
		/* cannot sort by cacheline if its size is unknown */
		return -EINVAL;
	} else if (sd->entry == &sort_mem_daddr_sym) {
		list->sym = 1;
	}

	/* any dimension with a collapse callback requires a collapse pass */
	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	return 0;
}
3734 
3735 static int __sort_dimension__add(struct sort_dimension *sd,
3736 				 struct perf_hpp_list *list,
3737 				 int level)
3738 {
3739 	if (sd->taken)
3740 		return 0;
3741 
3742 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3743 		return -1;
3744 
3745 	if (__sort_dimension__update(sd, list) < 0)
3746 		return -1;
3747 
3748 	sd->taken = 1;
3749 
3750 	return 0;
3751 }
3752 
3753 static int __hpp_dimension__add(struct hpp_dimension *hd,
3754 				struct perf_hpp_list *list,
3755 				int level)
3756 {
3757 	struct perf_hpp_fmt *fmt;
3758 
3759 	if (hd->taken)
3760 		return 0;
3761 
3762 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3763 	if (!fmt)
3764 		return -1;
3765 
3766 	hd->taken = 1;
3767 	hd->was_taken = 1;
3768 	perf_hpp_list__register_sort_field(list, fmt);
3769 	return 0;
3770 }
3771 
3772 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3773 					struct sort_dimension *sd,
3774 					int level)
3775 {
3776 	if (sd->taken)
3777 		return 0;
3778 
3779 	if (__sort_dimension__add_hpp_output(sd, list, level) < 0)
3780 		return -1;
3781 
3782 	if (__sort_dimension__update(sd, list) < 0)
3783 		return -1;
3784 
3785 	sd->taken = 1;
3786 	return 0;
3787 }
3788 
3789 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3790 				       struct hpp_dimension *hd,
3791 				       int level)
3792 {
3793 	struct perf_hpp_fmt *fmt;
3794 
3795 	if (hd->taken)
3796 		return 0;
3797 
3798 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3799 	if (!fmt)
3800 		return -1;
3801 
3802 	hd->taken = 1;
3803 	perf_hpp_list__column_register(list, fmt);
3804 	return 0;
3805 }
3806 
/*
 * Add the hpp dimension at index @col to the global output columns.
 * When @implicit, only dimensions that were previously taken as sort
 * keys (was_taken) are added, so no new columns appear implicitly.
 */
int hpp_dimension__add_output(unsigned col, bool implicit)
{
	struct hpp_dimension *hd;

	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	hd = &hpp_sort_dimensions[col];
	if (implicit && !hd->was_taken)
		return 0;
	return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0);
}
3817 
/*
 * Resolve the sort key @tok and add it to @list.  The key is matched,
 * in order, against the common, branch-stack, memory and hpp dimension
 * tables, then treated as a dynamic tracepoint field.  Matching is a
 * case-insensitive prefix match on the key name.
 *
 * Returns 0 on success, -EINVAL for a key invalid in the current sort
 * mode, and -ESRCH for an unknown key.
 */
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist, struct perf_env *env,
			int level)
{
	unsigned int i, j;

	/*
	 * Check to see if there are any arch specific
	 * sort dimensions not applicable for the current
	 * architecture. If so, Skip that sort key since
	 * we don't want to display it in the output fields.
	 */
	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
		if (!strcmp(arch_specific_sort_keys[j], tok) &&
		    !arch_support_sort_key(tok, env)) {
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* some keys get arch-dependent headers (e.g. ins_lat) */
		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
				sort_dimension_add_dynamic_header(sd, env);
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/*
		 * Branch-stack keys require branch mode, except the
		 * callchain_branch_* keys which are allowed anywhere.
		 */
		if ((sort__mode != SORT_MODE__BRANCH) &&
			strncasecmp(tok, "callchain_branch_predicted",
				    strlen(tok)) &&
			strncasecmp(tok, "callchain_branch_abort",
				    strlen(tok)) &&
			strncasecmp(tok, "callchain_branch_cycles",
				    strlen(tok)))
			return -EINVAL;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	/* last resort: a tracepoint field spec like "event.field" */
	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
3897 
3898 /* This should match with sort_dimension__add() above */
/* This should match with sort_dimension__add() above */
static bool is_hpp_sort_key(const char *key, struct perf_env *env)
{
	unsigned i;

	/* unsupported arch-specific keys are silently dropped, not hpp */
	for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
		if (!strcmp(arch_specific_sort_keys[i], key) &&
		    !arch_support_sort_key(key, env)) {
			return false;
		}
	}

	/* common dimensions take precedence over hpp ones */
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (sd->name && !strncasecmp(key, sd->name, strlen(key)))
			return false;
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (!strncasecmp(key, hd->name, strlen(key)))
			return true;
	}
	return false;
}
3925 
/*
 * Parse the sort-order string @str (destructively) and add each key to
 * @list.  Keys separated by ',' or ' ' get increasing hierarchy levels;
 * keys inside a '{...}' group share one level; consecutive hpp (output)
 * keys are forced onto the same level as well.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist, struct perf_env *env)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	int prev_level = 0;
	bool in_group = false;
	bool prev_was_hpp = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			/* grouped keys stay on the current level */
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			/* terminate the current token in place */
			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			if (is_hpp_sort_key(tok, env)) {
				/* keep output (hpp) sort keys in the same level */
				if (prev_was_hpp) {
					bool next_same = (level == next_level);

					level = prev_level;
					next_level = next_same ? level : level+1;
				}
				prev_was_hpp = true;
			} else {
				prev_was_hpp = false;
			}

			ret = sort_dimension__add(list, tok, evlist, env, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
			prev_level = level;
		}

		level = next_level;
	} while (tmp);

	return ret;
}
3988 
/*
 * Pick the default sort order for the current sort mode.  When every
 * event in @evlist is a tracepoint, switch to tracepoint mode (and to
 * "trace_fields" if raw trace output was requested).
 */
static const char *get_default_sort_order(struct evlist *evlist)
{
	/* indexed by enum sort_mode -- keep in sync with it */
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
4022 
/*
 * Handle a '+'-prefixed --sort value: expand it to the default sort
 * order followed by the user's additional keys.  A strict (no '+')
 * order is left untouched.
 */
static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	/* a lone '+' adds nothing and is rejected */
	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
4052 
4053 /*
4054  * Adds 'pre,' prefix into 'str' is 'pre' is
4055  * not already part of 'str'.
4056  */
4057 static char *prefix_if_not_in(const char *pre, char *str)
4058 {
4059 	char *n;
4060 
4061 	if (!str || strstr(str, pre))
4062 		return str;
4063 
4064 	if (asprintf(&n, "%s,%s", pre, str) < 0)
4065 		n = NULL;
4066 
4067 	free(str);
4068 	return n;
4069 }
4070 
/*
 * Prepend the implicit overhead/latency (and *_children) sort keys to
 * @keys unless the user already mentioned them.  Takes and returns
 * ownership of @keys; may return NULL on allocation failure.  perf diff
 * manages its own columns, so no keys are prepended there.
 */
static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	if (symbol_conf.prefer_latency) {
		/* latency ends up first: each prefix lands ahead of the last */
		keys = prefix_if_not_in("overhead", keys);
		keys = prefix_if_not_in("latency", keys);
		if (symbol_conf.cumulate_callchain) {
			keys = prefix_if_not_in("overhead_children", keys);
			keys = prefix_if_not_in("latency_children", keys);
		}
	} else if (!keys || (!strstr(keys, "overhead") &&
			!strstr(keys, "latency"))) {
		if (symbol_conf.enable_latency)
			keys = prefix_if_not_in("latency", keys);
		keys = prefix_if_not_in("overhead", keys);
		if (symbol_conf.cumulate_callchain) {
			if (symbol_conf.enable_latency)
				keys = prefix_if_not_in("latency_children", keys);
			keys = prefix_if_not_in("overhead_children", keys);
		}
	}

	return keys;
}
4097 
/*
 * Build the global sort key list: resolve the effective sort order
 * (user-specified, '+'-extended, or mode default), prepend the implicit
 * overhead keys, and register every key with perf_hpp_list.
 */
static int __setup_sorting(struct evlist *evlist, struct perf_env *env)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	/* setup_sort_list() tokenizes in place, so work on a copy */
	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		/* setup_overhead() frees the old 'str' on failure */
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist, env);

	free(str);
	return ret;
}
4143 
4144 void perf_hpp__set_elide(int idx, bool elide)
4145 {
4146 	struct perf_hpp_fmt *fmt;
4147 	struct hpp_sort_entry *hse;
4148 
4149 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
4150 		if (!perf_hpp__is_sort_entry(fmt))
4151 			continue;
4152 
4153 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
4154 		if (hse->se->se_width_idx == idx) {
4155 			fmt->elide = elide;
4156 			break;
4157 		}
4158 	}
4159 }
4160 
4161 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
4162 {
4163 	if (list && strlist__nr_entries(list) == 1) {
4164 		if (fp != NULL)
4165 			fprintf(fp, "# %s: %s\n", list_name,
4166 				strlist__entry(list, 0)->s);
4167 		return true;
4168 	}
4169 	return false;
4170 }
4171 
/*
 * Decide whether the column with width index @idx should be elided
 * based on the active symbol/dso/comm filters; branch-specific columns
 * are only considered in branch sort mode.
 */
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
	case HISTC_COMM_NODIGIT:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	/* the remaining columns only exist in branch mode */
	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}
4208 
/*
 * Compute the elide flag for every sort entry from the active filters,
 * optionally printing the elided values to @output.  If that would hide
 * every sort column, un-elide all of them again.
 */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		/* at least one column stays visible: nothing to revert */
		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
4241 
/*
 * Resolve the --fields key @tok and add it as an output column of
 * @list.  hpp dimensions stay on the current *level; any other key
 * bumps the level first so it forms its own hierarchy layer.
 *
 * Returns 0 on success, -EINVAL for a key invalid in the current sort
 * mode, and -ESRCH for an unknown key.
 */
int output_field_add(struct perf_hpp_list *list, const char *tok, int *level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		if (!strcasecmp(tok, "weight"))
			ui__warning("--fields weight shows the average value unlike in the --sort key.\n");

		/* memory-only hpp fields fall through to the tables below */
		if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY)
			continue;

		return __hpp_dimension__add_output(list, hd, *level);
	}

	/*
	 * A non-output field will increase level so that it can be in a
	 * different hierarchy.
	 */
	(*level)++;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd, *level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd, *level);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd, *level);
	}

	return -ESRCH;
}
4302 
/*
 * Split a --fields string on commas/spaces and add each token as an
 * output column.  Stops and reports at the first bad key; returns the
 * last output_field_add() result (0 when everything was accepted).
 */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *saveptr = NULL;
	char *tok = strtok_r(str, ", ", &saveptr);
	int level = 0;
	int ret = 0;

	while (tok != NULL) {
		ret = output_field_add(list, tok, &level);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		}
		if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
		tok = strtok_r(NULL, ", ", &saveptr);
	}

	return ret;
}
4323 
4324 void reset_dimensions(void)
4325 {
4326 	unsigned int i;
4327 
4328 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
4329 		common_sort_dimensions[i].taken = 0;
4330 
4331 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
4332 		hpp_sort_dimensions[i].taken = 0;
4333 
4334 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
4335 		bstack_sort_dimensions[i].taken = 0;
4336 
4337 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
4338 		memory_sort_dimensions[i].taken = 0;
4339 }
4340 
/*
 * A "strict" order string replaces the default order entirely; a
 * leading '+' means "append to the default", which is not strict.
 * NULL is treated as not strict.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return *order != '+';
}
4345 
4346 static int __setup_output_field(void)
4347 {
4348 	char *str, *strp;
4349 	int ret = -EINVAL;
4350 
4351 	if (field_order == NULL)
4352 		return 0;
4353 
4354 	strp = str = strdup(field_order);
4355 	if (str == NULL) {
4356 		pr_err("Not enough memory to setup output fields");
4357 		return -ENOMEM;
4358 	}
4359 
4360 	if (!is_strict_order(field_order))
4361 		strp++;
4362 
4363 	if (!strlen(strp)) {
4364 		ui__error("Invalid --fields key: `+'");
4365 		goto out;
4366 	}
4367 
4368 	ret = setup_output_list(&perf_hpp_list, strp);
4369 
4370 out:
4371 	free(str);
4372 	return ret;
4373 }
4374 
/*
 * Top-level entry to configure sorting and output columns for @evlist:
 * parse the sort order, optionally add the "parent" key, parse the
 * field order, and then cross-copy sort keys and output fields so both
 * lists agree.  The steps below are order-dependent.
 *
 * Returns 0 on success, a negative error code on failure (-1 from the
 * hists format setup).
 */
int setup_sorting(struct evlist *evlist, struct perf_env *env)
{
	int err;

	err = __setup_sorting(evlist, env);
	if (err < 0)
		return err;

	/* A non-default parent pattern implies sorting by parent too. */
	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, env, -1);
		if (err < 0)
			return err;
	}

	/* Allow the same dimensions to be re-added as output fields below. */
	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist);
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}
4416 
4417 void reset_output_field(void)
4418 {
4419 	perf_hpp_list.need_collapse = 0;
4420 	perf_hpp_list.parent = 0;
4421 	perf_hpp_list.sym = 0;
4422 	perf_hpp_list.dso = 0;
4423 
4424 	field_order = NULL;
4425 	sort_order = NULL;
4426 
4427 	reset_dimensions();
4428 	perf_hpp__reset_output_field(&perf_hpp_list);
4429 }
4430 
4431 #define INDENT (3*8 + 1)
4432 
4433 static void add_key(struct strbuf *sb, const char *str, int *llen)
4434 {
4435 	if (!str)
4436 		return;
4437 
4438 	if (*llen >= 75) {
4439 		strbuf_addstr(sb, "\n\t\t\t ");
4440 		*llen = INDENT;
4441 	}
4442 	strbuf_addf(sb, " %s", str);
4443 	*llen += strlen(str) + 1;
4444 }
4445 
4446 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4447 			    int *llen)
4448 {
4449 	int i;
4450 
4451 	for (i = 0; i < n; i++)
4452 		add_key(sb, s[i].name, llen);
4453 }
4454 
4455 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4456 				int *llen)
4457 {
4458 	int i;
4459 
4460 	for (i = 0; i < n; i++)
4461 		add_key(sb, s[i].name, llen);
4462 }
4463 
4464 char *sort_help(const char *prefix, enum sort_mode mode)
4465 {
4466 	struct strbuf sb;
4467 	char *s;
4468 	int len = strlen(prefix) + INDENT;
4469 
4470 	strbuf_init(&sb, 300);
4471 	strbuf_addstr(&sb, prefix);
4472 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
4473 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
4474 	add_sort_string(&sb, common_sort_dimensions,
4475 			    ARRAY_SIZE(common_sort_dimensions), &len);
4476 	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4477 		add_sort_string(&sb, bstack_sort_dimensions,
4478 				ARRAY_SIZE(bstack_sort_dimensions), &len);
4479 	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4480 		add_sort_string(&sb, memory_sort_dimensions,
4481 				ARRAY_SIZE(memory_sort_dimensions), &len);
4482 	s = strbuf_detach(&sb, NULL);
4483 	strbuf_release(&sb);
4484 	return s;
4485 }
4486