xref: /linux/tools/perf/util/sort.c (revision 5e0266f0e5f57617472d5aac4013f58a3ef264ac)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "annotate.h"
27 #include "event.h"
28 #include "time-utils.h"
29 #include "cgroup.h"
30 #include "machine.h"
31 #include "trace-event.h"
32 #include <linux/kernel.h>
33 #include <linux/string.h>
34 
35 #ifdef HAVE_LIBTRACEEVENT
36 #include <traceevent/event-parse.h>
37 #endif
38 
39 regex_t		parent_regex;
40 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
41 const char	*parent_pattern = default_parent_pattern;
42 const char	*default_sort_order = "comm,dso,symbol";
43 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
44 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
45 const char	default_top_sort_order[] = "dso,symbol";
46 const char	default_diff_sort_order[] = "dso,symbol";
47 const char	default_tracepoint_sort_order[] = "trace";
48 const char	*sort_order;
49 const char	*field_order;
50 regex_t		ignore_callees_regex;
51 int		have_ignore_callees = 0;
52 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
53 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
54 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
55 
56 /*
57  * Some architectures have an Adjacent Cacheline Prefetch feature, which
58  * behaves as if the cacheline size were doubled. Enable this flag to
59  * check things at double-cacheline granularity.
60  */
61 bool chk_double_cl;
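
/*
 * Illustrative sketch (editor's note, not part of the original file),
 * assuming 64-byte cachelines: with chk_double_cl set, cl_address()
 * masks with a 128-byte granule, so two addresses in different 64-byte
 * lines can still fall into the same "Data Cacheline" bucket:
 *
 *	cl_address(0x1020, false) == 0x1000	// different 64B lines
 *	cl_address(0x1060, false) == 0x1040
 *	cl_address(0x1020, true)  == 0x1000	// same 128B double line
 *	cl_address(0x1060, true)  == 0x1000
 */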
62 
63 /*
64  * Replaces all occurrences of a char used with the:
65  *
66  * -t, --field-separator
67  *
68  * option, which uses a special separator character and doesn't pad with
69  * spaces, replacing all occurrences of this separator in symbol names (and
70  * other output) with a '.' character, which is thus the only invalid separator.
71  */
72 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
73 {
74 	int n;
75 	va_list ap;
76 
77 	va_start(ap, fmt);
78 	n = vsnprintf(bf, size, fmt, ap);
79 	if (symbol_conf.field_sep && n > 0) {
80 		char *sep = bf;
81 
82 		while (1) {
83 			sep = strchr(sep, *symbol_conf.field_sep);
84 			if (sep == NULL)
85 				break;
86 			*sep = '.';
87 		}
88 	}
89 	va_end(ap);
90 
91 	if (n >= (int)size)
92 		return size - 1;
93 	return n;
94 }
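
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * with symbol_conf.field_sep set to ",", repsep_snprintf() keeps the
 * separated output parseable by rewriting any ',' that the formatted
 * arguments produce:
 *
 *	char buf[64];
 *
 *	repsep_snprintf(buf, sizeof(buf), "%s", "foo<int, long>");
 *	// buf now holds "foo<int. long>" -- the ',' became '.'
 */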
95 
96 static int64_t cmp_null(const void *l, const void *r)
97 {
98 	if (!l && !r)
99 		return 0;
100 	else if (!l)
101 		return -1;
102 	else
103 		return 1;
104 }
105 
106 /* --sort pid */
107 
108 static int64_t
109 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
110 {
111 	return right->thread->tid - left->thread->tid;
112 }
113 
114 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
115 				       size_t size, unsigned int width)
116 {
117 	const char *comm = thread__comm_str(he->thread);
118 
119 	width = max(7U, width) - 8;
120 	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
121 			       width, width, comm ?: "");
122 }
123 
124 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
125 {
126 	const struct thread *th = arg;
127 
128 	if (type != HIST_FILTER__THREAD)
129 		return -1;
130 
131 	return th && he->thread != th;
132 }
133 
134 struct sort_entry sort_thread = {
135 	.se_header	= "    Pid:Command",
136 	.se_cmp		= sort__thread_cmp,
137 	.se_snprintf	= hist_entry__thread_snprintf,
138 	.se_filter	= hist_entry__thread_filter,
139 	.se_width_idx	= HISTC_THREAD,
140 };
141 
142 /* --sort comm */
143 
144 /*
145  * We can't use pointer comparison in functions below,
146  * because it gives different results based on pointer
147  * values, which could break some sorting assumptions.
148  */
149 static int64_t
150 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
151 {
152 	return strcmp(comm__str(right->comm), comm__str(left->comm));
153 }
154 
155 static int64_t
156 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
157 {
158 	return strcmp(comm__str(right->comm), comm__str(left->comm));
159 }
160 
161 static int64_t
162 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
163 {
164 	return strcmp(comm__str(right->comm), comm__str(left->comm));
165 }
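
/*
 * Illustrative note (editor's sketch, not part of the original file):
 * a pointer comparison such as
 *
 *	return (int64_t)(right->comm - left->comm);
 *
 * would order entries by allocation address, which varies from run to
 * run and would make "--sort comm" output non-deterministic; comparing
 * the resolved strings keeps the order stable.
 */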
166 
167 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
168 				     size_t size, unsigned int width)
169 {
170 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
171 }
172 
173 struct sort_entry sort_comm = {
174 	.se_header	= "Command",
175 	.se_cmp		= sort__comm_cmp,
176 	.se_collapse	= sort__comm_collapse,
177 	.se_sort	= sort__comm_sort,
178 	.se_snprintf	= hist_entry__comm_snprintf,
179 	.se_filter	= hist_entry__thread_filter,
180 	.se_width_idx	= HISTC_COMM,
181 };
182 
183 /* --sort dso */
184 
185 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
186 {
187 	struct dso *dso_l = map_l ? map_l->dso : NULL;
188 	struct dso *dso_r = map_r ? map_r->dso : NULL;
189 	const char *dso_name_l, *dso_name_r;
190 
191 	if (!dso_l || !dso_r)
192 		return cmp_null(dso_r, dso_l);
193 
194 	if (verbose > 0) {
195 		dso_name_l = dso_l->long_name;
196 		dso_name_r = dso_r->long_name;
197 	} else {
198 		dso_name_l = dso_l->short_name;
199 		dso_name_r = dso_r->short_name;
200 	}
201 
202 	return strcmp(dso_name_l, dso_name_r);
203 }
204 
205 static int64_t
206 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
207 {
208 	return _sort__dso_cmp(right->ms.map, left->ms.map);
209 }
210 
211 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
212 				     size_t size, unsigned int width)
213 {
214 	if (map && map->dso) {
215 		const char *dso_name = verbose > 0 ? map->dso->long_name :
216 			map->dso->short_name;
217 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
218 	}
219 
220 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
221 }
222 
223 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
224 				    size_t size, unsigned int width)
225 {
226 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
227 }
228 
229 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
230 {
231 	const struct dso *dso = arg;
232 
233 	if (type != HIST_FILTER__DSO)
234 		return -1;
235 
236 	return dso && (!he->ms.map || he->ms.map->dso != dso);
237 }
238 
239 struct sort_entry sort_dso = {
240 	.se_header	= "Shared Object",
241 	.se_cmp		= sort__dso_cmp,
242 	.se_snprintf	= hist_entry__dso_snprintf,
243 	.se_filter	= hist_entry__dso_filter,
244 	.se_width_idx	= HISTC_DSO,
245 };
246 
247 /* --sort symbol */
248 
249 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
250 {
251 	return (int64_t)(right_ip - left_ip);
252 }
253 
254 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
255 {
256 	if (!sym_l || !sym_r)
257 		return cmp_null(sym_l, sym_r);
258 
259 	if (sym_l == sym_r)
260 		return 0;
261 
262 	if (sym_l->inlined || sym_r->inlined) {
263 		int ret = strcmp(sym_l->name, sym_r->name);
264 
265 		if (ret)
266 			return ret;
267 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
268 			return 0;
269 	}
270 
271 	if (sym_l->start != sym_r->start)
272 		return (int64_t)(sym_r->start - sym_l->start);
273 
274 	return (int64_t)(sym_r->end - sym_l->end);
275 }
276 
277 static int64_t
278 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
279 {
280 	int64_t ret;
281 
282 	if (!left->ms.sym && !right->ms.sym)
283 		return _sort__addr_cmp(left->ip, right->ip);
284 
285 	/*
286 	 * comparing symbol address alone is not enough since it's a
287 	 * relative address within a dso.
288 	 */
289 	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
290 		ret = sort__dso_cmp(left, right);
291 		if (ret != 0)
292 			return ret;
293 	}
294 
295 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
296 }
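
/*
 * Illustrative example (editor's sketch, hypothetical names): "main" in
 * /usr/bin/foo and "bar_init" in libbar.so can both start at the
 * dso-relative address 0x1040, so comparing symbol addresses without
 * first comparing the dso would wrongly fold the two entries together.
 */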
297 
298 static int64_t
299 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
300 {
301 	if (!left->ms.sym || !right->ms.sym)
302 		return cmp_null(left->ms.sym, right->ms.sym);
303 
304 	return strcmp(right->ms.sym->name, left->ms.sym->name);
305 }
306 
307 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
308 				     u64 ip, char level, char *bf, size_t size,
309 				     unsigned int width)
310 {
311 	struct symbol *sym = ms->sym;
312 	struct map *map = ms->map;
313 	size_t ret = 0;
314 
315 	if (verbose > 0) {
316 		char o = map ? dso__symtab_origin(map->dso) : '!';
317 		u64 rip = ip;
318 
319 		if (map && map->dso && map->dso->kernel
320 		    && map->dso->adjust_symbols)
321 			rip = map->unmap_ip(map, ip);
322 
323 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
324 				       BITS_PER_LONG / 4 + 2, rip, o);
325 	}
326 
327 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
328 	if (sym && map) {
329 		if (sym->type == STT_OBJECT) {
330 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
331 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
332 					ip - map->unmap_ip(map, sym->start));
333 		} else {
334 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
335 					       width - ret,
336 					       sym->name);
337 			if (sym->inlined)
338 				ret += repsep_snprintf(bf + ret, size - ret,
339 						       " (inlined)");
340 		}
341 	} else {
342 		size_t len = BITS_PER_LONG / 4;
343 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
344 				       len, ip);
345 	}
346 
347 	return ret;
348 }
349 
350 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
351 {
352 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
353 					 he->level, bf, size, width);
354 }
355 
356 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
357 {
358 	const char *sym = arg;
359 
360 	if (type != HIST_FILTER__SYMBOL)
361 		return -1;
362 
363 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
364 }
365 
366 struct sort_entry sort_sym = {
367 	.se_header	= "Symbol",
368 	.se_cmp		= sort__sym_cmp,
369 	.se_sort	= sort__sym_sort,
370 	.se_snprintf	= hist_entry__sym_snprintf,
371 	.se_filter	= hist_entry__sym_filter,
372 	.se_width_idx	= HISTC_SYMBOL,
373 };
374 
375 /* --sort srcline */
376 
377 char *hist_entry__srcline(struct hist_entry *he)
378 {
379 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
380 }
381 
382 static int64_t
383 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
384 {
385 	int64_t ret;
386 
387 	ret = _sort__addr_cmp(left->ip, right->ip);
388 	if (ret)
389 		return ret;
390 
391 	return sort__dso_cmp(left, right);
392 }
393 
394 static int64_t
395 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
396 {
397 	if (!left->srcline)
398 		left->srcline = hist_entry__srcline(left);
399 	if (!right->srcline)
400 		right->srcline = hist_entry__srcline(right);
401 
402 	return strcmp(right->srcline, left->srcline);
403 }
404 
405 static int64_t
406 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
407 {
408 	return sort__srcline_collapse(left, right);
409 }
410 
411 static void
412 sort__srcline_init(struct hist_entry *he)
413 {
414 	if (!he->srcline)
415 		he->srcline = hist_entry__srcline(he);
416 }
417 
418 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
419 					size_t size, unsigned int width)
420 {
421 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
422 }
423 
424 struct sort_entry sort_srcline = {
425 	.se_header	= "Source:Line",
426 	.se_cmp		= sort__srcline_cmp,
427 	.se_collapse	= sort__srcline_collapse,
428 	.se_sort	= sort__srcline_sort,
429 	.se_init	= sort__srcline_init,
430 	.se_snprintf	= hist_entry__srcline_snprintf,
431 	.se_width_idx	= HISTC_SRCLINE,
432 };
433 
434 /* --sort srcline_from */
435 
436 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
437 {
438 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
439 }
440 
441 static int64_t
442 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
443 {
444 	return left->branch_info->from.addr - right->branch_info->from.addr;
445 }
446 
447 static int64_t
448 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
449 {
450 	if (!left->branch_info->srcline_from)
451 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
452 
453 	if (!right->branch_info->srcline_from)
454 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
455 
456 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
457 }
458 
459 static int64_t
460 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
461 {
462 	return sort__srcline_from_collapse(left, right);
463 }
464 
465 static void sort__srcline_from_init(struct hist_entry *he)
466 {
467 	if (!he->branch_info->srcline_from)
468 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
469 }
470 
471 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
472 					size_t size, unsigned int width)
473 {
474 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
475 }
476 
477 struct sort_entry sort_srcline_from = {
478 	.se_header	= "From Source:Line",
479 	.se_cmp		= sort__srcline_from_cmp,
480 	.se_collapse	= sort__srcline_from_collapse,
481 	.se_sort	= sort__srcline_from_sort,
482 	.se_init	= sort__srcline_from_init,
483 	.se_snprintf	= hist_entry__srcline_from_snprintf,
484 	.se_width_idx	= HISTC_SRCLINE_FROM,
485 };
486 
487 /* --sort srcline_to */
488 
489 static int64_t
490 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
491 {
492 	return left->branch_info->to.addr - right->branch_info->to.addr;
493 }
494 
495 static int64_t
496 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
497 {
498 	if (!left->branch_info->srcline_to)
499 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
500 
501 	if (!right->branch_info->srcline_to)
502 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
503 
504 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
505 }
506 
507 static int64_t
508 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
509 {
510 	return sort__srcline_to_collapse(left, right);
511 }
512 
513 static void sort__srcline_to_init(struct hist_entry *he)
514 {
515 	if (!he->branch_info->srcline_to)
516 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
517 }
518 
519 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
520 					size_t size, unsigned int width)
521 {
522 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
523 }
524 
525 struct sort_entry sort_srcline_to = {
526 	.se_header	= "To Source:Line",
527 	.se_cmp		= sort__srcline_to_cmp,
528 	.se_collapse	= sort__srcline_to_collapse,
529 	.se_sort	= sort__srcline_to_sort,
530 	.se_init	= sort__srcline_to_init,
531 	.se_snprintf	= hist_entry__srcline_to_snprintf,
532 	.se_width_idx	= HISTC_SRCLINE_TO,
533 };
534 
535 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
536 					size_t size, unsigned int width)
537 {
538 
539 	struct symbol *sym = he->ms.sym;
540 	struct annotation *notes;
541 	double ipc = 0.0, coverage = 0.0;
542 	char tmp[64];
543 
544 	if (!sym)
545 		return repsep_snprintf(bf, size, "%-*s", width, "-");
546 
547 	notes = symbol__annotation(sym);
548 
549 	if (notes->hit_cycles)
550 		ipc = notes->hit_insn / ((double)notes->hit_cycles);
551 
552 	if (notes->total_insn) {
553 		coverage = notes->cover_insn * 100.0 /
554 			((double)notes->total_insn);
555 	}
556 
557 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
558 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
559 }
560 
561 struct sort_entry sort_sym_ipc = {
562 	.se_header	= "IPC   [IPC Coverage]",
563 	.se_cmp		= sort__sym_cmp,
564 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
565 	.se_width_idx	= HISTC_SYMBOL_IPC,
566 };
567 
568 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
569 					     __maybe_unused,
570 					     char *bf, size_t size,
571 					     unsigned int width)
572 {
573 	char tmp[64];
574 
575 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
576 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
577 }
578 
579 struct sort_entry sort_sym_ipc_null = {
580 	.se_header	= "IPC   [IPC Coverage]",
581 	.se_cmp		= sort__sym_cmp,
582 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
583 	.se_width_idx	= HISTC_SYMBOL_IPC,
584 };
585 
586 /* --sort srcfile */
587 
588 static char no_srcfile[1];
589 
590 static char *hist_entry__get_srcfile(struct hist_entry *e)
591 {
592 	char *sf, *p;
593 	struct map *map = e->ms.map;
594 
595 	if (!map)
596 		return no_srcfile;
597 
598 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
599 			 e->ms.sym, false, true, true, e->ip);
600 	if (!strcmp(sf, SRCLINE_UNKNOWN))
601 		return no_srcfile;
602 	p = strchr(sf, ':');
603 	if (p && *sf) {
604 		*p = 0;
605 		return sf;
606 	}
607 	free(sf);
608 	return no_srcfile;
609 }
610 
611 static int64_t
612 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
613 {
614 	if (!left->srcfile)
615 		left->srcfile = hist_entry__get_srcfile(left);
616 	if (!right->srcfile)
617 		right->srcfile = hist_entry__get_srcfile(right);
618 
619 	return strcmp(right->srcfile, left->srcfile);
620 }
621 
622 static int64_t
623 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
624 {
625 	if (!left->srcfile)
626 		left->srcfile = hist_entry__get_srcfile(left);
627 	if (!right->srcfile)
628 		right->srcfile = hist_entry__get_srcfile(right);
629 
630 	return strcmp(right->srcfile, left->srcfile);
631 }
632 
633 static int64_t
634 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
635 {
636 	return sort__srcfile_collapse(left, right);
637 }
638 
639 static void sort__srcfile_init(struct hist_entry *he)
640 {
641 	if (!he->srcfile)
642 		he->srcfile = hist_entry__get_srcfile(he);
643 }
644 
645 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
646 					size_t size, unsigned int width)
647 {
648 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
649 }
650 
651 struct sort_entry sort_srcfile = {
652 	.se_header	= "Source File",
653 	.se_cmp		= sort__srcfile_cmp,
654 	.se_collapse	= sort__srcfile_collapse,
655 	.se_sort	= sort__srcfile_sort,
656 	.se_init	= sort__srcfile_init,
657 	.se_snprintf	= hist_entry__srcfile_snprintf,
658 	.se_width_idx	= HISTC_SRCFILE,
659 };
660 
661 /* --sort parent */
662 
663 static int64_t
664 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
665 {
666 	struct symbol *sym_l = left->parent;
667 	struct symbol *sym_r = right->parent;
668 
669 	if (!sym_l || !sym_r)
670 		return cmp_null(sym_l, sym_r);
671 
672 	return strcmp(sym_r->name, sym_l->name);
673 }
674 
675 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
676 				       size_t size, unsigned int width)
677 {
678 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
679 			      he->parent ? he->parent->name : "[other]");
680 }
681 
682 struct sort_entry sort_parent = {
683 	.se_header	= "Parent symbol",
684 	.se_cmp		= sort__parent_cmp,
685 	.se_snprintf	= hist_entry__parent_snprintf,
686 	.se_width_idx	= HISTC_PARENT,
687 };
688 
689 /* --sort cpu */
690 
691 static int64_t
692 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
693 {
694 	return right->cpu - left->cpu;
695 }
696 
697 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
698 				    size_t size, unsigned int width)
699 {
700 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
701 }
702 
703 struct sort_entry sort_cpu = {
704 	.se_header      = "CPU",
705 	.se_cmp	        = sort__cpu_cmp,
706 	.se_snprintf    = hist_entry__cpu_snprintf,
707 	.se_width_idx	= HISTC_CPU,
708 };
709 
710 /* --sort cgroup_id */
711 
712 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
713 {
714 	return (int64_t)(right_dev - left_dev);
715 }
716 
717 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
718 {
719 	return (int64_t)(right_ino - left_ino);
720 }
721 
722 static int64_t
723 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
724 {
725 	int64_t ret;
726 
727 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
728 	if (ret != 0)
729 		return ret;
730 
731 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
732 				       left->cgroup_id.ino);
733 }
734 
735 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
736 					  char *bf, size_t size,
737 					  unsigned int width __maybe_unused)
738 {
739 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
740 			       he->cgroup_id.ino);
741 }
742 
743 struct sort_entry sort_cgroup_id = {
744 	.se_header      = "cgroup id (dev/inode)",
745 	.se_cmp	        = sort__cgroup_id_cmp,
746 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
747 	.se_width_idx	= HISTC_CGROUP_ID,
748 };
749 
750 /* --sort cgroup */
751 
752 static int64_t
753 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
754 {
755 	return right->cgroup - left->cgroup;
756 }
757 
758 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
759 				       char *bf, size_t size,
760 				       unsigned int width __maybe_unused)
761 {
762 	const char *cgrp_name = "N/A";
763 
764 	if (he->cgroup) {
765 		struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
766 						   he->cgroup);
767 		if (cgrp != NULL)
768 			cgrp_name = cgrp->name;
769 		else
770 			cgrp_name = "unknown";
771 	}
772 
773 	return repsep_snprintf(bf, size, "%s", cgrp_name);
774 }
775 
776 struct sort_entry sort_cgroup = {
777 	.se_header      = "Cgroup",
778 	.se_cmp	        = sort__cgroup_cmp,
779 	.se_snprintf    = hist_entry__cgroup_snprintf,
780 	.se_width_idx	= HISTC_CGROUP,
781 };
782 
783 /* --sort socket */
784 
785 static int64_t
786 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
787 {
788 	return right->socket - left->socket;
789 }
790 
791 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
792 				    size_t size, unsigned int width)
793 {
794 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
795 }
796 
797 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
798 {
799 	int sk = *(const int *)arg;
800 
801 	if (type != HIST_FILTER__SOCKET)
802 		return -1;
803 
804 	return sk >= 0 && he->socket != sk;
805 }
806 
807 struct sort_entry sort_socket = {
808 	.se_header      = "Socket",
809 	.se_cmp	        = sort__socket_cmp,
810 	.se_snprintf    = hist_entry__socket_snprintf,
811 	.se_filter      = hist_entry__socket_filter,
812 	.se_width_idx	= HISTC_SOCKET,
813 };
814 
815 /* --sort time */
816 
817 static int64_t
818 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
819 {
820 	return right->time - left->time;
821 }
822 
823 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
824 				    size_t size, unsigned int width)
825 {
826 	char he_time[32];
827 
828 	if (symbol_conf.nanosecs)
829 		timestamp__scnprintf_nsec(he->time, he_time,
830 					  sizeof(he_time));
831 	else
832 		timestamp__scnprintf_usec(he->time, he_time,
833 					  sizeof(he_time));
834 
835 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
836 }
837 
838 struct sort_entry sort_time = {
839 	.se_header      = "Time",
840 	.se_cmp	        = sort__time_cmp,
841 	.se_snprintf    = hist_entry__time_snprintf,
842 	.se_width_idx	= HISTC_TIME,
843 };
844 
845 /* --sort trace */
846 
847 #ifdef HAVE_LIBTRACEEVENT
848 static char *get_trace_output(struct hist_entry *he)
849 {
850 	struct trace_seq seq;
851 	struct evsel *evsel;
852 	struct tep_record rec = {
853 		.data = he->raw_data,
854 		.size = he->raw_size,
855 	};
856 
857 	evsel = hists_to_evsel(he->hists);
858 
859 	trace_seq_init(&seq);
860 	if (symbol_conf.raw_trace) {
861 		tep_print_fields(&seq, he->raw_data, he->raw_size,
862 				 evsel->tp_format);
863 	} else {
864 		tep_print_event(evsel->tp_format->tep,
865 				&seq, &rec, "%s", TEP_PRINT_INFO);
866 	}
867 	/*
868 	 * Trim the buffer: it starts at 4KB and we're not going to
869 	 * add anything more to it.
870 	 */
871 	return realloc(seq.buffer, seq.len + 1);
872 }
873 
874 static int64_t
875 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
876 {
877 	struct evsel *evsel;
878 
879 	evsel = hists_to_evsel(left->hists);
880 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
881 		return 0;
882 
883 	if (left->trace_output == NULL)
884 		left->trace_output = get_trace_output(left);
885 	if (right->trace_output == NULL)
886 		right->trace_output = get_trace_output(right);
887 
888 	return strcmp(right->trace_output, left->trace_output);
889 }
890 
891 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
892 				    size_t size, unsigned int width)
893 {
894 	struct evsel *evsel;
895 
896 	evsel = hists_to_evsel(he->hists);
897 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
898 		return scnprintf(bf, size, "%-.*s", width, "N/A");
899 
900 	if (he->trace_output == NULL)
901 		he->trace_output = get_trace_output(he);
902 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
903 }
904 
905 struct sort_entry sort_trace = {
906 	.se_header      = "Trace output",
907 	.se_cmp	        = sort__trace_cmp,
908 	.se_snprintf    = hist_entry__trace_snprintf,
909 	.se_width_idx	= HISTC_TRACE,
910 };
911 #endif /* HAVE_LIBTRACEEVENT */
912 
913 /* sort keys for branch stacks */
914 
915 static int64_t
916 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
917 {
918 	if (!left->branch_info || !right->branch_info)
919 		return cmp_null(left->branch_info, right->branch_info);
920 
921 	return _sort__dso_cmp(left->branch_info->from.ms.map,
922 			      right->branch_info->from.ms.map);
923 }
924 
925 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
926 				    size_t size, unsigned int width)
927 {
928 	if (he->branch_info)
929 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
930 						 bf, size, width);
931 	else
932 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
933 }
934 
935 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
936 				       const void *arg)
937 {
938 	const struct dso *dso = arg;
939 
940 	if (type != HIST_FILTER__DSO)
941 		return -1;
942 
943 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
944 		       he->branch_info->from.ms.map->dso != dso);
945 }
946 
947 static int64_t
948 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
949 {
950 	if (!left->branch_info || !right->branch_info)
951 		return cmp_null(left->branch_info, right->branch_info);
952 
953 	return _sort__dso_cmp(left->branch_info->to.ms.map,
954 			      right->branch_info->to.ms.map);
955 }
956 
957 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
958 				       size_t size, unsigned int width)
959 {
960 	if (he->branch_info)
961 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
962 						 bf, size, width);
963 	else
964 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
965 }
966 
967 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
968 				     const void *arg)
969 {
970 	const struct dso *dso = arg;
971 
972 	if (type != HIST_FILTER__DSO)
973 		return -1;
974 
975 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
976 		       he->branch_info->to.ms.map->dso != dso);
977 }
978 
979 static int64_t
980 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
981 {
982 	struct addr_map_symbol *from_l = &left->branch_info->from;
983 	struct addr_map_symbol *from_r = &right->branch_info->from;
984 
985 	if (!left->branch_info || !right->branch_info)
986 		return cmp_null(left->branch_info, right->branch_info);
987 
988 	from_l = &left->branch_info->from;
989 	from_r = &right->branch_info->from;
990 
991 	if (!from_l->ms.sym && !from_r->ms.sym)
992 		return _sort__addr_cmp(from_l->addr, from_r->addr);
993 
994 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
995 }
996 
997 static int64_t
998 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
999 {
1000 	struct addr_map_symbol *to_l, *to_r;
1001 
1002 	if (!left->branch_info || !right->branch_info)
1003 		return cmp_null(left->branch_info, right->branch_info);
1004 
1005 	to_l = &left->branch_info->to;
1006 	to_r = &right->branch_info->to;
1007 
1008 	if (!to_l->ms.sym && !to_r->ms.sym)
1009 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1010 
1011 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1012 }
1013 
1014 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1015 					 size_t size, unsigned int width)
1016 {
1017 	if (he->branch_info) {
1018 		struct addr_map_symbol *from = &he->branch_info->from;
1019 
1020 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1021 						 from->al_level, bf, size, width);
1022 	}
1023 
1024 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1025 }
1026 
1027 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1028 				       size_t size, unsigned int width)
1029 {
1030 	if (he->branch_info) {
1031 		struct addr_map_symbol *to = &he->branch_info->to;
1032 
1033 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1034 						 to->al_level, bf, size, width);
1035 	}
1036 
1037 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1038 }
1039 
1040 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1041 				       const void *arg)
1042 {
1043 	const char *sym = arg;
1044 
1045 	if (type != HIST_FILTER__SYMBOL)
1046 		return -1;
1047 
1048 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1049 			strstr(he->branch_info->from.ms.sym->name, sym));
1050 }
1051 
1052 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1053 				       const void *arg)
1054 {
1055 	const char *sym = arg;
1056 
1057 	if (type != HIST_FILTER__SYMBOL)
1058 		return -1;
1059 
1060 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1061 		        strstr(he->branch_info->to.ms.sym->name, sym));
1062 }
1063 
1064 struct sort_entry sort_dso_from = {
1065 	.se_header	= "Source Shared Object",
1066 	.se_cmp		= sort__dso_from_cmp,
1067 	.se_snprintf	= hist_entry__dso_from_snprintf,
1068 	.se_filter	= hist_entry__dso_from_filter,
1069 	.se_width_idx	= HISTC_DSO_FROM,
1070 };
1071 
1072 struct sort_entry sort_dso_to = {
1073 	.se_header	= "Target Shared Object",
1074 	.se_cmp		= sort__dso_to_cmp,
1075 	.se_snprintf	= hist_entry__dso_to_snprintf,
1076 	.se_filter	= hist_entry__dso_to_filter,
1077 	.se_width_idx	= HISTC_DSO_TO,
1078 };
1079 
1080 struct sort_entry sort_sym_from = {
1081 	.se_header	= "Source Symbol",
1082 	.se_cmp		= sort__sym_from_cmp,
1083 	.se_snprintf	= hist_entry__sym_from_snprintf,
1084 	.se_filter	= hist_entry__sym_from_filter,
1085 	.se_width_idx	= HISTC_SYMBOL_FROM,
1086 };
1087 
1088 struct sort_entry sort_sym_to = {
1089 	.se_header	= "Target Symbol",
1090 	.se_cmp		= sort__sym_to_cmp,
1091 	.se_snprintf	= hist_entry__sym_to_snprintf,
1092 	.se_filter	= hist_entry__sym_to_filter,
1093 	.se_width_idx	= HISTC_SYMBOL_TO,
1094 };
1095 
1096 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1097 				     u64 ip, char level, char *bf, size_t size,
1098 				     unsigned int width)
1099 {
1100 	struct symbol *sym = ms->sym;
1101 	struct map *map = ms->map;
1102 	size_t ret = 0, offs;
1103 
1104 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1105 	if (sym && map) {
1106 		if (sym->type == STT_OBJECT) {
1107 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1108 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1109 					ip - map->unmap_ip(map, sym->start));
1110 		} else {
1111 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1112 					       width - ret,
1113 					       sym->name);
1114 			offs = ip - sym->start;
1115 			if (offs)
1116 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1117 		}
1118 	} else {
1119 		size_t len = BITS_PER_LONG / 4;
1120 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1121 				       len, ip);
1122 	}
1123 
1124 	return ret;
1125 }
1126 
1127 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1128 					 size_t size, unsigned int width)
1129 {
1130 	if (he->branch_info) {
1131 		struct addr_map_symbol *from = &he->branch_info->from;
1132 
1133 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1134 						 he->level, bf, size, width);
1135 	}
1136 
1137 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1138 }
1139 
1140 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1141 				       size_t size, unsigned int width)
1142 {
1143 	if (he->branch_info) {
1144 		struct addr_map_symbol *to = &he->branch_info->to;
1145 
1146 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1147 						 he->level, bf, size, width);
1148 	}
1149 
1150 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1151 }
1152 
1153 static int64_t
1154 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1155 {
1156 	struct addr_map_symbol *from_l;
1157 	struct addr_map_symbol *from_r;
1158 	int64_t ret;
1159 
1160 	if (!left->branch_info || !right->branch_info)
1161 		return cmp_null(left->branch_info, right->branch_info);
1162 
1163 	from_l = &left->branch_info->from;
1164 	from_r = &right->branch_info->from;
1165 
1166 	/*
1167 	 * comparing symbol address alone is not enough since it's a
1168 	 * relative address within a dso.
1169 	 */
1170 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1171 	if (ret != 0)
1172 		return ret;
1173 
1174 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1175 }
1176 
1177 static int64_t
1178 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1179 {
1180 	struct addr_map_symbol *to_l;
1181 	struct addr_map_symbol *to_r;
1182 	int64_t ret;
1183 
1184 	if (!left->branch_info || !right->branch_info)
1185 		return cmp_null(left->branch_info, right->branch_info);
1186 
1187 	to_l = &left->branch_info->to;
1188 	to_r = &right->branch_info->to;
1189 
1190 	/*
1191 	 * comparing symbol address alone is not enough since it's a
1192 	 * relative address within a dso.
1193 	 */
1194 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1195 	if (ret != 0)
1196 		return ret;
1197 
1198 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1199 }
1200 
1201 struct sort_entry sort_addr_from = {
1202 	.se_header	= "Source Address",
1203 	.se_cmp		= sort__addr_from_cmp,
1204 	.se_snprintf	= hist_entry__addr_from_snprintf,
1205 	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1206 	.se_width_idx	= HISTC_ADDR_FROM,
1207 };
1208 
1209 struct sort_entry sort_addr_to = {
1210 	.se_header	= "Target Address",
1211 	.se_cmp		= sort__addr_to_cmp,
1212 	.se_snprintf	= hist_entry__addr_to_snprintf,
1213 	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1214 	.se_width_idx	= HISTC_ADDR_TO,
1215 };
1216 
1217 
1218 static int64_t
1219 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1220 {
1221 	unsigned char mp, p;
1222 
1223 	if (!left->branch_info || !right->branch_info)
1224 		return cmp_null(left->branch_info, right->branch_info);
1225 
1226 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1227 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1228 	return mp || p;
1229 }
1230 
1231 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1232 				    size_t size, unsigned int width){
1233 	static const char *out = "N/A";
1234 
1235 	if (he->branch_info) {
1236 		if (he->branch_info->flags.predicted)
1237 			out = "N";
1238 		else if (he->branch_info->flags.mispred)
1239 			out = "Y";
1240 	}
1241 
1242 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1243 }
1244 
1245 static int64_t
1246 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1247 {
1248 	if (!left->branch_info || !right->branch_info)
1249 		return cmp_null(left->branch_info, right->branch_info);
1250 
1251 	return left->branch_info->flags.cycles -
1252 		right->branch_info->flags.cycles;
1253 }
1254 
1255 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1256 				    size_t size, unsigned int width)
1257 {
1258 	if (!he->branch_info)
1259 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1260 	if (he->branch_info->flags.cycles == 0)
1261 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1262 	return repsep_snprintf(bf, size, "%-*hd", width,
1263 			       he->branch_info->flags.cycles);
1264 }
1265 
1266 struct sort_entry sort_cycles = {
1267 	.se_header	= "Basic Block Cycles",
1268 	.se_cmp		= sort__cycles_cmp,
1269 	.se_snprintf	= hist_entry__cycles_snprintf,
1270 	.se_width_idx	= HISTC_CYCLES,
1271 };
1272 
1273 /* --sort daddr_sym */
1274 int64_t
1275 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1276 {
1277 	uint64_t l = 0, r = 0;
1278 
1279 	if (left->mem_info)
1280 		l = left->mem_info->daddr.addr;
1281 	if (right->mem_info)
1282 		r = right->mem_info->daddr.addr;
1283 
1284 	return (int64_t)(r - l);
1285 }
1286 
1287 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1288 				    size_t size, unsigned int width)
1289 {
1290 	uint64_t addr = 0;
1291 	struct map_symbol *ms = NULL;
1292 
1293 	if (he->mem_info) {
1294 		addr = he->mem_info->daddr.addr;
1295 		ms = &he->mem_info->daddr.ms;
1296 	}
1297 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1298 }
1299 
1300 int64_t
1301 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1302 {
1303 	uint64_t l = 0, r = 0;
1304 
1305 	if (left->mem_info)
1306 		l = left->mem_info->iaddr.addr;
1307 	if (right->mem_info)
1308 		r = right->mem_info->iaddr.addr;
1309 
1310 	return (int64_t)(r - l);
1311 }
1312 
1313 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1314 				    size_t size, unsigned int width)
1315 {
1316 	uint64_t addr = 0;
1317 	struct map_symbol *ms = NULL;
1318 
1319 	if (he->mem_info) {
1320 		addr = he->mem_info->iaddr.addr;
1321 		ms   = &he->mem_info->iaddr.ms;
1322 	}
1323 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1324 }
1325 
1326 static int64_t
1327 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1328 {
1329 	struct map *map_l = NULL;
1330 	struct map *map_r = NULL;
1331 
1332 	if (left->mem_info)
1333 		map_l = left->mem_info->daddr.ms.map;
1334 	if (right->mem_info)
1335 		map_r = right->mem_info->daddr.ms.map;
1336 
1337 	return _sort__dso_cmp(map_l, map_r);
1338 }
1339 
1340 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1341 				    size_t size, unsigned int width)
1342 {
1343 	struct map *map = NULL;
1344 
1345 	if (he->mem_info)
1346 		map = he->mem_info->daddr.ms.map;
1347 
1348 	return _hist_entry__dso_snprintf(map, bf, size, width);
1349 }
1350 
1351 static int64_t
1352 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1353 {
1354 	union perf_mem_data_src data_src_l;
1355 	union perf_mem_data_src data_src_r;
1356 
1357 	if (left->mem_info)
1358 		data_src_l = left->mem_info->data_src;
1359 	else
1360 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1361 
1362 	if (right->mem_info)
1363 		data_src_r = right->mem_info->data_src;
1364 	else
1365 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1366 
1367 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1368 }
1369 
1370 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1371 				    size_t size, unsigned int width)
1372 {
1373 	char out[10];
1374 
1375 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1376 	return repsep_snprintf(bf, size, "%.*s", width, out);
1377 }
1378 
1379 static int64_t
1380 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1381 {
1382 	union perf_mem_data_src data_src_l;
1383 	union perf_mem_data_src data_src_r;
1384 
1385 	if (left->mem_info)
1386 		data_src_l = left->mem_info->data_src;
1387 	else
1388 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1389 
1390 	if (right->mem_info)
1391 		data_src_r = right->mem_info->data_src;
1392 	else
1393 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1394 
1395 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1396 }
1397 
1398 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1399 				    size_t size, unsigned int width)
1400 {
1401 	char out[64];
1402 
1403 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1404 	return repsep_snprintf(bf, size, "%-*s", width, out);
1405 }
1406 
1407 static int64_t
1408 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1409 {
1410 	union perf_mem_data_src data_src_l;
1411 	union perf_mem_data_src data_src_r;
1412 
1413 	if (left->mem_info)
1414 		data_src_l = left->mem_info->data_src;
1415 	else
1416 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1417 
1418 	if (right->mem_info)
1419 		data_src_r = right->mem_info->data_src;
1420 	else
1421 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1422 
1423 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1424 }
1425 
1426 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1427 				    size_t size, unsigned int width)
1428 {
1429 	char out[64];
1430 
1431 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1432 	return repsep_snprintf(bf, size, "%-*s", width, out);
1433 }
1434 
1435 static int64_t
1436 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1437 {
1438 	union perf_mem_data_src data_src_l;
1439 	union perf_mem_data_src data_src_r;
1440 
1441 	if (left->mem_info)
1442 		data_src_l = left->mem_info->data_src;
1443 	else
1444 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1445 
1446 	if (right->mem_info)
1447 		data_src_r = right->mem_info->data_src;
1448 	else
1449 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1450 
1451 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1452 }
1453 
1454 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1455 				    size_t size, unsigned int width)
1456 {
1457 	char out[64];
1458 
1459 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1460 	return repsep_snprintf(bf, size, "%-*s", width, out);
1461 }
1462 
1463 int64_t
1464 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1465 {
1466 	u64 l, r;
1467 	struct map *l_map, *r_map;
1468 	int rc;
1469 
1470 	if (!left->mem_info)  return -1;
1471 	if (!right->mem_info) return 1;
1472 
1473 	/* group event types together */
1474 	if (left->cpumode > right->cpumode) return -1;
1475 	if (left->cpumode < right->cpumode) return 1;
1476 
1477 	l_map = left->mem_info->daddr.ms.map;
1478 	r_map = right->mem_info->daddr.ms.map;
1479 
1480 	/* if both are NULL, jump to sort on al_addr instead */
1481 	if (!l_map && !r_map)
1482 		goto addr;
1483 
1484 	if (!l_map) return -1;
1485 	if (!r_map) return 1;
1486 
1487 	rc = dso__cmp_id(l_map->dso, r_map->dso);
1488 	if (rc)
1489 		return rc;
1490 	/*
1491 	 * Addresses with no major/minor numbers are assumed to be
1492 	 * anonymous in userspace.  Sort those on pid then address.
1493 	 *
1494 	 * The kernel and non-zero major/minor mapped areas are
1495 	 * assumed to be unity mapped.  Sort those on address.
1496 	 */
1497 
1498 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1499 	    (!(l_map->flags & MAP_SHARED)) &&
1500 	    !l_map->dso->id.maj && !l_map->dso->id.min &&
1501 	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1502 		/* userspace anonymous */
1503 
1504 		if (left->thread->pid_ > right->thread->pid_) return -1;
1505 		if (left->thread->pid_ < right->thread->pid_) return 1;
1506 	}
1507 
1508 addr:
1509 	/* al_addr does all the right addr - start + offset calculations */
1510 	l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
1511 	r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
1512 
1513 	if (l > r) return -1;
1514 	if (l < r) return 1;
1515 
1516 	return 0;
1517 }
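
/*
 * Editor's summary of the key ordering above (illustrative sketch, not
 * normative): cpumode first, then dso identity, then -- for userspace
 * anonymous mappings only -- pid, and finally the (possibly doubled)
 * cacheline address, so anonymous-heap accesses from two different
 * processes never share a "Data Cacheline" row even if their virtual
 * addresses happen to collide.
 */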
1518 
1519 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1520 					  size_t size, unsigned int width)
1521 {
1522 
1523 	uint64_t addr = 0;
1524 	struct map_symbol *ms = NULL;
1525 	char level = he->level;
1526 
1527 	if (he->mem_info) {
1528 		struct map *map = he->mem_info->daddr.ms.map;
1529 
1530 		addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
1531 		ms = &he->mem_info->daddr.ms;
1532 
1533 		/* print [s] for shared data mmaps */
1534 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1535 		     map && !(map->prot & PROT_EXEC) &&
1536 		    (map->flags & MAP_SHARED) &&
1537 		    (map->dso->id.maj || map->dso->id.min ||
1538 		     map->dso->id.ino || map->dso->id.ino_generation))
1539 			level = 's';
1540 		else if (!map)
1541 			level = 'X';
1542 	}
1543 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1544 }
1545 
1546 struct sort_entry sort_mispredict = {
1547 	.se_header	= "Branch Mispredicted",
1548 	.se_cmp		= sort__mispredict_cmp,
1549 	.se_snprintf	= hist_entry__mispredict_snprintf,
1550 	.se_width_idx	= HISTC_MISPREDICT,
1551 };
1552 
1553 static int64_t
1554 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1555 {
1556 	return left->weight - right->weight;
1557 }
1558 
1559 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1560 				    size_t size, unsigned int width)
1561 {
1562 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1563 }
1564 
1565 struct sort_entry sort_local_weight = {
1566 	.se_header	= "Local Weight",
1567 	.se_cmp		= sort__weight_cmp,
1568 	.se_snprintf	= hist_entry__local_weight_snprintf,
1569 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1570 };
1571 
1572 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1573 					      size_t size, unsigned int width)
1574 {
1575 	return repsep_snprintf(bf, size, "%-*llu", width,
1576 			       he->weight * he->stat.nr_events);
1577 }
1578 
1579 struct sort_entry sort_global_weight = {
1580 	.se_header	= "Weight",
1581 	.se_cmp		= sort__weight_cmp,
1582 	.se_snprintf	= hist_entry__global_weight_snprintf,
1583 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1584 };
1585 
1586 static int64_t
1587 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1588 {
1589 	return left->ins_lat - right->ins_lat;
1590 }
1591 
1592 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1593 					      size_t size, unsigned int width)
1594 {
1595 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1596 }
1597 
1598 struct sort_entry sort_local_ins_lat = {
1599 	.se_header	= "Local INSTR Latency",
1600 	.se_cmp		= sort__ins_lat_cmp,
1601 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1602 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1603 };
1604 
1605 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1606 					       size_t size, unsigned int width)
1607 {
1608 	return repsep_snprintf(bf, size, "%-*u", width,
1609 			       he->ins_lat * he->stat.nr_events);
1610 }
1611 
1612 struct sort_entry sort_global_ins_lat = {
1613 	.se_header	= "INSTR Latency",
1614 	.se_cmp		= sort__ins_lat_cmp,
1615 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1616 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1617 };
1618 
1619 static int64_t
1620 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1621 {
1622 	return left->p_stage_cyc - right->p_stage_cyc;
1623 }
1624 
1625 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1626 					size_t size, unsigned int width)
1627 {
1628 	return repsep_snprintf(bf, size, "%-*u", width,
1629 			he->p_stage_cyc * he->stat.nr_events);
1630 }
1631 
1632 
1633 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1634 					size_t size, unsigned int width)
1635 {
1636 	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1637 }
1638 
1639 struct sort_entry sort_local_p_stage_cyc = {
1640 	.se_header      = "Local Pipeline Stage Cycle",
1641 	.se_cmp         = sort__p_stage_cyc_cmp,
1642 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1643 	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1644 };
1645 
1646 struct sort_entry sort_global_p_stage_cyc = {
1647 	.se_header      = "Pipeline Stage Cycle",
1648 	.se_cmp         = sort__p_stage_cyc_cmp,
1649 	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1650 	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1651 };
1652 
1653 struct sort_entry sort_mem_daddr_sym = {
1654 	.se_header	= "Data Symbol",
1655 	.se_cmp		= sort__daddr_cmp,
1656 	.se_snprintf	= hist_entry__daddr_snprintf,
1657 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1658 };
1659 
1660 struct sort_entry sort_mem_iaddr_sym = {
1661 	.se_header	= "Code Symbol",
1662 	.se_cmp		= sort__iaddr_cmp,
1663 	.se_snprintf	= hist_entry__iaddr_snprintf,
1664 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1665 };
1666 
1667 struct sort_entry sort_mem_daddr_dso = {
1668 	.se_header	= "Data Object",
1669 	.se_cmp		= sort__dso_daddr_cmp,
1670 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1671 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1672 };
1673 
1674 struct sort_entry sort_mem_locked = {
1675 	.se_header	= "Locked",
1676 	.se_cmp		= sort__locked_cmp,
1677 	.se_snprintf	= hist_entry__locked_snprintf,
1678 	.se_width_idx	= HISTC_MEM_LOCKED,
1679 };
1680 
1681 struct sort_entry sort_mem_tlb = {
1682 	.se_header	= "TLB access",
1683 	.se_cmp		= sort__tlb_cmp,
1684 	.se_snprintf	= hist_entry__tlb_snprintf,
1685 	.se_width_idx	= HISTC_MEM_TLB,
1686 };
1687 
1688 struct sort_entry sort_mem_lvl = {
1689 	.se_header	= "Memory access",
1690 	.se_cmp		= sort__lvl_cmp,
1691 	.se_snprintf	= hist_entry__lvl_snprintf,
1692 	.se_width_idx	= HISTC_MEM_LVL,
1693 };
1694 
1695 struct sort_entry sort_mem_snoop = {
1696 	.se_header	= "Snoop",
1697 	.se_cmp		= sort__snoop_cmp,
1698 	.se_snprintf	= hist_entry__snoop_snprintf,
1699 	.se_width_idx	= HISTC_MEM_SNOOP,
1700 };
1701 
1702 struct sort_entry sort_mem_dcacheline = {
1703 	.se_header	= "Data Cacheline",
1704 	.se_cmp		= sort__dcacheline_cmp,
1705 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1706 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1707 };
1708 
1709 static int64_t
1710 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1711 {
1712 	union perf_mem_data_src data_src_l;
1713 	union perf_mem_data_src data_src_r;
1714 
1715 	if (left->mem_info)
1716 		data_src_l = left->mem_info->data_src;
1717 	else
1718 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1719 
1720 	if (right->mem_info)
1721 		data_src_r = right->mem_info->data_src;
1722 	else
1723 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1724 
1725 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1726 }
1727 
1728 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1729 					size_t size, unsigned int width)
1730 {
1731 	char out[16];
1732 
1733 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1734 	return repsep_snprintf(bf, size, "%.*s", width, out);
1735 }
1736 
1737 struct sort_entry sort_mem_blocked = {
1738 	.se_header	= "Blocked",
1739 	.se_cmp		= sort__blocked_cmp,
1740 	.se_snprintf	= hist_entry__blocked_snprintf,
1741 	.se_width_idx	= HISTC_MEM_BLOCKED,
1742 };
1743 
1744 static int64_t
1745 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1746 {
1747 	uint64_t l = 0, r = 0;
1748 
1749 	if (left->mem_info)
1750 		l = left->mem_info->daddr.phys_addr;
1751 	if (right->mem_info)
1752 		r = right->mem_info->daddr.phys_addr;
1753 
1754 	return (int64_t)(r - l);
1755 }
1756 
1757 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1758 					   size_t size, unsigned int width)
1759 {
1760 	uint64_t addr = 0;
1761 	size_t ret = 0;
1762 	size_t len = BITS_PER_LONG / 4;
1763 
1764 	addr = he->mem_info->daddr.phys_addr;
1765 
1766 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1767 
1768 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1769 
1770 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1771 
1772 	if (ret > width)
1773 		bf[width] = '\0';
1774 
1775 	return width;
1776 }
1777 
1778 struct sort_entry sort_mem_phys_daddr = {
1779 	.se_header	= "Data Physical Address",
1780 	.se_cmp		= sort__phys_daddr_cmp,
1781 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1782 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1783 };
1784 
1785 static int64_t
1786 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1787 {
1788 	uint64_t l = 0, r = 0;
1789 
1790 	if (left->mem_info)
1791 		l = left->mem_info->daddr.data_page_size;
1792 	if (right->mem_info)
1793 		r = right->mem_info->daddr.data_page_size;
1794 
1795 	return (int64_t)(r - l);
1796 }
1797 
1798 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1799 					  size_t size, unsigned int width)
1800 {
1801 	char str[PAGE_SIZE_NAME_LEN];
1802 
1803 	return repsep_snprintf(bf, size, "%-*s", width,
1804 			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1805 }
1806 
1807 struct sort_entry sort_mem_data_page_size = {
1808 	.se_header	= "Data Page Size",
1809 	.se_cmp		= sort__data_page_size_cmp,
1810 	.se_snprintf	= hist_entry__data_page_size_snprintf,
1811 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1812 };
1813 
1814 static int64_t
1815 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1816 {
1817 	uint64_t l = left->code_page_size;
1818 	uint64_t r = right->code_page_size;
1819 
1820 	return (int64_t)(r - l);
1821 }
1822 
1823 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1824 					  size_t size, unsigned int width)
1825 {
1826 	char str[PAGE_SIZE_NAME_LEN];
1827 
1828 	return repsep_snprintf(bf, size, "%-*s", width,
1829 			       get_page_size_name(he->code_page_size, str));
1830 }
1831 
1832 struct sort_entry sort_code_page_size = {
1833 	.se_header	= "Code Page Size",
1834 	.se_cmp		= sort__code_page_size_cmp,
1835 	.se_snprintf	= hist_entry__code_page_size_snprintf,
1836 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1837 };
1838 
1839 static int64_t
1840 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1841 {
1842 	if (!left->branch_info || !right->branch_info)
1843 		return cmp_null(left->branch_info, right->branch_info);
1844 
1845 	return left->branch_info->flags.abort !=
1846 		right->branch_info->flags.abort;
1847 }
1848 
1849 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1850 				    size_t size, unsigned int width)
1851 {
1852 	static const char *out = "N/A";
1853 
1854 	if (he->branch_info) {
1855 		if (he->branch_info->flags.abort)
1856 			out = "A";
1857 		else
1858 			out = ".";
1859 	}
1860 
1861 	return repsep_snprintf(bf, size, "%-*s", width, out);
1862 }
1863 
1864 struct sort_entry sort_abort = {
1865 	.se_header	= "Transaction abort",
1866 	.se_cmp		= sort__abort_cmp,
1867 	.se_snprintf	= hist_entry__abort_snprintf,
1868 	.se_width_idx	= HISTC_ABORT,
1869 };
1870 
1871 static int64_t
1872 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1873 {
1874 	if (!left->branch_info || !right->branch_info)
1875 		return cmp_null(left->branch_info, right->branch_info);
1876 
1877 	return left->branch_info->flags.in_tx !=
1878 		right->branch_info->flags.in_tx;
1879 }
1880 
1881 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1882 				    size_t size, unsigned int width)
1883 {
1884 	const char *out = "N/A";
1885 
1886 	if (he->branch_info) {
1887 		if (he->branch_info->flags.in_tx)
1888 			out = "T";
1889 		else
1890 			out = ".";
1891 	}
1892 
1893 	return repsep_snprintf(bf, size, "%-*s", width, out);
1894 }
1895 
1896 struct sort_entry sort_in_tx = {
1897 	.se_header	= "Branch in transaction",
1898 	.se_cmp		= sort__in_tx_cmp,
1899 	.se_snprintf	= hist_entry__in_tx_snprintf,
1900 	.se_width_idx	= HISTC_IN_TX,
1901 };
1902 
1903 static int64_t
1904 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1905 {
1906 	return left->transaction - right->transaction;
1907 }
1908 
1909 static inline char *add_str(char *p, const char *str)
1910 {
1911 	strcpy(p, str);
1912 	return p + strlen(str);
1913 }
1914 
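/*
 * Transaction flag bits and their display names.  Entries with
 * skip_for_len set are excluded from the column-width estimate in
 * hist_entry__transaction_len(), presumably because each of them is
 * mutually exclusive with a flag that is already counted (e.g. SYNC
 * vs. ASYNC), so counting both would overestimate the width.
 */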
1915 static struct txbit {
1916 	unsigned flag;
1917 	const char *name;
1918 	int skip_for_len;
1919 } txbits[] = {
1920 	{ PERF_TXN_ELISION,        "EL ",        0 },
1921 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1922 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1923 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1924 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1925 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1926 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1927 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1928 	{ 0, NULL, 0 }
1929 };
1930 
1931 int hist_entry__transaction_len(void)
1932 {
1933 	int i;
1934 	int len = 0;
1935 
1936 	for (i = 0; txbits[i].name; i++) {
1937 		if (!txbits[i].skip_for_len)
1938 			len += strlen(txbits[i].name);
1939 	}
1940 	len += 4; /* :XX<space> */
1941 	return len;
1942 }
1943 
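/*
 * Render the transaction flags as text, e.g. "TX SYNC CAP-WRITE :1" for
 * a capacity abort inside a synchronous transaction (illustrative only;
 * the value after ':' is the raw abort code reported by the CPU).
 */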
1944 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1945 					    size_t size, unsigned int width)
1946 {
1947 	u64 t = he->transaction;
1948 	char buf[128];
1949 	char *p = buf;
1950 	int i;
1951 
1952 	buf[0] = 0;
1953 	for (i = 0; txbits[i].name; i++)
1954 		if (txbits[i].flag & t)
1955 			p = add_str(p, txbits[i].name);
1956 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1957 		p = add_str(p, "NEITHER ");
1958 	if (t & PERF_TXN_ABORT_MASK) {
1959 		sprintf(p, ":%" PRIx64,
1960 			(t & PERF_TXN_ABORT_MASK) >>
1961 			PERF_TXN_ABORT_SHIFT);
1962 		p += strlen(p);
1963 	}
1964 
1965 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1966 }
1967 
1968 struct sort_entry sort_transaction = {
1969 	.se_header	= "Transaction                ",
1970 	.se_cmp		= sort__transaction_cmp,
1971 	.se_snprintf	= hist_entry__transaction_snprintf,
1972 	.se_width_idx	= HISTC_TRANSACTION,
1973 };
1974 
1975 /* --sort symbol_size */
1976 
1977 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1978 {
1979 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1980 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1981 
1982 	return size_l < size_r ? -1 :
1983 		size_l == size_r ? 0 : 1;
1984 }
1985 
1986 static int64_t
1987 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1988 {
1989 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1990 }
1991 
1992 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1993 					  size_t bf_size, unsigned int width)
1994 {
1995 	if (sym)
1996 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1997 
1998 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1999 }
2000 
2001 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2002 					 size_t size, unsigned int width)
2003 {
2004 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2005 }
2006 
2007 struct sort_entry sort_sym_size = {
2008 	.se_header	= "Symbol size",
2009 	.se_cmp		= sort__sym_size_cmp,
2010 	.se_snprintf	= hist_entry__sym_size_snprintf,
2011 	.se_width_idx	= HISTC_SYM_SIZE,
2012 };
2013 
2014 /* --sort dso_size */
2015 
2016 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2017 {
2018 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2019 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2020 
2021 	return size_l < size_r ? -1 :
2022 		size_l == size_r ? 0 : 1;
2023 }
2024 
2025 static int64_t
2026 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2027 {
2028 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2029 }
2030 
2031 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2032 					  size_t bf_size, unsigned int width)
2033 {
2034 	if (map && map->dso)
2035 		return repsep_snprintf(bf, bf_size, "%*d", width,
2036 				       map__size(map));
2037 
2038 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2039 }
2040 
2041 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2042 					 size_t size, unsigned int width)
2043 {
2044 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2045 }
2046 
2047 struct sort_entry sort_dso_size = {
2048 	.se_header	= "DSO size",
2049 	.se_cmp		= sort__dso_size_cmp,
2050 	.se_snprintf	= hist_entry__dso_size_snprintf,
2051 	.se_width_idx	= HISTC_DSO_SIZE,
2052 };
2053 
2054 /* --sort addr */
2055 
2056 static int64_t
2057 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2058 {
2059 	u64 left_ip = left->ip;
2060 	u64 right_ip = right->ip;
2061 	struct map *left_map = left->ms.map;
2062 	struct map *right_map = right->ms.map;
2063 
2064 	if (left_map)
2065 		left_ip = left_map->unmap_ip(left_map, left_ip);
2066 	if (right_map)
2067 		right_ip = right_map->unmap_ip(right_map, right_ip);
2068 
2069 	return _sort__addr_cmp(left_ip, right_ip);
2070 }
2071 
2072 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2073 				     size_t size, unsigned int width)
2074 {
2075 	u64 ip = he->ip;
2076 	struct map *map = he->ms.map;
2077 
2078 	if (map)
2079 		ip = map->unmap_ip(map, ip);
2080 
2081 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2082 }
2083 
2084 struct sort_entry sort_addr = {
2085 	.se_header	= "Address",
2086 	.se_cmp		= sort__addr_cmp,
2087 	.se_snprintf	= hist_entry__addr_snprintf,
2088 	.se_width_idx	= HISTC_ADDR,
2089 };
2090 
2091 
2092 struct sort_dimension {
2093 	const char		*name;
2094 	struct sort_entry	*entry;
2095 	int			taken;
2096 };
2097 
2098 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2099 {
2100 	return 0;
2101 }
2102 
2103 const char * __weak arch_perf_header_entry(const char *se_header)
2104 {
2105 	return se_header;
2106 }
2107 
2108 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2109 {
2110 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2111 }
2112 
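/* Map a SORT_* index to its command-line key name and sort_entry implementation. */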
2113 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2114 
2115 static struct sort_dimension common_sort_dimensions[] = {
2116 	DIM(SORT_PID, "pid", sort_thread),
2117 	DIM(SORT_COMM, "comm", sort_comm),
2118 	DIM(SORT_DSO, "dso", sort_dso),
2119 	DIM(SORT_SYM, "symbol", sort_sym),
2120 	DIM(SORT_PARENT, "parent", sort_parent),
2121 	DIM(SORT_CPU, "cpu", sort_cpu),
2122 	DIM(SORT_SOCKET, "socket", sort_socket),
2123 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2124 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2125 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2126 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2127 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2128 #ifdef HAVE_LIBTRACEEVENT
2129 	DIM(SORT_TRACE, "trace", sort_trace),
2130 #endif
2131 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2132 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2133 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2134 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2135 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2136 	DIM(SORT_TIME, "time", sort_time),
2137 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2138 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2139 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2140 	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2141 	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2142 	DIM(SORT_ADDR, "addr", sort_addr),
2143 	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2144 	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2145 };
2146 
2147 #undef DIM
2148 
2149 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2150 
2151 static struct sort_dimension bstack_sort_dimensions[] = {
2152 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2153 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2154 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2155 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2156 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2157 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2158 	DIM(SORT_ABORT, "abort", sort_abort),
2159 	DIM(SORT_CYCLES, "cycles", sort_cycles),
2160 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2161 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2162 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2163 	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2164 	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2165 };
2166 
2167 #undef DIM
2168 
2169 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2170 
2171 static struct sort_dimension memory_sort_dimensions[] = {
2172 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2173 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2174 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2175 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2176 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2177 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2178 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2179 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2180 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2181 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2182 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2183 };
2184 
2185 #undef DIM
2186 
2187 struct hpp_dimension {
2188 	const char		*name;
2189 	struct perf_hpp_fmt	*fmt;
2190 	int			taken;
2191 };
2192 
2193 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2194 
2195 static struct hpp_dimension hpp_sort_dimensions[] = {
2196 	DIM(PERF_HPP__OVERHEAD, "overhead"),
2197 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2198 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2199 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2200 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2201 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2202 	DIM(PERF_HPP__SAMPLES, "sample"),
2203 	DIM(PERF_HPP__PERIOD, "period"),
2204 };
2205 
2206 #undef DIM
2207 
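/*
 * Adapter that exposes a classic sort_entry through the perf_hpp_fmt
 * column-formatting interface, so sort keys and hpp columns share one
 * output path.
 */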
2208 struct hpp_sort_entry {
2209 	struct perf_hpp_fmt hpp;
2210 	struct sort_entry *se;
2211 };
2212 
2213 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2214 {
2215 	struct hpp_sort_entry *hse;
2216 
2217 	if (!perf_hpp__is_sort_entry(fmt))
2218 		return;
2219 
2220 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2221 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2222 }
2223 
2224 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2225 			      struct hists *hists, int line __maybe_unused,
2226 			      int *span __maybe_unused)
2227 {
2228 	struct hpp_sort_entry *hse;
2229 	size_t len = fmt->user_len;
2230 
2231 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2232 
2233 	if (!len)
2234 		len = hists__col_len(hists, hse->se->se_width_idx);
2235 
2236 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2237 }
2238 
2239 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2240 			     struct perf_hpp *hpp __maybe_unused,
2241 			     struct hists *hists)
2242 {
2243 	struct hpp_sort_entry *hse;
2244 	size_t len = fmt->user_len;
2245 
2246 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2247 
2248 	if (!len)
2249 		len = hists__col_len(hists, hse->se->se_width_idx);
2250 
2251 	return len;
2252 }
2253 
2254 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2255 			     struct hist_entry *he)
2256 {
2257 	struct hpp_sort_entry *hse;
2258 	size_t len = fmt->user_len;
2259 
2260 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2261 
2262 	if (!len)
2263 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2264 
2265 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2266 }
2267 
2268 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2269 			       struct hist_entry *a, struct hist_entry *b)
2270 {
2271 	struct hpp_sort_entry *hse;
2272 
2273 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2274 	return hse->se->se_cmp(a, b);
2275 }
2276 
2277 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2278 				    struct hist_entry *a, struct hist_entry *b)
2279 {
2280 	struct hpp_sort_entry *hse;
2281 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2282 
2283 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2284 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2285 	return collapse_fn(a, b);
2286 }
2287 
2288 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2289 				struct hist_entry *a, struct hist_entry *b)
2290 {
2291 	struct hpp_sort_entry *hse;
2292 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2293 
2294 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2295 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2296 	return sort_fn(a, b);
2297 }
2298 
2299 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2300 {
2301 	return format->header == __sort__hpp_header;
2302 }
2303 
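/*
 * Generate perf_hpp__is_<key>_entry() helpers that check whether a hpp
 * format entry wraps the corresponding sort_<key> sort_entry.
 */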
2304 #define MK_SORT_ENTRY_CHK(key)					\
2305 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2306 {								\
2307 	struct hpp_sort_entry *hse;				\
2308 								\
2309 	if (!perf_hpp__is_sort_entry(fmt))			\
2310 		return false;					\
2311 								\
2312 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2313 	return hse->se == &sort_ ## key ;			\
2314 }
2315 
2316 #ifdef HAVE_LIBTRACEEVENT
2317 MK_SORT_ENTRY_CHK(trace)
2318 #else
2319 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2320 {
2321 	return false;
2322 }
2323 #endif
2324 MK_SORT_ENTRY_CHK(srcline)
2325 MK_SORT_ENTRY_CHK(srcfile)
2326 MK_SORT_ENTRY_CHK(thread)
2327 MK_SORT_ENTRY_CHK(comm)
2328 MK_SORT_ENTRY_CHK(dso)
2329 MK_SORT_ENTRY_CHK(sym)
2330 
2331 
2332 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2333 {
2334 	struct hpp_sort_entry *hse_a;
2335 	struct hpp_sort_entry *hse_b;
2336 
2337 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2338 		return false;
2339 
2340 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2341 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2342 
2343 	return hse_a->se == hse_b->se;
2344 }
2345 
2346 static void hse_free(struct perf_hpp_fmt *fmt)
2347 {
2348 	struct hpp_sort_entry *hse;
2349 
2350 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2351 	free(hse);
2352 }
2353 
2354 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2355 {
2356 	struct hpp_sort_entry *hse;
2357 
2358 	if (!perf_hpp__is_sort_entry(fmt))
2359 		return;
2360 
2361 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2362 
2363 	if (hse->se->se_init)
2364 		hse->se->se_init(he);
2365 }
2366 
2367 static struct hpp_sort_entry *
2368 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2369 {
2370 	struct hpp_sort_entry *hse;
2371 
2372 	hse = malloc(sizeof(*hse));
2373 	if (hse == NULL) {
2374 		pr_err("Memory allocation failed\n");
2375 		return NULL;
2376 	}
2377 
2378 	hse->se = sd->entry;
2379 	hse->hpp.name = sd->entry->se_header;
2380 	hse->hpp.header = __sort__hpp_header;
2381 	hse->hpp.width = __sort__hpp_width;
2382 	hse->hpp.entry = __sort__hpp_entry;
2383 	hse->hpp.color = NULL;
2384 
2385 	hse->hpp.cmp = __sort__hpp_cmp;
2386 	hse->hpp.collapse = __sort__hpp_collapse;
2387 	hse->hpp.sort = __sort__hpp_sort;
2388 	hse->hpp.equal = __sort__hpp_equal;
2389 	hse->hpp.free = hse_free;
2390 	hse->hpp.init = hse_init;
2391 
2392 	INIT_LIST_HEAD(&hse->hpp.list);
2393 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2394 	hse->hpp.elide = false;
2395 	hse->hpp.len = 0;
2396 	hse->hpp.user_len = 0;
2397 	hse->hpp.level = level;
2398 
2399 	return hse;
2400 }
2401 
2402 static void hpp_free(struct perf_hpp_fmt *fmt)
2403 {
2404 	free(fmt);
2405 }
2406 
2407 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2408 						       int level)
2409 {
2410 	struct perf_hpp_fmt *fmt;
2411 
2412 	fmt = memdup(hd->fmt, sizeof(*fmt));
2413 	if (fmt) {
2414 		INIT_LIST_HEAD(&fmt->list);
2415 		INIT_LIST_HEAD(&fmt->sort_list);
2416 		fmt->free = hpp_free;
2417 		fmt->level = level;
2418 	}
2419 
2420 	return fmt;
2421 }
2422 
2423 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2424 {
2425 	struct perf_hpp_fmt *fmt;
2426 	struct hpp_sort_entry *hse;
2427 	int ret = -1;
2428 	int r;
2429 
2430 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2431 		if (!perf_hpp__is_sort_entry(fmt))
2432 			continue;
2433 
2434 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2435 		if (hse->se->se_filter == NULL)
2436 			continue;
2437 
2438 		/*
2439 		 * A hist entry is filtered if any sort key in the hpp list
2440 		 * applies a filter, but filters of a non-matching type are skipped.
2441 		 */
2442 		r = hse->se->se_filter(he, type, arg);
2443 		if (r >= 0) {
2444 			if (ret < 0)
2445 				ret = 0;
2446 			ret |= r;
2447 		}
2448 	}
2449 
2450 	return ret;
2451 }
2452 
2453 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2454 					  struct perf_hpp_list *list,
2455 					  int level)
2456 {
2457 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2458 
2459 	if (hse == NULL)
2460 		return -1;
2461 
2462 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2463 	return 0;
2464 }
2465 
2466 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2467 					    struct perf_hpp_list *list)
2468 {
2469 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2470 
2471 	if (hse == NULL)
2472 		return -1;
2473 
2474 	perf_hpp_list__column_register(list, &hse->hpp);
2475 	return 0;
2476 }
2477 
2478 #ifndef HAVE_LIBTRACEEVENT
2479 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2480 {
2481 	return false;
2482 }
2483 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2484 				     struct hists *hists __maybe_unused)
2485 {
2486 	return false;
2487 }
2488 #else
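/*
 * A column created at runtime from a tracepoint format field, used by
 * the "trace_fields" and dynamic "<event>.<field>" sort keys.
 */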
2489 struct hpp_dynamic_entry {
2490 	struct perf_hpp_fmt hpp;
2491 	struct evsel *evsel;
2492 	struct tep_format_field *field;
2493 	unsigned dynamic_len;
2494 	bool raw_trace;
2495 };
2496 
2497 static int hde_width(struct hpp_dynamic_entry *hde)
2498 {
2499 	if (!hde->hpp.len) {
2500 		int len = hde->dynamic_len;
2501 		int namelen = strlen(hde->field->name);
2502 		int fieldlen = hde->field->size;
2503 
2504 		if (namelen > len)
2505 			len = namelen;
2506 
2507 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2508 			/* width needed to print the value in hex (2 chars per byte + "0x") */
2509 			fieldlen = hde->field->size * 2 + 2;
2510 		}
2511 		if (fieldlen > len)
2512 			len = fieldlen;
2513 
2514 		hde->hpp.len = len;
2515 	}
2516 	return hde->hpp.len;
2517 }
2518 
2519 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2520 			       struct hist_entry *he)
2521 {
2522 	char *str, *pos;
2523 	struct tep_format_field *field = hde->field;
2524 	size_t namelen;
2525 	bool last = false;
2526 
2527 	if (hde->raw_trace)
2528 		return;
2529 
2530 	/* parse pretty print result and update max length */
2531 	if (!he->trace_output)
2532 		he->trace_output = get_trace_output(he);
2533 
2534 	namelen = strlen(field->name);
2535 	str = he->trace_output;
2536 
2537 	while (str) {
2538 		pos = strchr(str, ' ');
2539 		if (pos == NULL) {
2540 			last = true;
2541 			pos = str + strlen(str);
2542 		}
2543 
2544 		if (!strncmp(str, field->name, namelen)) {
2545 			size_t len;
2546 
2547 			str += namelen + 1;
2548 			len = pos - str;
2549 
2550 			if (len > hde->dynamic_len)
2551 				hde->dynamic_len = len;
2552 			break;
2553 		}
2554 
2555 		if (last)
2556 			str = NULL;
2557 		else
2558 			str = pos + 1;
2559 	}
2560 }
2561 
2562 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2563 			      struct hists *hists __maybe_unused,
2564 			      int line __maybe_unused,
2565 			      int *span __maybe_unused)
2566 {
2567 	struct hpp_dynamic_entry *hde;
2568 	size_t len = fmt->user_len;
2569 
2570 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2571 
2572 	if (!len)
2573 		len = hde_width(hde);
2574 
2575 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2576 }
2577 
2578 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2579 			     struct perf_hpp *hpp __maybe_unused,
2580 			     struct hists *hists __maybe_unused)
2581 {
2582 	struct hpp_dynamic_entry *hde;
2583 	size_t len = fmt->user_len;
2584 
2585 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2586 
2587 	if (!len)
2588 		len = hde_width(hde);
2589 
2590 	return len;
2591 }
2592 
2593 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2594 {
2595 	struct hpp_dynamic_entry *hde;
2596 
2597 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2598 
2599 	return hists_to_evsel(hists) == hde->evsel;
2600 }
2601 
2602 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2603 			     struct hist_entry *he)
2604 {
2605 	struct hpp_dynamic_entry *hde;
2606 	size_t len = fmt->user_len;
2607 	char *str, *pos;
2608 	struct tep_format_field *field;
2609 	size_t namelen;
2610 	bool last = false;
2611 	int ret;
2612 
2613 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2614 
2615 	if (!len)
2616 		len = hde_width(hde);
2617 
2618 	if (hde->raw_trace)
2619 		goto raw_field;
2620 
2621 	if (!he->trace_output)
2622 		he->trace_output = get_trace_output(he);
2623 
2624 	field = hde->field;
2625 	namelen = strlen(field->name);
2626 	str = he->trace_output;
2627 
2628 	while (str) {
2629 		pos = strchr(str, ' ');
2630 		if (pos == NULL) {
2631 			last = true;
2632 			pos = str + strlen(str);
2633 		}
2634 
2635 		if (!strncmp(str, field->name, namelen)) {
2636 			str += namelen + 1;
2637 			str = strndup(str, pos - str);
2638 
2639 			if (str == NULL)
2640 				return scnprintf(hpp->buf, hpp->size,
2641 						 "%*.*s", len, len, "ERROR");
2642 			break;
2643 		}
2644 
2645 		if (last)
2646 			str = NULL;
2647 		else
2648 			str = pos + 1;
2649 	}
2650 
2651 	if (str == NULL) {
2652 		struct trace_seq seq;
2653 raw_field:
2654 		trace_seq_init(&seq);
2655 		tep_print_field(&seq, he->raw_data, hde->field);
2656 		str = seq.buffer;
2657 	}
2658 
2659 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2660 	free(str);
2661 	return ret;
2662 }
2663 
2664 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2665 			       struct hist_entry *a, struct hist_entry *b)
2666 {
2667 	struct hpp_dynamic_entry *hde;
2668 	struct tep_format_field *field;
2669 	unsigned offset, size;
2670 
2671 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2672 
2673 	field = hde->field;
2674 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2675 		unsigned long long dyn;
2676 
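		/*
		 * A dynamic field stores a descriptor word in the record:
		 * the low 16 bits hold the data offset, the high 16 bits
		 * hold its length (see the masking below).
		 */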
2677 		tep_read_number_field(field, a->raw_data, &dyn);
2678 		offset = dyn & 0xffff;
2679 		size = (dyn >> 16) & 0xffff;
2680 		if (tep_field_is_relative(field->flags))
2681 			offset += field->offset + field->size;
2682 		/* record max width for output */
2683 		if (size > hde->dynamic_len)
2684 			hde->dynamic_len = size;
2685 	} else {
2686 		offset = field->offset;
2687 		size = field->size;
2688 	}
2689 
2690 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2691 }
2692 
2693 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2694 {
2695 	return fmt->cmp == __sort__hde_cmp;
2696 }
2697 
2698 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2699 {
2700 	struct hpp_dynamic_entry *hde_a;
2701 	struct hpp_dynamic_entry *hde_b;
2702 
2703 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2704 		return false;
2705 
2706 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2707 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2708 
2709 	return hde_a->field == hde_b->field;
2710 }
2711 
2712 static void hde_free(struct perf_hpp_fmt *fmt)
2713 {
2714 	struct hpp_dynamic_entry *hde;
2715 
2716 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2717 	free(hde);
2718 }
2719 
2720 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2721 {
2722 	struct hpp_dynamic_entry *hde;
2723 
2724 	if (!perf_hpp__is_dynamic_entry(fmt))
2725 		return;
2726 
2727 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2728 	update_dynamic_len(hde, he);
2729 }
2730 
2731 static struct hpp_dynamic_entry *
2732 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2733 		      int level)
2734 {
2735 	struct hpp_dynamic_entry *hde;
2736 
2737 	hde = malloc(sizeof(*hde));
2738 	if (hde == NULL) {
2739 		pr_debug("Memory allocation failed\n");
2740 		return NULL;
2741 	}
2742 
2743 	hde->evsel = evsel;
2744 	hde->field = field;
2745 	hde->dynamic_len = 0;
2746 
2747 	hde->hpp.name = field->name;
2748 	hde->hpp.header = __sort__hde_header;
2749 	hde->hpp.width  = __sort__hde_width;
2750 	hde->hpp.entry  = __sort__hde_entry;
2751 	hde->hpp.color  = NULL;
2752 
2753 	hde->hpp.init = __sort__hde_init;
2754 	hde->hpp.cmp = __sort__hde_cmp;
2755 	hde->hpp.collapse = __sort__hde_cmp;
2756 	hde->hpp.sort = __sort__hde_cmp;
2757 	hde->hpp.equal = __sort__hde_equal;
2758 	hde->hpp.free = hde_free;
2759 
2760 	INIT_LIST_HEAD(&hde->hpp.list);
2761 	INIT_LIST_HEAD(&hde->hpp.sort_list);
2762 	hde->hpp.elide = false;
2763 	hde->hpp.len = 0;
2764 	hde->hpp.user_len = 0;
2765 	hde->hpp.level = level;
2766 
2767 	return hde;
2768 }
2769 #endif /* HAVE_LIBTRACEEVENT */
2770 
2771 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2772 {
2773 	struct perf_hpp_fmt *new_fmt = NULL;
2774 
2775 	if (perf_hpp__is_sort_entry(fmt)) {
2776 		struct hpp_sort_entry *hse, *new_hse;
2777 
2778 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2779 		new_hse = memdup(hse, sizeof(*hse));
2780 		if (new_hse)
2781 			new_fmt = &new_hse->hpp;
2782 #ifdef HAVE_LIBTRACEEVENT
2783 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2784 		struct hpp_dynamic_entry *hde, *new_hde;
2785 
2786 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2787 		new_hde = memdup(hde, sizeof(*hde));
2788 		if (new_hde)
2789 			new_fmt = &new_hde->hpp;
2790 #endif
2791 	} else {
2792 		new_fmt = memdup(fmt, sizeof(*fmt));
2793 	}
2794 
2795 	INIT_LIST_HEAD(&new_fmt->list);
2796 	INIT_LIST_HEAD(&new_fmt->sort_list);
2797 
2798 	return new_fmt;
2799 }
2800 
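/*
 * Split a dynamic sort token of the form [<event>.]<field>[/<option>],
 * e.g. "sched:sched_switch.prev_comm/raw"; the event and option parts
 * come back as NULL when they are not present.
 */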
2801 static int parse_field_name(char *str, char **event, char **field, char **opt)
2802 {
2803 	char *event_name, *field_name, *opt_name;
2804 
2805 	event_name = str;
2806 	field_name = strchr(str, '.');
2807 
2808 	if (field_name) {
2809 		*field_name++ = '\0';
2810 	} else {
2811 		event_name = NULL;
2812 		field_name = str;
2813 	}
2814 
2815 	opt_name = strchr(field_name, '/');
2816 	if (opt_name)
2817 		*opt_name++ = '\0';
2818 
2819 	*event = event_name;
2820 	*field = field_name;
2821 	*opt   = opt_name;
2822 
2823 	return 0;
2824 }
2825 
2826 /* Find the matching evsel for a given event name.  The event name can be:
2827  *   1. '%' + event index (e.g. '%1' for first event)
2828  *   2. full event name (e.g. sched:sched_switch)
2829  *   3. partial event name (should not contain ':')
2830  */
2831 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2832 {
2833 	struct evsel *evsel = NULL;
2834 	struct evsel *pos;
2835 	bool full_name;
2836 
2837 	/* case 1 */
2838 	if (event_name[0] == '%') {
2839 		int nr = strtol(event_name+1, NULL, 0);
2840 
2841 		if (nr > evlist->core.nr_entries)
2842 			return NULL;
2843 
2844 		evsel = evlist__first(evlist);
2845 		while (--nr > 0)
2846 			evsel = evsel__next(evsel);
2847 
2848 		return evsel;
2849 	}
2850 
2851 	full_name = !!strchr(event_name, ':');
2852 	evlist__for_each_entry(evlist, pos) {
2853 		/* case 2 */
2854 		if (full_name && !strcmp(pos->name, event_name))
2855 			return pos;
2856 		/* case 3 */
2857 		if (!full_name && strstr(pos->name, event_name)) {
2858 			if (evsel) {
2859 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2860 					 event_name, evsel->name, pos->name);
2861 				return NULL;
2862 			}
2863 			evsel = pos;
2864 		}
2865 	}
2866 
2867 	return evsel;
2868 }
2869 
2870 #ifdef HAVE_LIBTRACEEVENT
2871 static int __dynamic_dimension__add(struct evsel *evsel,
2872 				    struct tep_format_field *field,
2873 				    bool raw_trace, int level)
2874 {
2875 	struct hpp_dynamic_entry *hde;
2876 
2877 	hde = __alloc_dynamic_entry(evsel, field, level);
2878 	if (hde == NULL)
2879 		return -ENOMEM;
2880 
2881 	hde->raw_trace = raw_trace;
2882 
2883 	perf_hpp__register_sort_field(&hde->hpp);
2884 	return 0;
2885 }
2886 
2887 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2888 {
2889 	int ret;
2890 	struct tep_format_field *field;
2891 
2892 	field = evsel->tp_format->format.fields;
2893 	while (field) {
2894 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2895 		if (ret < 0)
2896 			return ret;
2897 
2898 		field = field->next;
2899 	}
2900 	return 0;
2901 }
2902 
2903 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2904 				  int level)
2905 {
2906 	int ret;
2907 	struct evsel *evsel;
2908 
2909 	evlist__for_each_entry(evlist, evsel) {
2910 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2911 			continue;
2912 
2913 		ret = add_evsel_fields(evsel, raw_trace, level);
2914 		if (ret < 0)
2915 			return ret;
2916 	}
2917 	return 0;
2918 }
2919 
2920 static int add_all_matching_fields(struct evlist *evlist,
2921 				   char *field_name, bool raw_trace, int level)
2922 {
2923 	int ret = -ESRCH;
2924 	struct evsel *evsel;
2925 	struct tep_format_field *field;
2926 
2927 	evlist__for_each_entry(evlist, evsel) {
2928 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2929 			continue;
2930 
2931 		field = tep_find_any_field(evsel->tp_format, field_name);
2932 		if (field == NULL)
2933 			continue;
2934 
2935 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2936 		if (ret < 0)
2937 			break;
2938 	}
2939 	return ret;
2940 }
2941 #endif /* HAVE_LIBTRACEEVENT */
2942 
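/*
 * Add sort entries for tracepoint fields.  'tok' may be "trace_fields"
 * (every field of every tracepoint event), "<event>.<field>",
 * "<event>.*", or a bare "<field>" that is matched against all
 * tracepoint events; a "/raw" suffix disables pretty-printing.
 * For instance, "perf report -s sched:sched_switch.prev_comm" would add
 * a column for that field (illustrative example).
 */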
2943 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2944 			     int level)
2945 {
2946 	char *str, *event_name, *field_name, *opt_name;
2947 	struct evsel *evsel;
2948 	bool raw_trace = symbol_conf.raw_trace;
2949 	int ret = 0;
2950 
2951 	if (evlist == NULL)
2952 		return -ENOENT;
2953 
2954 	str = strdup(tok);
2955 	if (str == NULL)
2956 		return -ENOMEM;
2957 
2958 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2959 		ret = -EINVAL;
2960 		goto out;
2961 	}
2962 
2963 	if (opt_name) {
2964 		if (strcmp(opt_name, "raw")) {
2965 			pr_debug("unsupported field option %s\n", opt_name);
2966 			ret = -EINVAL;
2967 			goto out;
2968 		}
2969 		raw_trace = true;
2970 	}
2971 
2972 #ifdef HAVE_LIBTRACEEVENT
2973 	if (!strcmp(field_name, "trace_fields")) {
2974 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2975 		goto out;
2976 	}
2977 
2978 	if (event_name == NULL) {
2979 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2980 		goto out;
2981 	}
2982 #else
2983 	evlist__for_each_entry(evlist, evsel) {
2984 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2985 			pr_err("%s %s", ret ? "," :
			       "This perf binary isn't linked with libtraceevent, can't process",
			       evsel__name(evsel));
2986 			ret = -ENOTSUP;
2987 		}
2988 	}
2989 
2990 	if (ret) {
2991 		pr_err("\n");
2992 		goto out;
2993 	}
2994 #endif
2995 
2996 	evsel = find_evsel(evlist, event_name);
2997 	if (evsel == NULL) {
2998 		pr_debug("Cannot find event: %s\n", event_name);
2999 		ret = -ENOENT;
3000 		goto out;
3001 	}
3002 
3003 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3004 		pr_debug("%s is not a tracepoint event\n", event_name);
3005 		ret = -EINVAL;
3006 		goto out;
3007 	}
3008 
3009 #ifdef HAVE_LIBTRACEEVENT
3010 	if (!strcmp(field_name, "*")) {
3011 		ret = add_evsel_fields(evsel, raw_trace, level);
3012 	} else {
3013 		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3014 
3015 		if (field == NULL) {
3016 			pr_debug("Cannot find event field for %s.%s\n",
3017 				 event_name, field_name);
3018 			ret = -ENOENT;
			goto out;	/* don't leak 'str' */
3019 		}
3020 
3021 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3022 	}
3023 #else
3024 	(void)level;
3025 	(void)raw_trace;
3026 #endif /* HAVE_LIBTRACEEVENT */
3027 
3028 out:
3029 	free(str);
3030 	return ret;
3031 }
3032 
3033 static int __sort_dimension__add(struct sort_dimension *sd,
3034 				 struct perf_hpp_list *list,
3035 				 int level)
3036 {
3037 	if (sd->taken)
3038 		return 0;
3039 
3040 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3041 		return -1;
3042 
3043 	if (sd->entry->se_collapse)
3044 		list->need_collapse = 1;
3045 
3046 	sd->taken = 1;
3047 
3048 	return 0;
3049 }
3050 
3051 static int __hpp_dimension__add(struct hpp_dimension *hd,
3052 				struct perf_hpp_list *list,
3053 				int level)
3054 {
3055 	struct perf_hpp_fmt *fmt;
3056 
3057 	if (hd->taken)
3058 		return 0;
3059 
3060 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3061 	if (!fmt)
3062 		return -1;
3063 
3064 	hd->taken = 1;
3065 	perf_hpp_list__register_sort_field(list, fmt);
3066 	return 0;
3067 }
3068 
3069 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3070 					struct sort_dimension *sd)
3071 {
3072 	if (sd->taken)
3073 		return 0;
3074 
3075 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3076 		return -1;
3077 
3078 	sd->taken = 1;
3079 	return 0;
3080 }
3081 
3082 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3083 				       struct hpp_dimension *hd)
3084 {
3085 	struct perf_hpp_fmt *fmt;
3086 
3087 	if (hd->taken)
3088 		return 0;
3089 
3090 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3091 	if (!fmt)
3092 		return -1;
3093 
3094 	hd->taken = 1;
3095 	perf_hpp_list__column_register(list, fmt);
3096 	return 0;
3097 }
3098 
3099 int hpp_dimension__add_output(unsigned col)
3100 {
3101 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3102 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3103 }
3104 
3105 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3106 			struct evlist *evlist,
3107 			int level)
3108 {
3109 	unsigned int i, j;
3110 
3111 	/*
3112 	 * Check whether there are any arch-specific sort dimensions
3113 	 * that are not applicable to the current architecture.  If so,
3114 	 * skip that sort key since we don't want to display it in the
3115 	 * output fields.
3116 	 */
3117 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3118 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3119 				!arch_support_sort_key(tok)) {
3120 			return 0;
3121 		}
3122 	}
3123 
3124 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3125 		struct sort_dimension *sd = &common_sort_dimensions[i];
3126 
3127 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3128 			continue;
3129 
3130 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3131 			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3132 				sort_dimension_add_dynamic_header(sd);
3133 		}
3134 
3135 		if (sd->entry == &sort_parent) {
3136 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3137 			if (ret) {
3138 				char err[BUFSIZ];
3139 
3140 				regerror(ret, &parent_regex, err, sizeof(err));
3141 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3142 				return -EINVAL;
3143 			}
3144 			list->parent = 1;
3145 		} else if (sd->entry == &sort_sym) {
3146 			list->sym = 1;
3147 			/*
3148 			 * perf diff displays the performance difference between
3149 			 * two or more perf.data files.  Those files could come
3150 			 * from different binaries, so we should not compare
3151 			 * their IPs, but the symbol names.
3152 			 */
3153 			if (sort__mode == SORT_MODE__DIFF)
3154 				sd->entry->se_collapse = sort__sym_sort;
3155 
3156 		} else if (sd->entry == &sort_dso) {
3157 			list->dso = 1;
3158 		} else if (sd->entry == &sort_socket) {
3159 			list->socket = 1;
3160 		} else if (sd->entry == &sort_thread) {
3161 			list->thread = 1;
3162 		} else if (sd->entry == &sort_comm) {
3163 			list->comm = 1;
3164 		}
3165 
3166 		return __sort_dimension__add(sd, list, level);
3167 	}
3168 
3169 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3170 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3171 
3172 		if (strncasecmp(tok, hd->name, strlen(tok)))
3173 			continue;
3174 
3175 		return __hpp_dimension__add(hd, list, level);
3176 	}
3177 
3178 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3179 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3180 
3181 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3182 			continue;
3183 
3184 		if (sort__mode != SORT_MODE__BRANCH)
3185 			return -EINVAL;
3186 
3187 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3188 			list->sym = 1;
3189 
3190 		__sort_dimension__add(sd, list, level);
3191 		return 0;
3192 	}
3193 
3194 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3195 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3196 
3197 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3198 			continue;
3199 
3200 		if (sort__mode != SORT_MODE__MEMORY)
3201 			return -EINVAL;
3202 
3203 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3204 			return -EINVAL;
3205 
3206 		if (sd->entry == &sort_mem_daddr_sym)
3207 			list->sym = 1;
3208 
3209 		__sort_dimension__add(sd, list, level);
3210 		return 0;
3211 	}
3212 
3213 	if (!add_dynamic_entry(evlist, tok, level))
3214 		return 0;
3215 
3216 	return -ESRCH;
3217 }
3218 
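/*
 * Parse the sort order string.  Keys are separated by ',' or ' ', and
 * keys wrapped in braces share one hierarchy level, e.g.
 * "comm,{dso,symbol}" keeps dso and symbol on the same level.
 */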
3219 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3220 			   struct evlist *evlist)
3221 {
3222 	char *tmp, *tok;
3223 	int ret = 0;
3224 	int level = 0;
3225 	int next_level = 1;
3226 	bool in_group = false;
3227 
3228 	do {
3229 		tok = str;
3230 		tmp = strpbrk(str, "{}, ");
3231 		if (tmp) {
3232 			if (in_group)
3233 				next_level = level;
3234 			else
3235 				next_level = level + 1;
3236 
3237 			if (*tmp == '{')
3238 				in_group = true;
3239 			else if (*tmp == '}')
3240 				in_group = false;
3241 
3242 			*tmp = '\0';
3243 			str = tmp + 1;
3244 		}
3245 
3246 		if (*tok) {
3247 			ret = sort_dimension__add(list, tok, evlist, level);
3248 			if (ret == -EINVAL) {
3249 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3250 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3251 				else
3252 					ui__error("Invalid --sort key: `%s'", tok);
3253 				break;
3254 			} else if (ret == -ESRCH) {
3255 				ui__error("Unknown --sort key: `%s'", tok);
3256 				break;
3257 			}
3258 		}
3259 
3260 		level = next_level;
3261 	} while (tmp);
3262 
3263 	return ret;
3264 }
3265 
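/*
 * The default_sort_orders[] table below is indexed by sort__mode, so its
 * entries must stay in the same order as enum sort_mode.
 */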
3266 static const char *get_default_sort_order(struct evlist *evlist)
3267 {
3268 	const char *default_sort_orders[] = {
3269 		default_sort_order,
3270 		default_branch_sort_order,
3271 		default_mem_sort_order,
3272 		default_top_sort_order,
3273 		default_diff_sort_order,
3274 		default_tracepoint_sort_order,
3275 	};
3276 	bool use_trace = true;
3277 	struct evsel *evsel;
3278 
3279 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3280 
3281 	if (evlist == NULL || evlist__empty(evlist))
3282 		goto out_no_evlist;
3283 
3284 	evlist__for_each_entry(evlist, evsel) {
3285 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3286 			use_trace = false;
3287 			break;
3288 		}
3289 	}
3290 
3291 	if (use_trace) {
3292 		sort__mode = SORT_MODE__TRACEPOINT;
3293 		if (symbol_conf.raw_trace)
3294 			return "trace_fields";
3295 	}
3296 out_no_evlist:
3297 	return default_sort_orders[sort__mode];
3298 }
3299 
3300 static int setup_sort_order(struct evlist *evlist)
3301 {
3302 	char *new_sort_order;
3303 
3304 	/*
3305 	 * Append '+'-prefixed sort order to the default sort
3306 	 * order string.
3307 	 */
3308 	if (!sort_order || is_strict_order(sort_order))
3309 		return 0;
3310 
3311 	if (sort_order[1] == '\0') {
3312 		ui__error("Invalid --sort key: `+'");
3313 		return -EINVAL;
3314 	}
3315 
3316 	/*
3317 	 * We allocate a new sort_order string, but we never free it
3318 	 * because it is referenced throughout the rest of the code.
3319 	 */
3320 	if (asprintf(&new_sort_order, "%s,%s",
3321 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3322 		pr_err("Not enough memory to set up --sort");
3323 		return -ENOMEM;
3324 	}
3325 
3326 	sort_order = new_sort_order;
3327 	return 0;
3328 }
3329 
3330 /*
3331  * Adds a 'pre,' prefix to 'str' if 'pre' is
3332  * not already part of 'str'.
3333  */
3334 static char *prefix_if_not_in(const char *pre, char *str)
3335 {
3336 	char *n;
3337 
3338 	if (!str || strstr(str, pre))
3339 		return str;
3340 
3341 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3342 		n = NULL;
3343 
3344 	free(str);
3345 	return n;
3346 }
3347 
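/*
 * Prepend the "overhead" key (and, with children accumulation, the
 * "overhead_children" key) so that output stays sorted by overhead by
 * default; perf diff manages its own columns, so it is left alone.
 */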
3348 static char *setup_overhead(char *keys)
3349 {
3350 	if (sort__mode == SORT_MODE__DIFF)
3351 		return keys;
3352 
3353 	keys = prefix_if_not_in("overhead", keys);
3354 
3355 	if (symbol_conf.cumulate_callchain)
3356 		keys = prefix_if_not_in("overhead_children", keys);
3357 
3358 	return keys;
3359 }
3360 
3361 static int __setup_sorting(struct evlist *evlist)
3362 {
3363 	char *str;
3364 	const char *sort_keys;
3365 	int ret = 0;
3366 
3367 	ret = setup_sort_order(evlist);
3368 	if (ret)
3369 		return ret;
3370 
3371 	sort_keys = sort_order;
3372 	if (sort_keys == NULL) {
3373 		if (is_strict_order(field_order)) {
3374 			/*
3375 			 * If the user specified a field order but no sort order,
3376 			 * honor it and don't add the default sort orders.
3377 			 */
3378 			return 0;
3379 		}
3380 
3381 		sort_keys = get_default_sort_order(evlist);
3382 	}
3383 
3384 	str = strdup(sort_keys);
3385 	if (str == NULL) {
3386 		pr_err("Not enough memory to setup sort keys");
3387 		return -ENOMEM;
3388 	}
3389 
3390 	/*
3391 	 * Prepend overhead fields for backward compatibility.
3392 	 */
3393 	if (!is_strict_order(field_order)) {
3394 		str = setup_overhead(str);
3395 		if (str == NULL) {
3396 			pr_err("Not enough memory to setup overhead keys");
3397 			return -ENOMEM;
3398 		}
3399 	}
3400 
3401 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3402 
3403 	free(str);
3404 	return ret;
3405 }
3406 
3407 void perf_hpp__set_elide(int idx, bool elide)
3408 {
3409 	struct perf_hpp_fmt *fmt;
3410 	struct hpp_sort_entry *hse;
3411 
3412 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3413 		if (!perf_hpp__is_sort_entry(fmt))
3414 			continue;
3415 
3416 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3417 		if (hse->se->se_width_idx == idx) {
3418 			fmt->elide = elide;
3419 			break;
3420 		}
3421 	}
3422 }
3423 
3424 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3425 {
3426 	if (list && strlist__nr_entries(list) == 1) {
3427 		if (fp != NULL)
3428 			fprintf(fp, "# %s: %s\n", list_name,
3429 				strlist__entry(list, 0)->s);
3430 		return true;
3431 	}
3432 	return false;
3433 }
3434 
3435 static bool get_elide(int idx, FILE *output)
3436 {
3437 	switch (idx) {
3438 	case HISTC_SYMBOL:
3439 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3440 	case HISTC_DSO:
3441 		return __get_elide(symbol_conf.dso_list, "dso", output);
3442 	case HISTC_COMM:
3443 		return __get_elide(symbol_conf.comm_list, "comm", output);
3444 	default:
3445 		break;
3446 	}
3447 
3448 	if (sort__mode != SORT_MODE__BRANCH)
3449 		return false;
3450 
3451 	switch (idx) {
3452 	case HISTC_SYMBOL_FROM:
3453 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3454 	case HISTC_SYMBOL_TO:
3455 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3456 	case HISTC_DSO_FROM:
3457 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3458 	case HISTC_DSO_TO:
3459 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3460 	case HISTC_ADDR_FROM:
3461 		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3462 	case HISTC_ADDR_TO:
3463 		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3464 	default:
3465 		break;
3466 	}
3467 
3468 	return false;
3469 }
3470 
3471 void sort__setup_elide(FILE *output)
3472 {
3473 	struct perf_hpp_fmt *fmt;
3474 	struct hpp_sort_entry *hse;
3475 
3476 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3477 		if (!perf_hpp__is_sort_entry(fmt))
3478 			continue;
3479 
3480 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3481 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3482 	}
3483 
3484 	/*
3485 	 * It makes no sense to elide all of the sort entries.
3486 	 * If that happens, revert them all so they show up again.
3487 	 */
3488 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3489 		if (!perf_hpp__is_sort_entry(fmt))
3490 			continue;
3491 
3492 		if (!fmt->elide)
3493 			return;
3494 	}
3495 
3496 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3497 		if (!perf_hpp__is_sort_entry(fmt))
3498 			continue;
3499 
3500 		fmt->elide = false;
3501 	}
3502 }
3503 
3504 int output_field_add(struct perf_hpp_list *list, char *tok)
3505 {
3506 	unsigned int i;
3507 
3508 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3509 		struct sort_dimension *sd = &common_sort_dimensions[i];
3510 
3511 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3512 			continue;
3513 
3514 		return __sort_dimension__add_output(list, sd);
3515 	}
3516 
3517 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3518 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3519 
3520 		if (strncasecmp(tok, hd->name, strlen(tok)))
3521 			continue;
3522 
3523 		return __hpp_dimension__add_output(list, hd);
3524 	}
3525 
3526 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3527 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3528 
3529 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3530 			continue;
3531 
3532 		if (sort__mode != SORT_MODE__BRANCH)
3533 			return -EINVAL;
3534 
3535 		return __sort_dimension__add_output(list, sd);
3536 	}
3537 
3538 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3539 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3540 
3541 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3542 			continue;
3543 
3544 		if (sort__mode != SORT_MODE__MEMORY)
3545 			return -EINVAL;
3546 
3547 		return __sort_dimension__add_output(list, sd);
3548 	}
3549 
3550 	return -ESRCH;
3551 }
3552 
3553 static int setup_output_list(struct perf_hpp_list *list, char *str)
3554 {
3555 	char *tmp, *tok;
3556 	int ret = 0;
3557 
3558 	for (tok = strtok_r(str, ", ", &tmp);
3559 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3560 		ret = output_field_add(list, tok);
3561 		if (ret == -EINVAL) {
3562 			ui__error("Invalid --fields key: `%s'", tok);
3563 			break;
3564 		} else if (ret == -ESRCH) {
3565 			ui__error("Unknown --fields key: `%s'", tok);
3566 			break;
3567 		}
3568 	}
3569 
3570 	return ret;
3571 }
3572 
3573 void reset_dimensions(void)
3574 {
3575 	unsigned int i;
3576 
3577 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3578 		common_sort_dimensions[i].taken = 0;
3579 
3580 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3581 		hpp_sort_dimensions[i].taken = 0;
3582 
3583 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3584 		bstack_sort_dimensions[i].taken = 0;
3585 
3586 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3587 		memory_sort_dimensions[i].taken = 0;
3588 }
3589 
3590 bool is_strict_order(const char *order)
3591 {
3592 	return order && (*order != '+');
3593 }
3594 
3595 static int __setup_output_field(void)
3596 {
3597 	char *str, *strp;
3598 	int ret = -EINVAL;
3599 
3600 	if (field_order == NULL)
3601 		return 0;
3602 
3603 	strp = str = strdup(field_order);
3604 	if (str == NULL) {
3605 		pr_err("Not enough memory to setup output fields");
3606 		return -ENOMEM;
3607 	}
3608 
3609 	if (!is_strict_order(field_order))
3610 		strp++;
3611 
3612 	if (!strlen(strp)) {
3613 		ui__error("Invalid --fields key: `+'");
3614 		goto out;
3615 	}
3616 
3617 	ret = setup_output_list(&perf_hpp_list, strp);
3618 
3619 out:
3620 	free(str);
3621 	return ret;
3622 }
3623 
3624 int setup_sorting(struct evlist *evlist)
3625 {
3626 	int err;
3627 
3628 	err = __setup_sorting(evlist);
3629 	if (err < 0)
3630 		return err;
3631 
3632 	if (parent_pattern != default_parent_pattern) {
3633 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3634 		if (err < 0)
3635 			return err;
3636 	}
3637 
3638 	reset_dimensions();
3639 
3640 	/*
3641 	 * perf diff doesn't use default hpp output fields.
3642 	 */
3643 	if (sort__mode != SORT_MODE__DIFF)
3644 		perf_hpp__init();
3645 
3646 	err = __setup_output_field();
3647 	if (err < 0)
3648 		return err;
3649 
3650 	/* copy sort keys to output fields */
3651 	perf_hpp__setup_output_field(&perf_hpp_list);
3652 	/* and then copy output fields to sort keys */
3653 	perf_hpp__append_sort_keys(&perf_hpp_list);
3654 
3655 	/* setup hists-specific output fields */
3656 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3657 		return -1;
3658 
3659 	return 0;
3660 }
3661 
3662 void reset_output_field(void)
3663 {
3664 	perf_hpp_list.need_collapse = 0;
3665 	perf_hpp_list.parent = 0;
3666 	perf_hpp_list.sym = 0;
3667 	perf_hpp_list.dso = 0;
3668 
3669 	field_order = NULL;
3670 	sort_order = NULL;
3671 
3672 	reset_dimensions();
3673 	perf_hpp__reset_output_field(&perf_hpp_list);
3674 }
3675 
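/*
 * Continuation indent used by add_key() below when wrapping the
 * generated sort-key help text onto a new line.
 */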
3676 #define INDENT (3*8 + 1)
3677 
3678 static void add_key(struct strbuf *sb, const char *str, int *llen)
3679 {
3680 	if (!str)
3681 		return;
3682 
3683 	if (*llen >= 75) {
3684 		strbuf_addstr(sb, "\n\t\t\t ");
3685 		*llen = INDENT;
3686 	}
3687 	strbuf_addf(sb, " %s", str);
3688 	*llen += strlen(str) + 1;
3689 }
3690 
3691 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3692 			    int *llen)
3693 {
3694 	int i;
3695 
3696 	for (i = 0; i < n; i++)
3697 		add_key(sb, s[i].name, llen);
3698 }
3699 
3700 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3701 				int *llen)
3702 {
3703 	int i;
3704 
3705 	for (i = 0; i < n; i++)
3706 		add_key(sb, s[i].name, llen);
3707 }
3708 
3709 char *sort_help(const char *prefix)
3710 {
3711 	struct strbuf sb;
3712 	char *s;
3713 	int len = strlen(prefix) + INDENT;
3714 
3715 	strbuf_init(&sb, 300);
3716 	strbuf_addstr(&sb, prefix);
3717 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3718 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3719 	add_sort_string(&sb, common_sort_dimensions,
3720 			    ARRAY_SIZE(common_sort_dimensions), &len);
3721 	add_sort_string(&sb, bstack_sort_dimensions,
3722 			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3723 	add_sort_string(&sb, memory_sort_dimensions,
3724 			    ARRAY_SIZE(memory_sort_dimensions), &len);
3725 	s = strbuf_detach(&sb, NULL);
3726 	strbuf_release(&sb);
3727 	return s;
3728 }
3729