xref: /linux/tools/perf/util/sort.c (revision 54618888d1ea7a26f8bccfb89e3c2420350c8047)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "annotate.h"
27 #include "event.h"
28 #include "time-utils.h"
29 #include "cgroup.h"
30 #include "machine.h"
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 
34 #ifdef HAVE_LIBTRACEEVENT
35 #include <traceevent/event-parse.h>
36 #endif
37 
38 regex_t		parent_regex;
39 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
40 const char	*parent_pattern = default_parent_pattern;
41 const char	*default_sort_order = "comm,dso,symbol";
42 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
43 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
44 const char	default_top_sort_order[] = "dso,symbol";
45 const char	default_diff_sort_order[] = "dso,symbol";
46 const char	default_tracepoint_sort_order[] = "trace";
47 const char	*sort_order;
48 const char	*field_order;
49 regex_t		ignore_callees_regex;
50 int		have_ignore_callees = 0;
51 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
52 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
53 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
54 
55 /*
56  * Replaces all occurrences of the character used with the:
57  *
58  * -t, --field-separator
59  *
60  * option.  That option selects a special separator character and disables
61  * padding with spaces, so every occurrence of the separator in symbol names
62  * (and other output) is replaced with a '.', keeping the separator unambiguous.
63  */
64 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
65 {
66 	int n;
67 	va_list ap;
68 
69 	va_start(ap, fmt);
70 	n = vsnprintf(bf, size, fmt, ap);
71 	if (symbol_conf.field_sep && n > 0) {
72 		char *sep = bf;
73 
74 		while (1) {
75 			sep = strchr(sep, *symbol_conf.field_sep);
76 			if (sep == NULL)
77 				break;
78 			*sep = '.';
79 		}
80 	}
81 	va_end(ap);
82 
83 	if (n >= (int)size)
84 		return size - 1;
85 	return n;
86 }
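/*
 * Illustrative sketch, not part of the original file: with
 * "perf report -t :" symbol_conf.field_sep is ":", so formatting a C++
 * symbol name through the helper above rewrites the separator, e.g.:
 *
 *	char buf[32];
 *	repsep_snprintf(buf, sizeof(buf), "%s", "std::vector");
 *	// buf now holds "std..vector", so ':' only ever delimits columns
 */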
87 
88 static int64_t cmp_null(const void *l, const void *r)
89 {
90 	if (!l && !r)
91 		return 0;
92 	else if (!l)
93 		return -1;
94 	else
95 		return 1;
96 }
97 
98 /* --sort pid */
99 
100 static int64_t
101 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
102 {
103 	return right->thread->tid - left->thread->tid;
104 }
105 
106 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
107 				       size_t size, unsigned int width)
108 {
109 	const char *comm = thread__comm_str(he->thread);
110 
111 	width = max(7U, width) - 8;
112 	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
113 			       width, width, comm ?: "");
114 }
115 
116 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
117 {
118 	const struct thread *th = arg;
119 
120 	if (type != HIST_FILTER__THREAD)
121 		return -1;
122 
123 	return th && he->thread != th;
124 }
125 
126 struct sort_entry sort_thread = {
127 	.se_header	= "    Pid:Command",
128 	.se_cmp		= sort__thread_cmp,
129 	.se_snprintf	= hist_entry__thread_snprintf,
130 	.se_filter	= hist_entry__thread_filter,
131 	.se_width_idx	= HISTC_THREAD,
132 };
133 
134 /* --sort comm */
135 
136 /*
137  * We can't use pointer comparison in functions below,
138  * because it gives different results based on pointer
139  * values, which could break some sorting assumptions.
140  */
141 static int64_t
142 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
143 {
144 	return strcmp(comm__str(right->comm), comm__str(left->comm));
145 }
146 
147 static int64_t
148 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
149 {
150 	return strcmp(comm__str(right->comm), comm__str(left->comm));
151 }
152 
153 static int64_t
154 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
155 {
156 	return strcmp(comm__str(right->comm), comm__str(left->comm));
157 }
158 
159 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
160 				     size_t size, unsigned int width)
161 {
162 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
163 }
164 
165 struct sort_entry sort_comm = {
166 	.se_header	= "Command",
167 	.se_cmp		= sort__comm_cmp,
168 	.se_collapse	= sort__comm_collapse,
169 	.se_sort	= sort__comm_sort,
170 	.se_snprintf	= hist_entry__comm_snprintf,
171 	.se_filter	= hist_entry__thread_filter,
172 	.se_width_idx	= HISTC_COMM,
173 };
174 
175 /* --sort dso */
176 
177 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
178 {
179 	struct dso *dso_l = map_l ? map_l->dso : NULL;
180 	struct dso *dso_r = map_r ? map_r->dso : NULL;
181 	const char *dso_name_l, *dso_name_r;
182 
183 	if (!dso_l || !dso_r)
184 		return cmp_null(dso_r, dso_l);
185 
186 	if (verbose > 0) {
187 		dso_name_l = dso_l->long_name;
188 		dso_name_r = dso_r->long_name;
189 	} else {
190 		dso_name_l = dso_l->short_name;
191 		dso_name_r = dso_r->short_name;
192 	}
193 
194 	return strcmp(dso_name_l, dso_name_r);
195 }
196 
197 static int64_t
198 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
199 {
200 	return _sort__dso_cmp(right->ms.map, left->ms.map);
201 }
202 
203 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
204 				     size_t size, unsigned int width)
205 {
206 	if (map && map->dso) {
207 		const char *dso_name = verbose > 0 ? map->dso->long_name :
208 			map->dso->short_name;
209 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
210 	}
211 
212 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
213 }
214 
215 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
216 				    size_t size, unsigned int width)
217 {
218 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
219 }
220 
221 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
222 {
223 	const struct dso *dso = arg;
224 
225 	if (type != HIST_FILTER__DSO)
226 		return -1;
227 
228 	return dso && (!he->ms.map || he->ms.map->dso != dso);
229 }
230 
231 struct sort_entry sort_dso = {
232 	.se_header	= "Shared Object",
233 	.se_cmp		= sort__dso_cmp,
234 	.se_snprintf	= hist_entry__dso_snprintf,
235 	.se_filter	= hist_entry__dso_filter,
236 	.se_width_idx	= HISTC_DSO,
237 };
238 
239 /* --sort symbol */
240 
241 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
242 {
243 	return (int64_t)(right_ip - left_ip);
244 }
245 
246 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
247 {
248 	if (!sym_l || !sym_r)
249 		return cmp_null(sym_l, sym_r);
250 
251 	if (sym_l == sym_r)
252 		return 0;
253 
254 	if (sym_l->inlined || sym_r->inlined) {
255 		int ret = strcmp(sym_l->name, sym_r->name);
256 
257 		if (ret)
258 			return ret;
259 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
260 			return 0;
261 	}
262 
263 	if (sym_l->start != sym_r->start)
264 		return (int64_t)(sym_r->start - sym_l->start);
265 
266 	return (int64_t)(sym_r->end - sym_l->end);
267 }
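/*
 * Note added for illustration, not in the original file: inlined symbols
 * are compared by name first, and two instances are treated as the same
 * symbol when their address ranges overlap, since the same inline function
 * can be expanded at many different call sites.
 */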
268 
269 static int64_t
270 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
271 {
272 	int64_t ret;
273 
274 	if (!left->ms.sym && !right->ms.sym)
275 		return _sort__addr_cmp(left->ip, right->ip);
276 
277 	/*
278 	 * comparing symbol address alone is not enough since it's a
279 	 * relative address within a dso.
280 	 */
281 	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
282 		ret = sort__dso_cmp(left, right);
283 		if (ret != 0)
284 			return ret;
285 	}
286 
287 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
288 }
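/*
 * Illustration only, not part of the original file: symbol addresses are
 * dso-relative, so e.g. a hypothetical libfoo.so and libbar.so can both
 * have a hot function starting at offset 0x1040.  Without the
 * sort__dso_cmp() call above, samples from those two distinct functions
 * would compare as equal and collapse into a single histogram entry.
 */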
289 
290 static int64_t
291 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
292 {
293 	if (!left->ms.sym || !right->ms.sym)
294 		return cmp_null(left->ms.sym, right->ms.sym);
295 
296 	return strcmp(right->ms.sym->name, left->ms.sym->name);
297 }
298 
299 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
300 				     u64 ip, char level, char *bf, size_t size,
301 				     unsigned int width)
302 {
303 	struct symbol *sym = ms->sym;
304 	struct map *map = ms->map;
305 	size_t ret = 0;
306 
307 	if (verbose > 0) {
308 		char o = map ? dso__symtab_origin(map->dso) : '!';
309 		u64 rip = ip;
310 
311 		if (map && map->dso && map->dso->kernel
312 		    && map->dso->adjust_symbols)
313 			rip = map->unmap_ip(map, ip);
314 
315 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
316 				       BITS_PER_LONG / 4 + 2, rip, o);
317 	}
318 
319 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
320 	if (sym && map) {
321 		if (sym->type == STT_OBJECT) {
322 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
323 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
324 					ip - map->unmap_ip(map, sym->start));
325 		} else {
326 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
327 					       width - ret,
328 					       sym->name);
329 			if (sym->inlined)
330 				ret += repsep_snprintf(bf + ret, size - ret,
331 						       " (inlined)");
332 		}
333 	} else {
334 		size_t len = BITS_PER_LONG / 4;
335 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
336 				       len, ip);
337 	}
338 
339 	return ret;
340 }
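/*
 * Example output, added for illustration (not in the original file): with
 * verbose enabled this prints the unmapped address and a symtab-origin
 * character before the privilege level and name, roughly
 *
 *	0xffffffff81234560 k [k] copy_process
 *
 * while the non-verbose form is just "[k] copy_process"; samples with no
 * resolved symbol fall back to the bare hex address in the else branch.
 */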
341 
342 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
343 {
344 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
345 					 he->level, bf, size, width);
346 }
347 
348 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
349 {
350 	const char *sym = arg;
351 
352 	if (type != HIST_FILTER__SYMBOL)
353 		return -1;
354 
355 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
356 }
357 
358 struct sort_entry sort_sym = {
359 	.se_header	= "Symbol",
360 	.se_cmp		= sort__sym_cmp,
361 	.se_sort	= sort__sym_sort,
362 	.se_snprintf	= hist_entry__sym_snprintf,
363 	.se_filter	= hist_entry__sym_filter,
364 	.se_width_idx	= HISTC_SYMBOL,
365 };
366 
367 /* --sort srcline */
368 
369 char *hist_entry__srcline(struct hist_entry *he)
370 {
371 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
372 }
373 
374 static int64_t
375 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
376 {
377 	int64_t ret;
378 
379 	ret = _sort__addr_cmp(left->ip, right->ip);
380 	if (ret)
381 		return ret;
382 
383 	return sort__dso_cmp(left, right);
384 }
385 
386 static int64_t
387 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
388 {
389 	if (!left->srcline)
390 		left->srcline = hist_entry__srcline(left);
391 	if (!right->srcline)
392 		right->srcline = hist_entry__srcline(right);
393 
394 	return strcmp(right->srcline, left->srcline);
395 }
396 
397 static int64_t
398 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
399 {
400 	return sort__srcline_collapse(left, right);
401 }
402 
403 static void
404 sort__srcline_init(struct hist_entry *he)
405 {
406 	if (!he->srcline)
407 		he->srcline = hist_entry__srcline(he);
408 }
409 
410 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
411 					size_t size, unsigned int width)
412 {
413 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
414 }
415 
416 struct sort_entry sort_srcline = {
417 	.se_header	= "Source:Line",
418 	.se_cmp		= sort__srcline_cmp,
419 	.se_collapse	= sort__srcline_collapse,
420 	.se_sort	= sort__srcline_sort,
421 	.se_init	= sort__srcline_init,
422 	.se_snprintf	= hist_entry__srcline_snprintf,
423 	.se_width_idx	= HISTC_SRCLINE,
424 };
425 
426 /* --sort srcline_from */
427 
428 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
429 {
430 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
431 }
432 
433 static int64_t
434 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
435 {
436 	return left->branch_info->from.addr - right->branch_info->from.addr;
437 }
438 
439 static int64_t
440 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
441 {
442 	if (!left->branch_info->srcline_from)
443 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
444 
445 	if (!right->branch_info->srcline_from)
446 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
447 
448 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
449 }
450 
451 static int64_t
452 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
453 {
454 	return sort__srcline_from_collapse(left, right);
455 }
456 
457 static void sort__srcline_from_init(struct hist_entry *he)
458 {
459 	if (!he->branch_info->srcline_from)
460 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
461 }
462 
463 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
464 					size_t size, unsigned int width)
465 {
466 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
467 }
468 
469 struct sort_entry sort_srcline_from = {
470 	.se_header	= "From Source:Line",
471 	.se_cmp		= sort__srcline_from_cmp,
472 	.se_collapse	= sort__srcline_from_collapse,
473 	.se_sort	= sort__srcline_from_sort,
474 	.se_init	= sort__srcline_from_init,
475 	.se_snprintf	= hist_entry__srcline_from_snprintf,
476 	.se_width_idx	= HISTC_SRCLINE_FROM,
477 };
478 
479 /* --sort srcline_to */
480 
481 static int64_t
482 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
483 {
484 	return left->branch_info->to.addr - right->branch_info->to.addr;
485 }
486 
487 static int64_t
488 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
489 {
490 	if (!left->branch_info->srcline_to)
491 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
492 
493 	if (!right->branch_info->srcline_to)
494 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
495 
496 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
497 }
498 
499 static int64_t
500 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
501 {
502 	return sort__srcline_to_collapse(left, right);
503 }
504 
505 static void sort__srcline_to_init(struct hist_entry *he)
506 {
507 	if (!he->branch_info->srcline_to)
508 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
509 }
510 
511 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
512 					size_t size, unsigned int width)
513 {
514 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
515 }
516 
517 struct sort_entry sort_srcline_to = {
518 	.se_header	= "To Source:Line",
519 	.se_cmp		= sort__srcline_to_cmp,
520 	.se_collapse	= sort__srcline_to_collapse,
521 	.se_sort	= sort__srcline_to_sort,
522 	.se_init	= sort__srcline_to_init,
523 	.se_snprintf	= hist_entry__srcline_to_snprintf,
524 	.se_width_idx	= HISTC_SRCLINE_TO,
525 };
526 
527 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
528 					size_t size, unsigned int width)
529 {
530 
531 	struct symbol *sym = he->ms.sym;
532 	struct annotation *notes;
533 	double ipc = 0.0, coverage = 0.0;
534 	char tmp[64];
535 
536 	if (!sym)
537 		return repsep_snprintf(bf, size, "%-*s", width, "-");
538 
539 	notes = symbol__annotation(sym);
540 
541 	if (notes->hit_cycles)
542 		ipc = notes->hit_insn / ((double)notes->hit_cycles);
543 
544 	if (notes->total_insn) {
545 		coverage = notes->cover_insn * 100.0 /
546 			((double)notes->total_insn);
547 	}
548 
549 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
550 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
551 }
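/*
 * Worked example (illustrative, not in the original file): if annotation
 * recorded hit_insn = 1200 over hit_cycles = 600, IPC is 2.00; if 80 of
 * total_insn = 100 instructions were covered, coverage is 80.0%, so the
 * column reads roughly "2.00  [ 80.0%]".
 */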
552 
553 struct sort_entry sort_sym_ipc = {
554 	.se_header	= "IPC   [IPC Coverage]",
555 	.se_cmp		= sort__sym_cmp,
556 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
557 	.se_width_idx	= HISTC_SYMBOL_IPC,
558 };
559 
560 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
561 					     __maybe_unused,
562 					     char *bf, size_t size,
563 					     unsigned int width)
564 {
565 	char tmp[64];
566 
567 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
568 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
569 }
570 
571 struct sort_entry sort_sym_ipc_null = {
572 	.se_header	= "IPC   [IPC Coverage]",
573 	.se_cmp		= sort__sym_cmp,
574 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
575 	.se_width_idx	= HISTC_SYMBOL_IPC,
576 };
577 
578 /* --sort srcfile */
579 
580 static char no_srcfile[1];
581 
582 static char *hist_entry__get_srcfile(struct hist_entry *e)
583 {
584 	char *sf, *p;
585 	struct map *map = e->ms.map;
586 
587 	if (!map)
588 		return no_srcfile;
589 
590 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
591 			 e->ms.sym, false, true, true, e->ip);
592 	if (!strcmp(sf, SRCLINE_UNKNOWN))
593 		return no_srcfile;
594 	p = strchr(sf, ':');
595 	if (p && *sf) {
596 		*p = 0;
597 		return sf;
598 	}
599 	free(sf);
600 	return no_srcfile;
601 }
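/*
 * Illustrative note, not part of the original file: __get_srcline()
 * typically returns a "file:line" style string, so the truncation at ':'
 * above turns e.g. "kernel/fork.c:1423" into "kernel/fork.c", which is
 * what the srcfile sort key groups on.
 */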
602 
603 static int64_t
604 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
605 {
606 	if (!left->srcfile)
607 		left->srcfile = hist_entry__get_srcfile(left);
608 	if (!right->srcfile)
609 		right->srcfile = hist_entry__get_srcfile(right);
610 
611 	return strcmp(right->srcfile, left->srcfile);
612 }
613 
614 static int64_t
615 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
616 {
617 	if (!left->srcfile)
618 		left->srcfile = hist_entry__get_srcfile(left);
619 	if (!right->srcfile)
620 		right->srcfile = hist_entry__get_srcfile(right);
621 
622 	return strcmp(right->srcfile, left->srcfile);
623 }
624 
625 static int64_t
626 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
627 {
628 	return sort__srcfile_collapse(left, right);
629 }
630 
631 static void sort__srcfile_init(struct hist_entry *he)
632 {
633 	if (!he->srcfile)
634 		he->srcfile = hist_entry__get_srcfile(he);
635 }
636 
637 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
638 					size_t size, unsigned int width)
639 {
640 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
641 }
642 
643 struct sort_entry sort_srcfile = {
644 	.se_header	= "Source File",
645 	.se_cmp		= sort__srcfile_cmp,
646 	.se_collapse	= sort__srcfile_collapse,
647 	.se_sort	= sort__srcfile_sort,
648 	.se_init	= sort__srcfile_init,
649 	.se_snprintf	= hist_entry__srcfile_snprintf,
650 	.se_width_idx	= HISTC_SRCFILE,
651 };
652 
653 /* --sort parent */
654 
655 static int64_t
656 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
657 {
658 	struct symbol *sym_l = left->parent;
659 	struct symbol *sym_r = right->parent;
660 
661 	if (!sym_l || !sym_r)
662 		return cmp_null(sym_l, sym_r);
663 
664 	return strcmp(sym_r->name, sym_l->name);
665 }
666 
667 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
668 				       size_t size, unsigned int width)
669 {
670 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
671 			      he->parent ? he->parent->name : "[other]");
672 }
673 
674 struct sort_entry sort_parent = {
675 	.se_header	= "Parent symbol",
676 	.se_cmp		= sort__parent_cmp,
677 	.se_snprintf	= hist_entry__parent_snprintf,
678 	.se_width_idx	= HISTC_PARENT,
679 };
680 
681 /* --sort cpu */
682 
683 static int64_t
684 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
685 {
686 	return right->cpu - left->cpu;
687 }
688 
689 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
690 				    size_t size, unsigned int width)
691 {
692 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
693 }
694 
695 struct sort_entry sort_cpu = {
696 	.se_header      = "CPU",
697 	.se_cmp	        = sort__cpu_cmp,
698 	.se_snprintf    = hist_entry__cpu_snprintf,
699 	.se_width_idx	= HISTC_CPU,
700 };
701 
702 /* --sort cgroup_id */
703 
704 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
705 {
706 	return (int64_t)(right_dev - left_dev);
707 }
708 
709 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
710 {
711 	return (int64_t)(right_ino - left_ino);
712 }
713 
714 static int64_t
715 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
716 {
717 	int64_t ret;
718 
719 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
720 	if (ret != 0)
721 		return ret;
722 
723 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
724 				       left->cgroup_id.ino);
725 }
726 
727 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
728 					  char *bf, size_t size,
729 					  unsigned int width __maybe_unused)
730 {
731 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
732 			       he->cgroup_id.ino);
733 }
734 
735 struct sort_entry sort_cgroup_id = {
736 	.se_header      = "cgroup id (dev/inode)",
737 	.se_cmp	        = sort__cgroup_id_cmp,
738 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
739 	.se_width_idx	= HISTC_CGROUP_ID,
740 };
741 
742 /* --sort cgroup */
743 
744 static int64_t
745 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
746 {
747 	return right->cgroup - left->cgroup;
748 }
749 
750 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
751 				       char *bf, size_t size,
752 				       unsigned int width __maybe_unused)
753 {
754 	const char *cgrp_name = "N/A";
755 
756 	if (he->cgroup) {
757 		struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
758 						   he->cgroup);
759 		if (cgrp != NULL)
760 			cgrp_name = cgrp->name;
761 		else
762 			cgrp_name = "unknown";
763 	}
764 
765 	return repsep_snprintf(bf, size, "%s", cgrp_name);
766 }
767 
768 struct sort_entry sort_cgroup = {
769 	.se_header      = "Cgroup",
770 	.se_cmp	        = sort__cgroup_cmp,
771 	.se_snprintf    = hist_entry__cgroup_snprintf,
772 	.se_width_idx	= HISTC_CGROUP,
773 };
774 
775 /* --sort socket */
776 
777 static int64_t
778 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
779 {
780 	return right->socket - left->socket;
781 }
782 
783 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
784 				    size_t size, unsigned int width)
785 {
786 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
787 }
788 
789 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
790 {
791 	int sk = *(const int *)arg;
792 
793 	if (type != HIST_FILTER__SOCKET)
794 		return -1;
795 
796 	return sk >= 0 && he->socket != sk;
797 }
798 
799 struct sort_entry sort_socket = {
800 	.se_header      = "Socket",
801 	.se_cmp	        = sort__socket_cmp,
802 	.se_snprintf    = hist_entry__socket_snprintf,
803 	.se_filter      = hist_entry__socket_filter,
804 	.se_width_idx	= HISTC_SOCKET,
805 };
806 
807 /* --sort time */
808 
809 static int64_t
810 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
811 {
812 	return right->time - left->time;
813 }
814 
815 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
816 				    size_t size, unsigned int width)
817 {
818 	char he_time[32];
819 
820 	if (symbol_conf.nanosecs)
821 		timestamp__scnprintf_nsec(he->time, he_time,
822 					  sizeof(he_time));
823 	else
824 		timestamp__scnprintf_usec(he->time, he_time,
825 					  sizeof(he_time));
826 
827 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
828 }
829 
830 struct sort_entry sort_time = {
831 	.se_header      = "Time",
832 	.se_cmp	        = sort__time_cmp,
833 	.se_snprintf    = hist_entry__time_snprintf,
834 	.se_width_idx	= HISTC_TIME,
835 };
836 
837 /* --sort trace */
838 
839 #ifdef HAVE_LIBTRACEEVENT
840 static char *get_trace_output(struct hist_entry *he)
841 {
842 	struct trace_seq seq;
843 	struct evsel *evsel;
844 	struct tep_record rec = {
845 		.data = he->raw_data,
846 		.size = he->raw_size,
847 	};
848 
849 	evsel = hists_to_evsel(he->hists);
850 
851 	trace_seq_init(&seq);
852 	if (symbol_conf.raw_trace) {
853 		tep_print_fields(&seq, he->raw_data, he->raw_size,
854 				 evsel->tp_format);
855 	} else {
856 		tep_print_event(evsel->tp_format->tep,
857 				&seq, &rec, "%s", TEP_PRINT_INFO);
858 	}
859 	/*
860 	 * Trim the buffer: it starts at 4KB and we're not going to
861 	 * add anything more to it.
862 	 */
863 	return realloc(seq.buffer, seq.len + 1);
864 }
865 
866 static int64_t
867 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
868 {
869 	struct evsel *evsel;
870 
871 	evsel = hists_to_evsel(left->hists);
872 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
873 		return 0;
874 
875 	if (left->trace_output == NULL)
876 		left->trace_output = get_trace_output(left);
877 	if (right->trace_output == NULL)
878 		right->trace_output = get_trace_output(right);
879 
880 	return strcmp(right->trace_output, left->trace_output);
881 }
882 
883 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
884 				    size_t size, unsigned int width)
885 {
886 	struct evsel *evsel;
887 
888 	evsel = hists_to_evsel(he->hists);
889 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
890 		return scnprintf(bf, size, "%-.*s", width, "N/A");
891 
892 	if (he->trace_output == NULL)
893 		he->trace_output = get_trace_output(he);
894 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
895 }
896 
897 struct sort_entry sort_trace = {
898 	.se_header      = "Trace output",
899 	.se_cmp	        = sort__trace_cmp,
900 	.se_snprintf    = hist_entry__trace_snprintf,
901 	.se_width_idx	= HISTC_TRACE,
902 };
903 #endif /* HAVE_LIBTRACEEVENT */
904 
905 /* sort keys for branch stacks */
906 
907 static int64_t
908 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
909 {
910 	if (!left->branch_info || !right->branch_info)
911 		return cmp_null(left->branch_info, right->branch_info);
912 
913 	return _sort__dso_cmp(left->branch_info->from.ms.map,
914 			      right->branch_info->from.ms.map);
915 }
916 
917 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
918 				    size_t size, unsigned int width)
919 {
920 	if (he->branch_info)
921 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
922 						 bf, size, width);
923 	else
924 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
925 }
926 
927 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
928 				       const void *arg)
929 {
930 	const struct dso *dso = arg;
931 
932 	if (type != HIST_FILTER__DSO)
933 		return -1;
934 
935 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
936 		       he->branch_info->from.ms.map->dso != dso);
937 }
938 
939 static int64_t
940 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
941 {
942 	if (!left->branch_info || !right->branch_info)
943 		return cmp_null(left->branch_info, right->branch_info);
944 
945 	return _sort__dso_cmp(left->branch_info->to.ms.map,
946 			      right->branch_info->to.ms.map);
947 }
948 
949 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
950 				       size_t size, unsigned int width)
951 {
952 	if (he->branch_info)
953 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
954 						 bf, size, width);
955 	else
956 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
957 }
958 
959 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
960 				     const void *arg)
961 {
962 	const struct dso *dso = arg;
963 
964 	if (type != HIST_FILTER__DSO)
965 		return -1;
966 
967 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
968 		       he->branch_info->to.ms.map->dso != dso);
969 }
970 
971 static int64_t
972 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
973 {
974 	struct addr_map_symbol *from_l = &left->branch_info->from;
975 	struct addr_map_symbol *from_r = &right->branch_info->from;
976 
977 	if (!left->branch_info || !right->branch_info)
978 		return cmp_null(left->branch_info, right->branch_info);
979 
980 	from_l = &left->branch_info->from;
981 	from_r = &right->branch_info->from;
982 
983 	if (!from_l->ms.sym && !from_r->ms.sym)
984 		return _sort__addr_cmp(from_l->addr, from_r->addr);
985 
986 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
987 }
988 
989 static int64_t
990 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
991 {
992 	struct addr_map_symbol *to_l, *to_r;
993 
994 	if (!left->branch_info || !right->branch_info)
995 		return cmp_null(left->branch_info, right->branch_info);
996 
997 	to_l = &left->branch_info->to;
998 	to_r = &right->branch_info->to;
999 
1000 	if (!to_l->ms.sym && !to_r->ms.sym)
1001 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1002 
1003 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1004 }
1005 
1006 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1007 					 size_t size, unsigned int width)
1008 {
1009 	if (he->branch_info) {
1010 		struct addr_map_symbol *from = &he->branch_info->from;
1011 
1012 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1013 						 from->al_level, bf, size, width);
1014 	}
1015 
1016 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1017 }
1018 
1019 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1020 				       size_t size, unsigned int width)
1021 {
1022 	if (he->branch_info) {
1023 		struct addr_map_symbol *to = &he->branch_info->to;
1024 
1025 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1026 						 to->al_level, bf, size, width);
1027 	}
1028 
1029 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1030 }
1031 
1032 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1033 				       const void *arg)
1034 {
1035 	const char *sym = arg;
1036 
1037 	if (type != HIST_FILTER__SYMBOL)
1038 		return -1;
1039 
1040 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1041 			strstr(he->branch_info->from.ms.sym->name, sym));
1042 }
1043 
1044 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1045 				       const void *arg)
1046 {
1047 	const char *sym = arg;
1048 
1049 	if (type != HIST_FILTER__SYMBOL)
1050 		return -1;
1051 
1052 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1053 		        strstr(he->branch_info->to.ms.sym->name, sym));
1054 }
1055 
1056 struct sort_entry sort_dso_from = {
1057 	.se_header	= "Source Shared Object",
1058 	.se_cmp		= sort__dso_from_cmp,
1059 	.se_snprintf	= hist_entry__dso_from_snprintf,
1060 	.se_filter	= hist_entry__dso_from_filter,
1061 	.se_width_idx	= HISTC_DSO_FROM,
1062 };
1063 
1064 struct sort_entry sort_dso_to = {
1065 	.se_header	= "Target Shared Object",
1066 	.se_cmp		= sort__dso_to_cmp,
1067 	.se_snprintf	= hist_entry__dso_to_snprintf,
1068 	.se_filter	= hist_entry__dso_to_filter,
1069 	.se_width_idx	= HISTC_DSO_TO,
1070 };
1071 
1072 struct sort_entry sort_sym_from = {
1073 	.se_header	= "Source Symbol",
1074 	.se_cmp		= sort__sym_from_cmp,
1075 	.se_snprintf	= hist_entry__sym_from_snprintf,
1076 	.se_filter	= hist_entry__sym_from_filter,
1077 	.se_width_idx	= HISTC_SYMBOL_FROM,
1078 };
1079 
1080 struct sort_entry sort_sym_to = {
1081 	.se_header	= "Target Symbol",
1082 	.se_cmp		= sort__sym_to_cmp,
1083 	.se_snprintf	= hist_entry__sym_to_snprintf,
1084 	.se_filter	= hist_entry__sym_to_filter,
1085 	.se_width_idx	= HISTC_SYMBOL_TO,
1086 };
1087 
1088 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1089 				     u64 ip, char level, char *bf, size_t size,
1090 				     unsigned int width)
1091 {
1092 	struct symbol *sym = ms->sym;
1093 	struct map *map = ms->map;
1094 	size_t ret = 0, offs;
1095 
1096 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1097 	if (sym && map) {
1098 		if (sym->type == STT_OBJECT) {
1099 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1100 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1101 					ip - map->unmap_ip(map, sym->start));
1102 		} else {
1103 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1104 					       width - ret,
1105 					       sym->name);
1106 			offs = ip - sym->start;
1107 			if (offs)
1108 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1109 		}
1110 	} else {
1111 		size_t len = BITS_PER_LONG / 4;
1112 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1113 				       len, ip);
1114 	}
1115 
1116 	return ret;
1117 }
1118 
1119 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1120 					 size_t size, unsigned int width)
1121 {
1122 	if (he->branch_info) {
1123 		struct addr_map_symbol *from = &he->branch_info->from;
1124 
1125 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1126 						 he->level, bf, size, width);
1127 	}
1128 
1129 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1130 }
1131 
1132 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1133 				       size_t size, unsigned int width)
1134 {
1135 	if (he->branch_info) {
1136 		struct addr_map_symbol *to = &he->branch_info->to;
1137 
1138 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1139 						 he->level, bf, size, width);
1140 	}
1141 
1142 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1143 }
1144 
1145 static int64_t
1146 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1147 {
1148 	struct addr_map_symbol *from_l;
1149 	struct addr_map_symbol *from_r;
1150 	int64_t ret;
1151 
1152 	if (!left->branch_info || !right->branch_info)
1153 		return cmp_null(left->branch_info, right->branch_info);
1154 
1155 	from_l = &left->branch_info->from;
1156 	from_r = &right->branch_info->from;
1157 
1158 	/*
1159 	 * comparing symbol address alone is not enough since it's a
1160 	 * relative address within a dso.
1161 	 */
1162 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1163 	if (ret != 0)
1164 		return ret;
1165 
1166 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1167 }
1168 
1169 static int64_t
1170 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1171 {
1172 	struct addr_map_symbol *to_l;
1173 	struct addr_map_symbol *to_r;
1174 	int64_t ret;
1175 
1176 	if (!left->branch_info || !right->branch_info)
1177 		return cmp_null(left->branch_info, right->branch_info);
1178 
1179 	to_l = &left->branch_info->to;
1180 	to_r = &right->branch_info->to;
1181 
1182 	/*
1183 	 * comparing symbol address alone is not enough since it's a
1184 	 * relative address within a dso.
1185 	 */
1186 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1187 	if (ret != 0)
1188 		return ret;
1189 
1190 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1191 }
1192 
1193 struct sort_entry sort_addr_from = {
1194 	.se_header	= "Source Address",
1195 	.se_cmp		= sort__addr_from_cmp,
1196 	.se_snprintf	= hist_entry__addr_from_snprintf,
1197 	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1198 	.se_width_idx	= HISTC_ADDR_FROM,
1199 };
1200 
1201 struct sort_entry sort_addr_to = {
1202 	.se_header	= "Target Address",
1203 	.se_cmp		= sort__addr_to_cmp,
1204 	.se_snprintf	= hist_entry__addr_to_snprintf,
1205 	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1206 	.se_width_idx	= HISTC_ADDR_TO,
1207 };
1208 
1209 
1210 static int64_t
1211 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1212 {
1213 	unsigned char mp, p;
1214 
1215 	if (!left->branch_info || !right->branch_info)
1216 		return cmp_null(left->branch_info, right->branch_info);
1217 
1218 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1219 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1220 	return mp || p;
1221 }
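/*
 * Note added for clarity (not in the original file): unlike most cmp
 * callbacks this one only reports equal (0) or not-equal (1).  That is
 * enough to keep hist entries separate when the predicted/mispred flags
 * differ, but it does not define an ordering between them.
 */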
1222 
1223 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1224 				    size_t size, unsigned int width){
1225 	static const char *out = "N/A";
1226 
1227 	if (he->branch_info) {
1228 		if (he->branch_info->flags.predicted)
1229 			out = "N";
1230 		else if (he->branch_info->flags.mispred)
1231 			out = "Y";
1232 	}
1233 
1234 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1235 }
1236 
1237 static int64_t
1238 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1239 {
1240 	if (!left->branch_info || !right->branch_info)
1241 		return cmp_null(left->branch_info, right->branch_info);
1242 
1243 	return left->branch_info->flags.cycles -
1244 		right->branch_info->flags.cycles;
1245 }
1246 
1247 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1248 				    size_t size, unsigned int width)
1249 {
1250 	if (!he->branch_info)
1251 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1252 	if (he->branch_info->flags.cycles == 0)
1253 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1254 	return repsep_snprintf(bf, size, "%-*hd", width,
1255 			       he->branch_info->flags.cycles);
1256 }
1257 
1258 struct sort_entry sort_cycles = {
1259 	.se_header	= "Basic Block Cycles",
1260 	.se_cmp		= sort__cycles_cmp,
1261 	.se_snprintf	= hist_entry__cycles_snprintf,
1262 	.se_width_idx	= HISTC_CYCLES,
1263 };
1264 
1265 /* --sort daddr_sym */
1266 int64_t
1267 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1268 {
1269 	uint64_t l = 0, r = 0;
1270 
1271 	if (left->mem_info)
1272 		l = left->mem_info->daddr.addr;
1273 	if (right->mem_info)
1274 		r = right->mem_info->daddr.addr;
1275 
1276 	return (int64_t)(r - l);
1277 }
1278 
1279 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1280 				    size_t size, unsigned int width)
1281 {
1282 	uint64_t addr = 0;
1283 	struct map_symbol *ms = NULL;
1284 
1285 	if (he->mem_info) {
1286 		addr = he->mem_info->daddr.addr;
1287 		ms = &he->mem_info->daddr.ms;
1288 	}
1289 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1290 }
1291 
1292 int64_t
1293 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1294 {
1295 	uint64_t l = 0, r = 0;
1296 
1297 	if (left->mem_info)
1298 		l = left->mem_info->iaddr.addr;
1299 	if (right->mem_info)
1300 		r = right->mem_info->iaddr.addr;
1301 
1302 	return (int64_t)(r - l);
1303 }
1304 
1305 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1306 				    size_t size, unsigned int width)
1307 {
1308 	uint64_t addr = 0;
1309 	struct map_symbol *ms = NULL;
1310 
1311 	if (he->mem_info) {
1312 		addr = he->mem_info->iaddr.addr;
1313 		ms   = &he->mem_info->iaddr.ms;
1314 	}
1315 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1316 }
1317 
1318 static int64_t
1319 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1320 {
1321 	struct map *map_l = NULL;
1322 	struct map *map_r = NULL;
1323 
1324 	if (left->mem_info)
1325 		map_l = left->mem_info->daddr.ms.map;
1326 	if (right->mem_info)
1327 		map_r = right->mem_info->daddr.ms.map;
1328 
1329 	return _sort__dso_cmp(map_l, map_r);
1330 }
1331 
1332 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1333 				    size_t size, unsigned int width)
1334 {
1335 	struct map *map = NULL;
1336 
1337 	if (he->mem_info)
1338 		map = he->mem_info->daddr.ms.map;
1339 
1340 	return _hist_entry__dso_snprintf(map, bf, size, width);
1341 }
1342 
1343 static int64_t
1344 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1345 {
1346 	union perf_mem_data_src data_src_l;
1347 	union perf_mem_data_src data_src_r;
1348 
1349 	if (left->mem_info)
1350 		data_src_l = left->mem_info->data_src;
1351 	else
1352 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1353 
1354 	if (right->mem_info)
1355 		data_src_r = right->mem_info->data_src;
1356 	else
1357 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1358 
1359 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1360 }
1361 
1362 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1363 				    size_t size, unsigned int width)
1364 {
1365 	char out[10];
1366 
1367 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1368 	return repsep_snprintf(bf, size, "%.*s", width, out);
1369 }
1370 
1371 static int64_t
1372 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1373 {
1374 	union perf_mem_data_src data_src_l;
1375 	union perf_mem_data_src data_src_r;
1376 
1377 	if (left->mem_info)
1378 		data_src_l = left->mem_info->data_src;
1379 	else
1380 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1381 
1382 	if (right->mem_info)
1383 		data_src_r = right->mem_info->data_src;
1384 	else
1385 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1386 
1387 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1388 }
1389 
1390 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1391 				    size_t size, unsigned int width)
1392 {
1393 	char out[64];
1394 
1395 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1396 	return repsep_snprintf(bf, size, "%-*s", width, out);
1397 }
1398 
1399 static int64_t
1400 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1401 {
1402 	union perf_mem_data_src data_src_l;
1403 	union perf_mem_data_src data_src_r;
1404 
1405 	if (left->mem_info)
1406 		data_src_l = left->mem_info->data_src;
1407 	else
1408 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1409 
1410 	if (right->mem_info)
1411 		data_src_r = right->mem_info->data_src;
1412 	else
1413 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1414 
1415 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1416 }
1417 
1418 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1419 				    size_t size, unsigned int width)
1420 {
1421 	char out[64];
1422 
1423 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1424 	return repsep_snprintf(bf, size, "%-*s", width, out);
1425 }
1426 
1427 static int64_t
1428 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1429 {
1430 	union perf_mem_data_src data_src_l;
1431 	union perf_mem_data_src data_src_r;
1432 
1433 	if (left->mem_info)
1434 		data_src_l = left->mem_info->data_src;
1435 	else
1436 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1437 
1438 	if (right->mem_info)
1439 		data_src_r = right->mem_info->data_src;
1440 	else
1441 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1442 
1443 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1444 }
1445 
1446 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1447 				    size_t size, unsigned int width)
1448 {
1449 	char out[64];
1450 
1451 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1452 	return repsep_snprintf(bf, size, "%-*s", width, out);
1453 }
1454 
1455 int64_t
1456 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1457 {
1458 	u64 l, r;
1459 	struct map *l_map, *r_map;
1460 	int rc;
1461 
1462 	if (!left->mem_info)  return -1;
1463 	if (!right->mem_info) return 1;
1464 
1465 	/* group event types together */
1466 	if (left->cpumode > right->cpumode) return -1;
1467 	if (left->cpumode < right->cpumode) return 1;
1468 
1469 	l_map = left->mem_info->daddr.ms.map;
1470 	r_map = right->mem_info->daddr.ms.map;
1471 
1472 	/* if both are NULL, jump to sort on al_addr instead */
1473 	if (!l_map && !r_map)
1474 		goto addr;
1475 
1476 	if (!l_map) return -1;
1477 	if (!r_map) return 1;
1478 
1479 	rc = dso__cmp_id(l_map->dso, r_map->dso);
1480 	if (rc)
1481 		return rc;
1482 	/*
1483 	 * Addresses with no major/minor numbers are assumed to be
1484 	 * anonymous in userspace.  Sort those on pid then address.
1485 	 *
1486 	 * The kernel and non-zero major/minor mapped areas are
1487 	 * assumed to be unity mapped.  Sort those on address.
1488 	 */
1489 
1490 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1491 	    (!(l_map->flags & MAP_SHARED)) &&
1492 	    !l_map->dso->id.maj && !l_map->dso->id.min &&
1493 	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1494 		/* userspace anonymous */
1495 
1496 		if (left->thread->pid_ > right->thread->pid_) return -1;
1497 		if (left->thread->pid_ < right->thread->pid_) return 1;
1498 	}
1499 
1500 addr:
1501 	/* al_addr does all the right addr - start + offset calculations */
1502 	l = cl_address(left->mem_info->daddr.al_addr);
1503 	r = cl_address(right->mem_info->daddr.al_addr);
1504 
1505 	if (l > r) return -1;
1506 	if (l < r) return 1;
1507 
1508 	return 0;
1509 }
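/*
 * Illustration, not part of the original file: cl_address() masks off the
 * offset within a cacheline, so assuming 64-byte lines the accesses 0x1008
 * and 0x1030 both map to line 0x1000 and are grouped together, while
 * anonymous userspace mappings are additionally split per pid as described
 * in the comment above.
 */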
1510 
1511 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1512 					  size_t size, unsigned int width)
1513 {
1514 
1515 	uint64_t addr = 0;
1516 	struct map_symbol *ms = NULL;
1517 	char level = he->level;
1518 
1519 	if (he->mem_info) {
1520 		struct map *map = he->mem_info->daddr.ms.map;
1521 
1522 		addr = cl_address(he->mem_info->daddr.al_addr);
1523 		ms = &he->mem_info->daddr.ms;
1524 
1525 		/* print [s] for shared data mmaps */
1526 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1527 		     map && !(map->prot & PROT_EXEC) &&
1528 		    (map->flags & MAP_SHARED) &&
1529 		    (map->dso->id.maj || map->dso->id.min ||
1530 		     map->dso->id.ino || map->dso->id.ino_generation))
1531 			level = 's';
1532 		else if (!map)
1533 			level = 'X';
1534 	}
1535 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1536 }
1537 
1538 struct sort_entry sort_mispredict = {
1539 	.se_header	= "Branch Mispredicted",
1540 	.se_cmp		= sort__mispredict_cmp,
1541 	.se_snprintf	= hist_entry__mispredict_snprintf,
1542 	.se_width_idx	= HISTC_MISPREDICT,
1543 };
1544 
1545 static int64_t
1546 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1547 {
1548 	return left->weight - right->weight;
1549 }
1550 
1551 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1552 				    size_t size, unsigned int width)
1553 {
1554 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1555 }
1556 
1557 struct sort_entry sort_local_weight = {
1558 	.se_header	= "Local Weight",
1559 	.se_cmp		= sort__weight_cmp,
1560 	.se_snprintf	= hist_entry__local_weight_snprintf,
1561 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1562 };
1563 
1564 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1565 					      size_t size, unsigned int width)
1566 {
1567 	return repsep_snprintf(bf, size, "%-*llu", width,
1568 			       he->weight * he->stat.nr_events);
1569 }
1570 
1571 struct sort_entry sort_global_weight = {
1572 	.se_header	= "Weight",
1573 	.se_cmp		= sort__weight_cmp,
1574 	.se_snprintf	= hist_entry__global_weight_snprintf,
1575 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1576 };
1577 
1578 static int64_t
1579 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1580 {
1581 	return left->ins_lat - right->ins_lat;
1582 }
1583 
1584 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1585 					      size_t size, unsigned int width)
1586 {
1587 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1588 }
1589 
1590 struct sort_entry sort_local_ins_lat = {
1591 	.se_header	= "Local INSTR Latency",
1592 	.se_cmp		= sort__ins_lat_cmp,
1593 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1594 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1595 };
1596 
1597 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1598 					       size_t size, unsigned int width)
1599 {
1600 	return repsep_snprintf(bf, size, "%-*u", width,
1601 			       he->ins_lat * he->stat.nr_events);
1602 }
1603 
1604 struct sort_entry sort_global_ins_lat = {
1605 	.se_header	= "INSTR Latency",
1606 	.se_cmp		= sort__ins_lat_cmp,
1607 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1608 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1609 };
1610 
1611 static int64_t
1612 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1613 {
1614 	return left->p_stage_cyc - right->p_stage_cyc;
1615 }
1616 
1617 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1618 					size_t size, unsigned int width)
1619 {
1620 	return repsep_snprintf(bf, size, "%-*u", width,
1621 			he->p_stage_cyc * he->stat.nr_events);
1622 }
1623 
1624 
1625 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1626 					size_t size, unsigned int width)
1627 {
1628 	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1629 }
1630 
1631 struct sort_entry sort_local_p_stage_cyc = {
1632 	.se_header      = "Local Pipeline Stage Cycle",
1633 	.se_cmp         = sort__p_stage_cyc_cmp,
1634 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1635 	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1636 };
1637 
1638 struct sort_entry sort_global_p_stage_cyc = {
1639 	.se_header      = "Pipeline Stage Cycle",
1640 	.se_cmp         = sort__p_stage_cyc_cmp,
1641 	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1642 	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1643 };
1644 
1645 struct sort_entry sort_mem_daddr_sym = {
1646 	.se_header	= "Data Symbol",
1647 	.se_cmp		= sort__daddr_cmp,
1648 	.se_snprintf	= hist_entry__daddr_snprintf,
1649 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1650 };
1651 
1652 struct sort_entry sort_mem_iaddr_sym = {
1653 	.se_header	= "Code Symbol",
1654 	.se_cmp		= sort__iaddr_cmp,
1655 	.se_snprintf	= hist_entry__iaddr_snprintf,
1656 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1657 };
1658 
1659 struct sort_entry sort_mem_daddr_dso = {
1660 	.se_header	= "Data Object",
1661 	.se_cmp		= sort__dso_daddr_cmp,
1662 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1663 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1664 };
1665 
1666 struct sort_entry sort_mem_locked = {
1667 	.se_header	= "Locked",
1668 	.se_cmp		= sort__locked_cmp,
1669 	.se_snprintf	= hist_entry__locked_snprintf,
1670 	.se_width_idx	= HISTC_MEM_LOCKED,
1671 };
1672 
1673 struct sort_entry sort_mem_tlb = {
1674 	.se_header	= "TLB access",
1675 	.se_cmp		= sort__tlb_cmp,
1676 	.se_snprintf	= hist_entry__tlb_snprintf,
1677 	.se_width_idx	= HISTC_MEM_TLB,
1678 };
1679 
1680 struct sort_entry sort_mem_lvl = {
1681 	.se_header	= "Memory access",
1682 	.se_cmp		= sort__lvl_cmp,
1683 	.se_snprintf	= hist_entry__lvl_snprintf,
1684 	.se_width_idx	= HISTC_MEM_LVL,
1685 };
1686 
1687 struct sort_entry sort_mem_snoop = {
1688 	.se_header	= "Snoop",
1689 	.se_cmp		= sort__snoop_cmp,
1690 	.se_snprintf	= hist_entry__snoop_snprintf,
1691 	.se_width_idx	= HISTC_MEM_SNOOP,
1692 };
1693 
1694 struct sort_entry sort_mem_dcacheline = {
1695 	.se_header	= "Data Cacheline",
1696 	.se_cmp		= sort__dcacheline_cmp,
1697 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1698 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1699 };
1700 
1701 static int64_t
1702 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1703 {
1704 	union perf_mem_data_src data_src_l;
1705 	union perf_mem_data_src data_src_r;
1706 
1707 	if (left->mem_info)
1708 		data_src_l = left->mem_info->data_src;
1709 	else
1710 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1711 
1712 	if (right->mem_info)
1713 		data_src_r = right->mem_info->data_src;
1714 	else
1715 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1716 
1717 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1718 }
1719 
1720 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1721 					size_t size, unsigned int width)
1722 {
1723 	char out[16];
1724 
1725 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1726 	return repsep_snprintf(bf, size, "%.*s", width, out);
1727 }
1728 
1729 struct sort_entry sort_mem_blocked = {
1730 	.se_header	= "Blocked",
1731 	.se_cmp		= sort__blocked_cmp,
1732 	.se_snprintf	= hist_entry__blocked_snprintf,
1733 	.se_width_idx	= HISTC_MEM_BLOCKED,
1734 };
1735 
1736 static int64_t
1737 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1738 {
1739 	uint64_t l = 0, r = 0;
1740 
1741 	if (left->mem_info)
1742 		l = left->mem_info->daddr.phys_addr;
1743 	if (right->mem_info)
1744 		r = right->mem_info->daddr.phys_addr;
1745 
1746 	return (int64_t)(r - l);
1747 }
1748 
1749 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1750 					   size_t size, unsigned int width)
1751 {
1752 	uint64_t addr = 0;
1753 	size_t ret = 0;
1754 	size_t len = BITS_PER_LONG / 4;
1755 
1756 	addr = he->mem_info->daddr.phys_addr;
1757 
1758 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1759 
1760 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1761 
1762 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1763 
1764 	if (ret > width)
1765 		bf[width] = '\0';
1766 
1767 	return width;
1768 }
1769 
1770 struct sort_entry sort_mem_phys_daddr = {
1771 	.se_header	= "Data Physical Address",
1772 	.se_cmp		= sort__phys_daddr_cmp,
1773 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1774 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1775 };
1776 
1777 static int64_t
1778 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1779 {
1780 	uint64_t l = 0, r = 0;
1781 
1782 	if (left->mem_info)
1783 		l = left->mem_info->daddr.data_page_size;
1784 	if (right->mem_info)
1785 		r = right->mem_info->daddr.data_page_size;
1786 
1787 	return (int64_t)(r - l);
1788 }
1789 
1790 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1791 					  size_t size, unsigned int width)
1792 {
1793 	char str[PAGE_SIZE_NAME_LEN];
1794 
1795 	return repsep_snprintf(bf, size, "%-*s", width,
1796 			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1797 }
1798 
1799 struct sort_entry sort_mem_data_page_size = {
1800 	.se_header	= "Data Page Size",
1801 	.se_cmp		= sort__data_page_size_cmp,
1802 	.se_snprintf	= hist_entry__data_page_size_snprintf,
1803 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1804 };
1805 
1806 static int64_t
1807 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1808 {
1809 	uint64_t l = left->code_page_size;
1810 	uint64_t r = right->code_page_size;
1811 
1812 	return (int64_t)(r - l);
1813 }
1814 
1815 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1816 					  size_t size, unsigned int width)
1817 {
1818 	char str[PAGE_SIZE_NAME_LEN];
1819 
1820 	return repsep_snprintf(bf, size, "%-*s", width,
1821 			       get_page_size_name(he->code_page_size, str));
1822 }
1823 
1824 struct sort_entry sort_code_page_size = {
1825 	.se_header	= "Code Page Size",
1826 	.se_cmp		= sort__code_page_size_cmp,
1827 	.se_snprintf	= hist_entry__code_page_size_snprintf,
1828 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1829 };
1830 
1831 static int64_t
1832 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1833 {
1834 	if (!left->branch_info || !right->branch_info)
1835 		return cmp_null(left->branch_info, right->branch_info);
1836 
1837 	return left->branch_info->flags.abort !=
1838 		right->branch_info->flags.abort;
1839 }
1840 
1841 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1842 				    size_t size, unsigned int width)
1843 {
1844 	static const char *out = "N/A";
1845 
1846 	if (he->branch_info) {
1847 		if (he->branch_info->flags.abort)
1848 			out = "A";
1849 		else
1850 			out = ".";
1851 	}
1852 
1853 	return repsep_snprintf(bf, size, "%-*s", width, out);
1854 }
1855 
1856 struct sort_entry sort_abort = {
1857 	.se_header	= "Transaction abort",
1858 	.se_cmp		= sort__abort_cmp,
1859 	.se_snprintf	= hist_entry__abort_snprintf,
1860 	.se_width_idx	= HISTC_ABORT,
1861 };
1862 
1863 static int64_t
1864 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1865 {
1866 	if (!left->branch_info || !right->branch_info)
1867 		return cmp_null(left->branch_info, right->branch_info);
1868 
1869 	return left->branch_info->flags.in_tx !=
1870 		right->branch_info->flags.in_tx;
1871 }
1872 
1873 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1874 				    size_t size, unsigned int width)
1875 {
1876 	static const char *out = "N/A";
1877 
1878 	if (he->branch_info) {
1879 		if (he->branch_info->flags.in_tx)
1880 			out = "T";
1881 		else
1882 			out = ".";
1883 	}
1884 
1885 	return repsep_snprintf(bf, size, "%-*s", width, out);
1886 }
1887 
1888 struct sort_entry sort_in_tx = {
1889 	.se_header	= "Branch in transaction",
1890 	.se_cmp		= sort__in_tx_cmp,
1891 	.se_snprintf	= hist_entry__in_tx_snprintf,
1892 	.se_width_idx	= HISTC_IN_TX,
1893 };
1894 
1895 static int64_t
1896 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1897 {
1898 	return left->transaction - right->transaction;
1899 }
1900 
1901 static inline char *add_str(char *p, const char *str)
1902 {
1903 	strcpy(p, str);
1904 	return p + strlen(str);
1905 }
1906 
1907 static struct txbit {
1908 	unsigned flag;
1909 	const char *name;
1910 	int skip_for_len;
1911 } txbits[] = {
1912 	{ PERF_TXN_ELISION,        "EL ",        0 },
1913 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1914 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1915 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1916 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1917 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1918 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1919 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1920 	{ 0, NULL, 0 }
1921 };
1922 
1923 int hist_entry__transaction_len(void)
1924 {
1925 	int i;
1926 	int len = 0;
1927 
1928 	for (i = 0; txbits[i].name; i++) {
1929 		if (!txbits[i].skip_for_len)
1930 			len += strlen(txbits[i].name);
1931 	}
1932 	len += 4; /* :XX<space> */
1933 	return len;
1934 }
1935 
1936 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1937 					    size_t size, unsigned int width)
1938 {
1939 	u64 t = he->transaction;
1940 	char buf[128];
1941 	char *p = buf;
1942 	int i;
1943 
1944 	buf[0] = 0;
1945 	for (i = 0; txbits[i].name; i++)
1946 		if (txbits[i].flag & t)
1947 			p = add_str(p, txbits[i].name);
1948 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1949 		p = add_str(p, "NEITHER ");
1950 	if (t & PERF_TXN_ABORT_MASK) {
1951 		sprintf(p, ":%" PRIx64,
1952 			(t & PERF_TXN_ABORT_MASK) >>
1953 			PERF_TXN_ABORT_SHIFT);
1954 		p += strlen(p);
1955 	}
1956 
1957 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1958 }
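/*
 * Illustrative rendering (flag values assumed for the example, not taken
 * from a real sample): a transaction word with PERF_TXN_ELISION and
 * PERF_TXN_SYNC set and an abort code of 5 in PERF_TXN_ABORT_MASK is
 * printed as "EL SYNC :5".
 */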
1959 
1960 struct sort_entry sort_transaction = {
1961 	.se_header	= "Transaction                ",
1962 	.se_cmp		= sort__transaction_cmp,
1963 	.se_snprintf	= hist_entry__transaction_snprintf,
1964 	.se_width_idx	= HISTC_TRANSACTION,
1965 };
1966 
1967 /* --sort symbol_size */
1968 
1969 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1970 {
1971 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1972 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1973 
1974 	return size_l < size_r ? -1 :
1975 		size_l == size_r ? 0 : 1;
1976 }
1977 
1978 static int64_t
1979 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1980 {
1981 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1982 }
1983 
1984 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1985 					  size_t bf_size, unsigned int width)
1986 {
1987 	if (sym)
1988 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1989 
1990 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1991 }
1992 
1993 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1994 					 size_t size, unsigned int width)
1995 {
1996 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1997 }
1998 
1999 struct sort_entry sort_sym_size = {
2000 	.se_header	= "Symbol size",
2001 	.se_cmp		= sort__sym_size_cmp,
2002 	.se_snprintf	= hist_entry__sym_size_snprintf,
2003 	.se_width_idx	= HISTC_SYM_SIZE,
2004 };
2005 
2006 /* --sort dso_size */
2007 
2008 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2009 {
2010 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2011 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2012 
2013 	return size_l < size_r ? -1 :
2014 		size_l == size_r ? 0 : 1;
2015 }
2016 
2017 static int64_t
2018 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2019 {
2020 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2021 }
2022 
2023 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2024 					  size_t bf_size, unsigned int width)
2025 {
2026 	if (map && map->dso)
2027 		return repsep_snprintf(bf, bf_size, "%*d", width,
2028 				       map__size(map));
2029 
2030 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2031 }
2032 
2033 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2034 					 size_t size, unsigned int width)
2035 {
2036 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2037 }
2038 
2039 struct sort_entry sort_dso_size = {
2040 	.se_header	= "DSO size",
2041 	.se_cmp		= sort__dso_size_cmp,
2042 	.se_snprintf	= hist_entry__dso_size_snprintf,
2043 	.se_width_idx	= HISTC_DSO_SIZE,
2044 };
2045 
2046 /* --sort addr */
2047 
2048 static int64_t
2049 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2050 {
2051 	u64 left_ip = left->ip;
2052 	u64 right_ip = right->ip;
2053 	struct map *left_map = left->ms.map;
2054 	struct map *right_map = right->ms.map;
2055 
2056 	if (left_map)
2057 		left_ip = left_map->unmap_ip(left_map, left_ip);
2058 	if (right_map)
2059 		right_ip = right_map->unmap_ip(right_map, right_ip);
2060 
2061 	return _sort__addr_cmp(left_ip, right_ip);
2062 }
2063 
2064 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2065 				     size_t size, unsigned int width)
2066 {
2067 	u64 ip = he->ip;
2068 	struct map *map = he->ms.map;
2069 
2070 	if (map)
2071 		ip = map->unmap_ip(map, ip);
2072 
2073 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2074 }
2075 
2076 struct sort_entry sort_addr = {
2077 	.se_header	= "Address",
2078 	.se_cmp		= sort__addr_cmp,
2079 	.se_snprintf	= hist_entry__addr_snprintf,
2080 	.se_width_idx	= HISTC_ADDR,
2081 };
2082 
2083 
2084 struct sort_dimension {
2085 	const char		*name;
2086 	struct sort_entry	*entry;
2087 	int			taken;
2088 };
2089 
2090 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2091 {
2092 	return 0;
2093 }
2094 
2095 const char * __weak arch_perf_header_entry(const char *se_header)
2096 {
2097 	return se_header;
2098 }
2099 
2100 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2101 {
2102 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2103 }
2104 
2105 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2106 
2107 static struct sort_dimension common_sort_dimensions[] = {
2108 	DIM(SORT_PID, "pid", sort_thread),
2109 	DIM(SORT_COMM, "comm", sort_comm),
2110 	DIM(SORT_DSO, "dso", sort_dso),
2111 	DIM(SORT_SYM, "symbol", sort_sym),
2112 	DIM(SORT_PARENT, "parent", sort_parent),
2113 	DIM(SORT_CPU, "cpu", sort_cpu),
2114 	DIM(SORT_SOCKET, "socket", sort_socket),
2115 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2116 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2117 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2118 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2119 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2120 #ifdef HAVE_LIBTRACEEVENT
2121 	DIM(SORT_TRACE, "trace", sort_trace),
2122 #endif
2123 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2124 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2125 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2126 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2127 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2128 	DIM(SORT_TIME, "time", sort_time),
2129 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2130 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2131 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2132 	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2133 	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2134 	DIM(SORT_ADDR, "addr", sort_addr),
2135 };
2136 
2137 #undef DIM
2138 
2139 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2140 
2141 static struct sort_dimension bstack_sort_dimensions[] = {
2142 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2143 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2144 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2145 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2146 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2147 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2148 	DIM(SORT_ABORT, "abort", sort_abort),
2149 	DIM(SORT_CYCLES, "cycles", sort_cycles),
2150 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2151 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2152 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2153 	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2154 	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2155 };
2156 
2157 #undef DIM
2158 
2159 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2160 
2161 static struct sort_dimension memory_sort_dimensions[] = {
2162 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2163 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2164 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2165 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2166 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2167 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2168 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2169 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2170 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2171 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2172 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2173 };
2174 
2175 #undef DIM
2176 
2177 struct hpp_dimension {
2178 	const char		*name;
2179 	struct perf_hpp_fmt	*fmt;
2180 	int			taken;
2181 };
2182 
2183 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2184 
2185 static struct hpp_dimension hpp_sort_dimensions[] = {
2186 	DIM(PERF_HPP__OVERHEAD, "overhead"),
2187 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2188 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2189 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2190 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2191 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2192 	DIM(PERF_HPP__SAMPLES, "sample"),
2193 	DIM(PERF_HPP__PERIOD, "period"),
2194 };
2195 
2196 #undef DIM
2197 
2198 struct hpp_sort_entry {
2199 	struct perf_hpp_fmt hpp;
2200 	struct sort_entry *se;
2201 };
2202 
2203 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2204 {
2205 	struct hpp_sort_entry *hse;
2206 
2207 	if (!perf_hpp__is_sort_entry(fmt))
2208 		return;
2209 
2210 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2211 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2212 }
2213 
2214 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2215 			      struct hists *hists, int line __maybe_unused,
2216 			      int *span __maybe_unused)
2217 {
2218 	struct hpp_sort_entry *hse;
2219 	size_t len = fmt->user_len;
2220 
2221 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2222 
2223 	if (!len)
2224 		len = hists__col_len(hists, hse->se->se_width_idx);
2225 
2226 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2227 }
2228 
2229 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2230 			     struct perf_hpp *hpp __maybe_unused,
2231 			     struct hists *hists)
2232 {
2233 	struct hpp_sort_entry *hse;
2234 	size_t len = fmt->user_len;
2235 
2236 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2237 
2238 	if (!len)
2239 		len = hists__col_len(hists, hse->se->se_width_idx);
2240 
2241 	return len;
2242 }
2243 
2244 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2245 			     struct hist_entry *he)
2246 {
2247 	struct hpp_sort_entry *hse;
2248 	size_t len = fmt->user_len;
2249 
2250 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2251 
2252 	if (!len)
2253 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2254 
2255 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2256 }
2257 
2258 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2259 			       struct hist_entry *a, struct hist_entry *b)
2260 {
2261 	struct hpp_sort_entry *hse;
2262 
2263 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2264 	return hse->se->se_cmp(a, b);
2265 }
2266 
2267 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2268 				    struct hist_entry *a, struct hist_entry *b)
2269 {
2270 	struct hpp_sort_entry *hse;
2271 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2272 
2273 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2274 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2275 	return collapse_fn(a, b);
2276 }
2277 
2278 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2279 				struct hist_entry *a, struct hist_entry *b)
2280 {
2281 	struct hpp_sort_entry *hse;
2282 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2283 
2284 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2285 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2286 	return sort_fn(a, b);
2287 }
2288 
2289 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2290 {
2291 	return format->header == __sort__hpp_header;
2292 }
2293 
2294 #define MK_SORT_ENTRY_CHK(key)					\
2295 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2296 {								\
2297 	struct hpp_sort_entry *hse;				\
2298 								\
2299 	if (!perf_hpp__is_sort_entry(fmt))			\
2300 		return false;					\
2301 								\
2302 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2303 	return hse->se == &sort_ ## key ;			\
2304 }
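/*
 * For example, MK_SORT_ENTRY_CHK(dso) expands to perf_hpp__is_dso_entry(),
 * which returns true only for hpp formats backed by the sort_dso sort
 * entry defined earlier in this file.
 */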
2305 
2306 #ifdef HAVE_LIBTRACEEVENT
2307 MK_SORT_ENTRY_CHK(trace)
2308 #else
2309 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2310 {
2311 	return false;
2312 }
2313 #endif
2314 MK_SORT_ENTRY_CHK(srcline)
2315 MK_SORT_ENTRY_CHK(srcfile)
2316 MK_SORT_ENTRY_CHK(thread)
2317 MK_SORT_ENTRY_CHK(comm)
2318 MK_SORT_ENTRY_CHK(dso)
2319 MK_SORT_ENTRY_CHK(sym)
2320 
2321 
2322 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2323 {
2324 	struct hpp_sort_entry *hse_a;
2325 	struct hpp_sort_entry *hse_b;
2326 
2327 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2328 		return false;
2329 
2330 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2331 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2332 
2333 	return hse_a->se == hse_b->se;
2334 }
2335 
2336 static void hse_free(struct perf_hpp_fmt *fmt)
2337 {
2338 	struct hpp_sort_entry *hse;
2339 
2340 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2341 	free(hse);
2342 }
2343 
2344 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2345 {
2346 	struct hpp_sort_entry *hse;
2347 
2348 	if (!perf_hpp__is_sort_entry(fmt))
2349 		return;
2350 
2351 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2352 
2353 	if (hse->se->se_init)
2354 		hse->se->se_init(he);
2355 }
2356 
2357 static struct hpp_sort_entry *
2358 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2359 {
2360 	struct hpp_sort_entry *hse;
2361 
2362 	hse = malloc(sizeof(*hse));
2363 	if (hse == NULL) {
2364 		pr_err("Memory allocation failed\n");
2365 		return NULL;
2366 	}
2367 
2368 	hse->se = sd->entry;
2369 	hse->hpp.name = sd->entry->se_header;
2370 	hse->hpp.header = __sort__hpp_header;
2371 	hse->hpp.width = __sort__hpp_width;
2372 	hse->hpp.entry = __sort__hpp_entry;
2373 	hse->hpp.color = NULL;
2374 
2375 	hse->hpp.cmp = __sort__hpp_cmp;
2376 	hse->hpp.collapse = __sort__hpp_collapse;
2377 	hse->hpp.sort = __sort__hpp_sort;
2378 	hse->hpp.equal = __sort__hpp_equal;
2379 	hse->hpp.free = hse_free;
2380 	hse->hpp.init = hse_init;
2381 
2382 	INIT_LIST_HEAD(&hse->hpp.list);
2383 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2384 	hse->hpp.elide = false;
2385 	hse->hpp.len = 0;
2386 	hse->hpp.user_len = 0;
2387 	hse->hpp.level = level;
2388 
2389 	return hse;
2390 }
2391 
2392 static void hpp_free(struct perf_hpp_fmt *fmt)
2393 {
2394 	free(fmt);
2395 }
2396 
2397 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2398 						       int level)
2399 {
2400 	struct perf_hpp_fmt *fmt;
2401 
2402 	fmt = memdup(hd->fmt, sizeof(*fmt));
2403 	if (fmt) {
2404 		INIT_LIST_HEAD(&fmt->list);
2405 		INIT_LIST_HEAD(&fmt->sort_list);
2406 		fmt->free = hpp_free;
2407 		fmt->level = level;
2408 	}
2409 
2410 	return fmt;
2411 }
2412 
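/*
 * Combined filter over all sort keys in the entry's hpp list: returns -1
 * if no sort key produced a filter result for this filter type, otherwise
 * the bitwise OR of the individual se_filter() results.
 */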
2413 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2414 {
2415 	struct perf_hpp_fmt *fmt;
2416 	struct hpp_sort_entry *hse;
2417 	int ret = -1;
2418 	int r;
2419 
2420 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2421 		if (!perf_hpp__is_sort_entry(fmt))
2422 			continue;
2423 
2424 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2425 		if (hse->se->se_filter == NULL)
2426 			continue;
2427 
2428 		/*
2429 		 * A hist entry is filtered if any of the sort keys in the hpp
2430 		 * list applies.  But non-matching filter types are skipped.
2431 		 */
2432 		r = hse->se->se_filter(he, type, arg);
2433 		if (r >= 0) {
2434 			if (ret < 0)
2435 				ret = 0;
2436 			ret |= r;
2437 		}
2438 	}
2439 
2440 	return ret;
2441 }
2442 
2443 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2444 					  struct perf_hpp_list *list,
2445 					  int level)
2446 {
2447 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2448 
2449 	if (hse == NULL)
2450 		return -1;
2451 
2452 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2453 	return 0;
2454 }
2455 
2456 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2457 					    struct perf_hpp_list *list)
2458 {
2459 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2460 
2461 	if (hse == NULL)
2462 		return -1;
2463 
2464 	perf_hpp_list__column_register(list, &hse->hpp);
2465 	return 0;
2466 }
2467 
2468 #ifndef HAVE_LIBTRACEEVENT
2469 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2470 {
2471 	return false;
2472 }
2473 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2474 				     struct hists *hists __maybe_unused)
2475 {
2476 	return false;
2477 }
2478 #else
2479 struct hpp_dynamic_entry {
2480 	struct perf_hpp_fmt hpp;
2481 	struct evsel *evsel;
2482 	struct tep_format_field *field;
2483 	unsigned dynamic_len;
2484 	bool raw_trace;
2485 };
2486 
2487 static int hde_width(struct hpp_dynamic_entry *hde)
2488 {
2489 	if (!hde->hpp.len) {
2490 		int len = hde->dynamic_len;
2491 		int namelen = strlen(hde->field->name);
2492 		int fieldlen = hde->field->size;
2493 
2494 		if (namelen > len)
2495 			len = namelen;
2496 
2497 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2498 			/* length needed to print the value as hex (2 chars per byte plus "0x") */
2499 			fieldlen = hde->field->size * 2 + 2;
2500 		}
2501 		if (fieldlen > len)
2502 			len = fieldlen;
2503 
2504 		hde->hpp.len = len;
2505 	}
2506 	return hde->hpp.len;
2507 }
2508 
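/*
 * Scan the pretty-printed trace output for a space-separated
 * "<field name><separator><value>" token and grow hde->dynamic_len to the
 * widest value seen, so that hde_width() sizes the column to fit every
 * entry.
 */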
2509 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2510 			       struct hist_entry *he)
2511 {
2512 	char *str, *pos;
2513 	struct tep_format_field *field = hde->field;
2514 	size_t namelen;
2515 	bool last = false;
2516 
2517 	if (hde->raw_trace)
2518 		return;
2519 
2520 	/* parse pretty print result and update max length */
2521 	if (!he->trace_output)
2522 		he->trace_output = get_trace_output(he);
2523 
2524 	namelen = strlen(field->name);
2525 	str = he->trace_output;
2526 
2527 	while (str) {
2528 		pos = strchr(str, ' ');
2529 		if (pos == NULL) {
2530 			last = true;
2531 			pos = str + strlen(str);
2532 		}
2533 
2534 		if (!strncmp(str, field->name, namelen)) {
2535 			size_t len;
2536 
2537 			str += namelen + 1;
2538 			len = pos - str;
2539 
2540 			if (len > hde->dynamic_len)
2541 				hde->dynamic_len = len;
2542 			break;
2543 		}
2544 
2545 		if (last)
2546 			str = NULL;
2547 		else
2548 			str = pos + 1;
2549 	}
2550 }
2551 
2552 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2553 			      struct hists *hists __maybe_unused,
2554 			      int line __maybe_unused,
2555 			      int *span __maybe_unused)
2556 {
2557 	struct hpp_dynamic_entry *hde;
2558 	size_t len = fmt->user_len;
2559 
2560 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2561 
2562 	if (!len)
2563 		len = hde_width(hde);
2564 
2565 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2566 }
2567 
2568 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2569 			     struct perf_hpp *hpp __maybe_unused,
2570 			     struct hists *hists __maybe_unused)
2571 {
2572 	struct hpp_dynamic_entry *hde;
2573 	size_t len = fmt->user_len;
2574 
2575 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2576 
2577 	if (!len)
2578 		len = hde_width(hde);
2579 
2580 	return len;
2581 }
2582 
2583 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2584 {
2585 	struct hpp_dynamic_entry *hde;
2586 
2587 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2588 
2589 	return hists_to_evsel(hists) == hde->evsel;
2590 }
2591 
2592 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2593 			     struct hist_entry *he)
2594 {
2595 	struct hpp_dynamic_entry *hde;
2596 	size_t len = fmt->user_len;
2597 	char *str, *pos;
2598 	struct tep_format_field *field;
2599 	size_t namelen;
2600 	bool last = false;
2601 	int ret;
2602 
2603 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2604 
2605 	if (!len)
2606 		len = hde_width(hde);
2607 
2608 	if (hde->raw_trace)
2609 		goto raw_field;
2610 
2611 	if (!he->trace_output)
2612 		he->trace_output = get_trace_output(he);
2613 
2614 	field = hde->field;
2615 	namelen = strlen(field->name);
2616 	str = he->trace_output;
2617 
2618 	while (str) {
2619 		pos = strchr(str, ' ');
2620 		if (pos == NULL) {
2621 			last = true;
2622 			pos = str + strlen(str);
2623 		}
2624 
2625 		if (!strncmp(str, field->name, namelen)) {
2626 			str += namelen + 1;
2627 			str = strndup(str, pos - str);
2628 
2629 			if (str == NULL)
2630 				return scnprintf(hpp->buf, hpp->size,
2631 						 "%*.*s", len, len, "ERROR");
2632 			break;
2633 		}
2634 
2635 		if (last)
2636 			str = NULL;
2637 		else
2638 			str = pos + 1;
2639 	}
2640 
2641 	if (str == NULL) {
2642 		struct trace_seq seq;
2643 raw_field:
2644 		trace_seq_init(&seq);
2645 		tep_print_field(&seq, he->raw_data, hde->field);
2646 		str = seq.buffer;
2647 	}
2648 
2649 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2650 	free(str);
2651 	return ret;
2652 }
2653 
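/*
 * Compare two entries on the raw bytes of the tracepoint field.  For
 * TEP_FIELD_IS_DYNAMIC fields the record holds a descriptor instead of
 * the data itself: the low 16 bits are the payload offset and the next
 * 16 bits its size, which is also recorded as the maximum output width.
 */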
2654 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2655 			       struct hist_entry *a, struct hist_entry *b)
2656 {
2657 	struct hpp_dynamic_entry *hde;
2658 	struct tep_format_field *field;
2659 	unsigned offset, size;
2660 
2661 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2662 
2663 	field = hde->field;
2664 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2665 		unsigned long long dyn;
2666 
2667 		tep_read_number_field(field, a->raw_data, &dyn);
2668 		offset = dyn & 0xffff;
2669 		size = (dyn >> 16) & 0xffff;
2670 #ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
2671 		if (field->flags & TEP_FIELD_IS_RELATIVE)
2672 			offset += field->offset + field->size;
2673 #endif
2674 		/* record max width for output */
2675 		if (size > hde->dynamic_len)
2676 			hde->dynamic_len = size;
2677 	} else {
2678 		offset = field->offset;
2679 		size = field->size;
2680 	}
2681 
2682 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2683 }
2684 
2685 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2686 {
2687 	return fmt->cmp == __sort__hde_cmp;
2688 }
2689 
2690 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2691 {
2692 	struct hpp_dynamic_entry *hde_a;
2693 	struct hpp_dynamic_entry *hde_b;
2694 
2695 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2696 		return false;
2697 
2698 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2699 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2700 
2701 	return hde_a->field == hde_b->field;
2702 }
2703 
2704 static void hde_free(struct perf_hpp_fmt *fmt)
2705 {
2706 	struct hpp_dynamic_entry *hde;
2707 
2708 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2709 	free(hde);
2710 }
2711 
2712 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2713 {
2714 	struct hpp_dynamic_entry *hde;
2715 
2716 	if (!perf_hpp__is_dynamic_entry(fmt))
2717 		return;
2718 
2719 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2720 	update_dynamic_len(hde, he);
2721 }
2722 
2723 static struct hpp_dynamic_entry *
2724 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2725 		      int level)
2726 {
2727 	struct hpp_dynamic_entry *hde;
2728 
2729 	hde = malloc(sizeof(*hde));
2730 	if (hde == NULL) {
2731 		pr_debug("Memory allocation failed\n");
2732 		return NULL;
2733 	}
2734 
2735 	hde->evsel = evsel;
2736 	hde->field = field;
2737 	hde->dynamic_len = 0;
2738 
2739 	hde->hpp.name = field->name;
2740 	hde->hpp.header = __sort__hde_header;
2741 	hde->hpp.width  = __sort__hde_width;
2742 	hde->hpp.entry  = __sort__hde_entry;
2743 	hde->hpp.color  = NULL;
2744 
2745 	hde->hpp.init = __sort__hde_init;
2746 	hde->hpp.cmp = __sort__hde_cmp;
2747 	hde->hpp.collapse = __sort__hde_cmp;
2748 	hde->hpp.sort = __sort__hde_cmp;
2749 	hde->hpp.equal = __sort__hde_equal;
2750 	hde->hpp.free = hde_free;
2751 
2752 	INIT_LIST_HEAD(&hde->hpp.list);
2753 	INIT_LIST_HEAD(&hde->hpp.sort_list);
2754 	hde->hpp.elide = false;
2755 	hde->hpp.len = 0;
2756 	hde->hpp.user_len = 0;
2757 	hde->hpp.level = level;
2758 
2759 	return hde;
2760 }
2761 #endif /* HAVE_LIBTRACEEVENT */
2762 
2763 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2764 {
2765 	struct perf_hpp_fmt *new_fmt = NULL;
2766 
2767 	if (perf_hpp__is_sort_entry(fmt)) {
2768 		struct hpp_sort_entry *hse, *new_hse;
2769 
2770 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2771 		new_hse = memdup(hse, sizeof(*hse));
2772 		if (new_hse)
2773 			new_fmt = &new_hse->hpp;
2774 #ifdef HAVE_LIBTRACEEVENT
2775 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2776 		struct hpp_dynamic_entry *hde, *new_hde;
2777 
2778 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2779 		new_hde = memdup(hde, sizeof(*hde));
2780 		if (new_hde)
2781 			new_fmt = &new_hde->hpp;
2782 #endif
2783 	} else {
2784 		new_fmt = memdup(fmt, sizeof(*fmt));
2785 	}
2786 
2787 	INIT_LIST_HEAD(&new_fmt->list);
2788 	INIT_LIST_HEAD(&new_fmt->sort_list);
2789 
2790 	return new_fmt;
2791 }
2792 
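/*
 * Split a field token of the form "[<event>.]<field>[/<opt>]" in place.
 * For example (illustrative strings), "sched:sched_switch.next_pid/raw"
 * yields event "sched:sched_switch", field "next_pid" and opt "raw",
 * while a bare "next_pid" leaves the event name NULL.
 */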
2793 static int parse_field_name(char *str, char **event, char **field, char **opt)
2794 {
2795 	char *event_name, *field_name, *opt_name;
2796 
2797 	event_name = str;
2798 	field_name = strchr(str, '.');
2799 
2800 	if (field_name) {
2801 		*field_name++ = '\0';
2802 	} else {
2803 		event_name = NULL;
2804 		field_name = str;
2805 	}
2806 
2807 	opt_name = strchr(field_name, '/');
2808 	if (opt_name)
2809 		*opt_name++ = '\0';
2810 
2811 	*event = event_name;
2812 	*field = field_name;
2813 	*opt   = opt_name;
2814 
2815 	return 0;
2816 }
2817 
2818 /* Find the matching evsel using a given event name.  The event name can be:
2819  *   1. '%' + event index (e.g. '%1' for first event)
2820  *   2. full event name (e.g. sched:sched_switch)
2821  *   3. partial event name (should not contain ':')
2822  */
2823 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2824 {
2825 	struct evsel *evsel = NULL;
2826 	struct evsel *pos;
2827 	bool full_name;
2828 
2829 	/* case 1 */
2830 	if (event_name[0] == '%') {
2831 		int nr = strtol(event_name+1, NULL, 0);
2832 
2833 		if (nr > evlist->core.nr_entries)
2834 			return NULL;
2835 
2836 		evsel = evlist__first(evlist);
2837 		while (--nr > 0)
2838 			evsel = evsel__next(evsel);
2839 
2840 		return evsel;
2841 	}
2842 
2843 	full_name = !!strchr(event_name, ':');
2844 	evlist__for_each_entry(evlist, pos) {
2845 		/* case 2 */
2846 		if (full_name && !strcmp(pos->name, event_name))
2847 			return pos;
2848 		/* case 3 */
2849 		if (!full_name && strstr(pos->name, event_name)) {
2850 			if (evsel) {
2851 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2852 					 event_name, evsel->name, pos->name);
2853 				return NULL;
2854 			}
2855 			evsel = pos;
2856 		}
2857 	}
2858 
2859 	return evsel;
2860 }
2861 
2862 #ifdef HAVE_LIBTRACEEVENT
2863 static int __dynamic_dimension__add(struct evsel *evsel,
2864 				    struct tep_format_field *field,
2865 				    bool raw_trace, int level)
2866 {
2867 	struct hpp_dynamic_entry *hde;
2868 
2869 	hde = __alloc_dynamic_entry(evsel, field, level);
2870 	if (hde == NULL)
2871 		return -ENOMEM;
2872 
2873 	hde->raw_trace = raw_trace;
2874 
2875 	perf_hpp__register_sort_field(&hde->hpp);
2876 	return 0;
2877 }
2878 
2879 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2880 {
2881 	int ret;
2882 	struct tep_format_field *field;
2883 
2884 	field = evsel->tp_format->format.fields;
2885 	while (field) {
2886 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2887 		if (ret < 0)
2888 			return ret;
2889 
2890 		field = field->next;
2891 	}
2892 	return 0;
2893 }
2894 
2895 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2896 				  int level)
2897 {
2898 	int ret;
2899 	struct evsel *evsel;
2900 
2901 	evlist__for_each_entry(evlist, evsel) {
2902 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2903 			continue;
2904 
2905 		ret = add_evsel_fields(evsel, raw_trace, level);
2906 		if (ret < 0)
2907 			return ret;
2908 	}
2909 	return 0;
2910 }
2911 
2912 static int add_all_matching_fields(struct evlist *evlist,
2913 				   char *field_name, bool raw_trace, int level)
2914 {
2915 	int ret = -ESRCH;
2916 	struct evsel *evsel;
2917 	struct tep_format_field *field;
2918 
2919 	evlist__for_each_entry(evlist, evsel) {
2920 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2921 			continue;
2922 
2923 		field = tep_find_any_field(evsel->tp_format, field_name);
2924 		if (field == NULL)
2925 			continue;
2926 
2927 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2928 		if (ret < 0)
2929 			break;
2930 	}
2931 	return ret;
2932 }
2933 #endif /* HAVE_LIBTRACEEVENT */
2934 
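/*
 * Add sort entries for tracepoint fields named on the command line.  The
 * token has the form "[<event>.]<field>" with an optional "/raw" suffix;
 * <field> may also be "*" (all fields of one event) or the special name
 * "trace_fields" (all fields of all tracepoint events).  Tracepoint
 * events can only be handled when built with libtraceevent.
 */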
2935 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2936 			     int level)
2937 {
2938 	char *str, *event_name, *field_name, *opt_name;
2939 	struct evsel *evsel;
2940 	bool raw_trace = symbol_conf.raw_trace;
2941 	int ret = 0;
2942 
2943 	if (evlist == NULL)
2944 		return -ENOENT;
2945 
2946 	str = strdup(tok);
2947 	if (str == NULL)
2948 		return -ENOMEM;
2949 
2950 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2951 		ret = -EINVAL;
2952 		goto out;
2953 	}
2954 
2955 	if (opt_name) {
2956 		if (strcmp(opt_name, "raw")) {
2957 			pr_debug("unsupported field option %s\n", opt_name);
2958 			ret = -EINVAL;
2959 			goto out;
2960 		}
2961 		raw_trace = true;
2962 	}
2963 
2964 #ifdef HAVE_LIBTRACEEVENT
2965 	if (!strcmp(field_name, "trace_fields")) {
2966 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2967 		goto out;
2968 	}
2969 
2970 	if (event_name == NULL) {
2971 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2972 		goto out;
2973 	}
2974 #else
2975 	evlist__for_each_entry(evlist, evsel) {
2976 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2977 			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
2978 			ret = -ENOTSUP;
2979 		}
2980 	}
2981 
2982 	if (ret) {
2983 		pr_err("\n");
2984 		goto out;
2985 	}
2986 #endif
2987 
2988 	evsel = find_evsel(evlist, event_name);
2989 	if (evsel == NULL) {
2990 		pr_debug("Cannot find event: %s\n", event_name);
2991 		ret = -ENOENT;
2992 		goto out;
2993 	}
2994 
2995 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2996 		pr_debug("%s is not a tracepoint event\n", event_name);
2997 		ret = -EINVAL;
2998 		goto out;
2999 	}
3000 
3001 #ifdef HAVE_LIBTRACEEVENT
3002 	if (!strcmp(field_name, "*")) {
3003 		ret = add_evsel_fields(evsel, raw_trace, level);
3004 	} else {
3005 		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3006 
3007 		if (field == NULL) {
3008 			pr_debug("Cannot find event field for %s.%s\n",
3009 				 event_name, field_name);
3010 			return -ENOENT;
3011 		}
3012 
3013 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3014 	}
3015 #else
3016 	(void)level;
3017 	(void)raw_trace;
3018 #endif /* HAVE_LIBTRACEEVENT */
3019 
3020 out:
3021 	free(str);
3022 	return ret;
3023 }
3024 
3025 static int __sort_dimension__add(struct sort_dimension *sd,
3026 				 struct perf_hpp_list *list,
3027 				 int level)
3028 {
3029 	if (sd->taken)
3030 		return 0;
3031 
3032 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3033 		return -1;
3034 
3035 	if (sd->entry->se_collapse)
3036 		list->need_collapse = 1;
3037 
3038 	sd->taken = 1;
3039 
3040 	return 0;
3041 }
3042 
3043 static int __hpp_dimension__add(struct hpp_dimension *hd,
3044 				struct perf_hpp_list *list,
3045 				int level)
3046 {
3047 	struct perf_hpp_fmt *fmt;
3048 
3049 	if (hd->taken)
3050 		return 0;
3051 
3052 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3053 	if (!fmt)
3054 		return -1;
3055 
3056 	hd->taken = 1;
3057 	perf_hpp_list__register_sort_field(list, fmt);
3058 	return 0;
3059 }
3060 
3061 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3062 					struct sort_dimension *sd)
3063 {
3064 	if (sd->taken)
3065 		return 0;
3066 
3067 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3068 		return -1;
3069 
3070 	sd->taken = 1;
3071 	return 0;
3072 }
3073 
3074 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3075 				       struct hpp_dimension *hd)
3076 {
3077 	struct perf_hpp_fmt *fmt;
3078 
3079 	if (hd->taken)
3080 		return 0;
3081 
3082 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3083 	if (!fmt)
3084 		return -1;
3085 
3086 	hd->taken = 1;
3087 	perf_hpp_list__column_register(list, fmt);
3088 	return 0;
3089 }
3090 
3091 int hpp_dimension__add_output(unsigned col)
3092 {
3093 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3094 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3095 }
3096 
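/*
 * Resolve one sort key token against the dimension tables in order:
 * common keys, hpp (overhead/period) columns, branch stack keys (branch
 * mode only), memory keys (memory mode only) and finally dynamic
 * tracepoint fields.  Arch-specific keys unsupported on the running
 * architecture are silently accepted and dropped.  Returns -ESRCH for an
 * unknown key and -EINVAL for a key that is invalid in the current mode.
 */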
3097 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3098 			struct evlist *evlist,
3099 			int level)
3100 {
3101 	unsigned int i, j;
3102 
3103 	/*
3104 	 * Check to see if there are any arch specific
3105 	 * sort dimensions not applicable to the current
3106 	 * architecture.  If so, skip that sort key since
3107 	 * we don't want to display it in the output fields.
3108 	 */
3109 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3110 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3111 				!arch_support_sort_key(tok)) {
3112 			return 0;
3113 		}
3114 	}
3115 
3116 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3117 		struct sort_dimension *sd = &common_sort_dimensions[i];
3118 
3119 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3120 			continue;
3121 
3122 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3123 			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3124 				sort_dimension_add_dynamic_header(sd);
3125 		}
3126 
3127 		if (sd->entry == &sort_parent) {
3128 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3129 			if (ret) {
3130 				char err[BUFSIZ];
3131 
3132 				regerror(ret, &parent_regex, err, sizeof(err));
3133 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3134 				return -EINVAL;
3135 			}
3136 			list->parent = 1;
3137 		} else if (sd->entry == &sort_sym) {
3138 			list->sym = 1;
3139 			/*
3140 			 * perf diff displays the performance difference amongst
3141 			 * two or more perf.data files. Those files could come
3142 			 * from different binaries. So we should not compare
3143 			 * their IPs, but the symbol names.
3144 			 */
3145 			if (sort__mode == SORT_MODE__DIFF)
3146 				sd->entry->se_collapse = sort__sym_sort;
3147 
3148 		} else if (sd->entry == &sort_dso) {
3149 			list->dso = 1;
3150 		} else if (sd->entry == &sort_socket) {
3151 			list->socket = 1;
3152 		} else if (sd->entry == &sort_thread) {
3153 			list->thread = 1;
3154 		} else if (sd->entry == &sort_comm) {
3155 			list->comm = 1;
3156 		}
3157 
3158 		return __sort_dimension__add(sd, list, level);
3159 	}
3160 
3161 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3162 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3163 
3164 		if (strncasecmp(tok, hd->name, strlen(tok)))
3165 			continue;
3166 
3167 		return __hpp_dimension__add(hd, list, level);
3168 	}
3169 
3170 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3171 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3172 
3173 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3174 			continue;
3175 
3176 		if (sort__mode != SORT_MODE__BRANCH)
3177 			return -EINVAL;
3178 
3179 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3180 			list->sym = 1;
3181 
3182 		__sort_dimension__add(sd, list, level);
3183 		return 0;
3184 	}
3185 
3186 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3187 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3188 
3189 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3190 			continue;
3191 
3192 		if (sort__mode != SORT_MODE__MEMORY)
3193 			return -EINVAL;
3194 
3195 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3196 			return -EINVAL;
3197 
3198 		if (sd->entry == &sort_mem_daddr_sym)
3199 			list->sym = 1;
3200 
3201 		__sort_dimension__add(sd, list, level);
3202 		return 0;
3203 	}
3204 
3205 	if (!add_dynamic_entry(evlist, tok, level))
3206 		return 0;
3207 
3208 	return -ESRCH;
3209 }
3210 
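/*
 * Walk the comma/space separated sort string and add each key at its
 * hierarchy level.  Keys wrapped in braces share a level: for an
 * illustrative string such as "comm,{dso,sym}", comm gets its own level
 * while dso and sym end up together on a deeper one.
 */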
3211 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3212 			   struct evlist *evlist)
3213 {
3214 	char *tmp, *tok;
3215 	int ret = 0;
3216 	int level = 0;
3217 	int next_level = 1;
3218 	bool in_group = false;
3219 
3220 	do {
3221 		tok = str;
3222 		tmp = strpbrk(str, "{}, ");
3223 		if (tmp) {
3224 			if (in_group)
3225 				next_level = level;
3226 			else
3227 				next_level = level + 1;
3228 
3229 			if (*tmp == '{')
3230 				in_group = true;
3231 			else if (*tmp == '}')
3232 				in_group = false;
3233 
3234 			*tmp = '\0';
3235 			str = tmp + 1;
3236 		}
3237 
3238 		if (*tok) {
3239 			ret = sort_dimension__add(list, tok, evlist, level);
3240 			if (ret == -EINVAL) {
3241 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3242 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3243 				else
3244 					ui__error("Invalid --sort key: `%s'", tok);
3245 				break;
3246 			} else if (ret == -ESRCH) {
3247 				ui__error("Unknown --sort key: `%s'", tok);
3248 				break;
3249 			}
3250 		}
3251 
3252 		level = next_level;
3253 	} while (tmp);
3254 
3255 	return ret;
3256 }
3257 
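/*
 * Pick the default sort order for the current sort__mode.  When every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * first, so "trace" (or "trace_fields" when symbol_conf.raw_trace is set)
 * becomes the default.
 */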
3258 static const char *get_default_sort_order(struct evlist *evlist)
3259 {
3260 	const char *default_sort_orders[] = {
3261 		default_sort_order,
3262 		default_branch_sort_order,
3263 		default_mem_sort_order,
3264 		default_top_sort_order,
3265 		default_diff_sort_order,
3266 		default_tracepoint_sort_order,
3267 	};
3268 	bool use_trace = true;
3269 	struct evsel *evsel;
3270 
3271 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3272 
3273 	if (evlist == NULL || evlist__empty(evlist))
3274 		goto out_no_evlist;
3275 
3276 	evlist__for_each_entry(evlist, evsel) {
3277 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3278 			use_trace = false;
3279 			break;
3280 		}
3281 	}
3282 
3283 	if (use_trace) {
3284 		sort__mode = SORT_MODE__TRACEPOINT;
3285 		if (symbol_conf.raw_trace)
3286 			return "trace_fields";
3287 	}
3288 out_no_evlist:
3289 	return default_sort_orders[sort__mode];
3290 }
3291 
3292 static int setup_sort_order(struct evlist *evlist)
3293 {
3294 	char *new_sort_order;
3295 
3296 	/*
3297 	 * Append '+'-prefixed sort order to the default sort
3298 	 * order string.
3299 	 */
3300 	if (!sort_order || is_strict_order(sort_order))
3301 		return 0;
3302 
3303 	if (sort_order[1] == '\0') {
3304 		ui__error("Invalid --sort key: `+'");
3305 		return -EINVAL;
3306 	}
3307 
3308 	/*
3309 	 * We allocate a new sort_order string, but we never free it,
3310 	 * because it is referenced throughout the rest of the code.
3311 	 */
3312 	if (asprintf(&new_sort_order, "%s,%s",
3313 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3314 		pr_err("Not enough memory to set up --sort");
3315 		return -ENOMEM;
3316 	}
3317 
3318 	sort_order = new_sort_order;
3319 	return 0;
3320 }
3321 
3322 /*
3323  * Adds the 'pre,' prefix to 'str' if 'pre' is
3324  * not already part of 'str'.
3325  */
3326 static char *prefix_if_not_in(const char *pre, char *str)
3327 {
3328 	char *n;
3329 
3330 	if (!str || strstr(str, pre))
3331 		return str;
3332 
3333 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3334 		n = NULL;
3335 
3336 	free(str);
3337 	return n;
3338 }
3339 
3340 static char *setup_overhead(char *keys)
3341 {
3342 	if (sort__mode == SORT_MODE__DIFF)
3343 		return keys;
3344 
3345 	keys = prefix_if_not_in("overhead", keys);
3346 
3347 	if (symbol_conf.cumulate_callchain)
3348 		keys = prefix_if_not_in("overhead_children", keys);
3349 
3350 	return keys;
3351 }
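/*
 * For example (illustrative key string), "comm,dso" becomes
 * "overhead,comm,dso", and additionally gets the "overhead_children"
 * prefix when cumulate_callchain is in effect; perf diff keeps the keys
 * untouched.
 */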
3352 
3353 static int __setup_sorting(struct evlist *evlist)
3354 {
3355 	char *str;
3356 	const char *sort_keys;
3357 	int ret = 0;
3358 
3359 	ret = setup_sort_order(evlist);
3360 	if (ret)
3361 		return ret;
3362 
3363 	sort_keys = sort_order;
3364 	if (sort_keys == NULL) {
3365 		if (is_strict_order(field_order)) {
3366 			/*
3367 			 * If user specified field order but no sort order,
3368 			 * we'll honor it and not add default sort orders.
3369 			 */
3370 			return 0;
3371 		}
3372 
3373 		sort_keys = get_default_sort_order(evlist);
3374 	}
3375 
3376 	str = strdup(sort_keys);
3377 	if (str == NULL) {
3378 		pr_err("Not enough memory to setup sort keys");
3379 		return -ENOMEM;
3380 	}
3381 
3382 	/*
3383 	 * Prepend overhead fields for backward compatibility.
3384 	 */
3385 	if (!is_strict_order(field_order)) {
3386 		str = setup_overhead(str);
3387 		if (str == NULL) {
3388 			pr_err("Not enough memory to setup overhead keys");
3389 			return -ENOMEM;
3390 		}
3391 	}
3392 
3393 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3394 
3395 	free(str);
3396 	return ret;
3397 }
3398 
3399 void perf_hpp__set_elide(int idx, bool elide)
3400 {
3401 	struct perf_hpp_fmt *fmt;
3402 	struct hpp_sort_entry *hse;
3403 
3404 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3405 		if (!perf_hpp__is_sort_entry(fmt))
3406 			continue;
3407 
3408 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3409 		if (hse->se->se_width_idx == idx) {
3410 			fmt->elide = elide;
3411 			break;
3412 		}
3413 	}
3414 }
3415 
3416 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3417 {
3418 	if (list && strlist__nr_entries(list) == 1) {
3419 		if (fp != NULL)
3420 			fprintf(fp, "# %s: %s\n", list_name,
3421 				strlist__entry(list, 0)->s);
3422 		return true;
3423 	}
3424 	return false;
3425 }
3426 
3427 static bool get_elide(int idx, FILE *output)
3428 {
3429 	switch (idx) {
3430 	case HISTC_SYMBOL:
3431 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3432 	case HISTC_DSO:
3433 		return __get_elide(symbol_conf.dso_list, "dso", output);
3434 	case HISTC_COMM:
3435 		return __get_elide(symbol_conf.comm_list, "comm", output);
3436 	default:
3437 		break;
3438 	}
3439 
3440 	if (sort__mode != SORT_MODE__BRANCH)
3441 		return false;
3442 
3443 	switch (idx) {
3444 	case HISTC_SYMBOL_FROM:
3445 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3446 	case HISTC_SYMBOL_TO:
3447 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3448 	case HISTC_DSO_FROM:
3449 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3450 	case HISTC_DSO_TO:
3451 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3452 	case HISTC_ADDR_FROM:
3453 		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3454 	case HISTC_ADDR_TO:
3455 		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3456 	default:
3457 		break;
3458 	}
3459 
3460 	return false;
3461 }
3462 
3463 void sort__setup_elide(FILE *output)
3464 {
3465 	struct perf_hpp_fmt *fmt;
3466 	struct hpp_sort_entry *hse;
3467 
3468 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3469 		if (!perf_hpp__is_sort_entry(fmt))
3470 			continue;
3471 
3472 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3473 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3474 	}
3475 
3476 	/*
3477 	 * It makes no sense to elide all of the sort entries.
3478 	 * Just revert them all so they show up again.
3479 	 */
3480 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3481 		if (!perf_hpp__is_sort_entry(fmt))
3482 			continue;
3483 
3484 		if (!fmt->elide)
3485 			return;
3486 	}
3487 
3488 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3489 		if (!perf_hpp__is_sort_entry(fmt))
3490 			continue;
3491 
3492 		fmt->elide = false;
3493 	}
3494 }
3495 
3496 int output_field_add(struct perf_hpp_list *list, char *tok)
3497 {
3498 	unsigned int i;
3499 
3500 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3501 		struct sort_dimension *sd = &common_sort_dimensions[i];
3502 
3503 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3504 			continue;
3505 
3506 		return __sort_dimension__add_output(list, sd);
3507 	}
3508 
3509 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3510 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3511 
3512 		if (strncasecmp(tok, hd->name, strlen(tok)))
3513 			continue;
3514 
3515 		return __hpp_dimension__add_output(list, hd);
3516 	}
3517 
3518 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3519 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3520 
3521 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3522 			continue;
3523 
3524 		if (sort__mode != SORT_MODE__BRANCH)
3525 			return -EINVAL;
3526 
3527 		return __sort_dimension__add_output(list, sd);
3528 	}
3529 
3530 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3531 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3532 
3533 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3534 			continue;
3535 
3536 		if (sort__mode != SORT_MODE__MEMORY)
3537 			return -EINVAL;
3538 
3539 		return __sort_dimension__add_output(list, sd);
3540 	}
3541 
3542 	return -ESRCH;
3543 }
3544 
3545 static int setup_output_list(struct perf_hpp_list *list, char *str)
3546 {
3547 	char *tmp, *tok;
3548 	int ret = 0;
3549 
3550 	for (tok = strtok_r(str, ", ", &tmp);
3551 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3552 		ret = output_field_add(list, tok);
3553 		if (ret == -EINVAL) {
3554 			ui__error("Invalid --fields key: `%s'", tok);
3555 			break;
3556 		} else if (ret == -ESRCH) {
3557 			ui__error("Unknown --fields key: `%s'", tok);
3558 			break;
3559 		}
3560 	}
3561 
3562 	return ret;
3563 }
3564 
3565 void reset_dimensions(void)
3566 {
3567 	unsigned int i;
3568 
3569 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3570 		common_sort_dimensions[i].taken = 0;
3571 
3572 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3573 		hpp_sort_dimensions[i].taken = 0;
3574 
3575 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3576 		bstack_sort_dimensions[i].taken = 0;
3577 
3578 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3579 		memory_sort_dimensions[i].taken = 0;
3580 }
3581 
3582 bool is_strict_order(const char *order)
3583 {
3584 	return order && (*order != '+');
3585 }
3586 
3587 static int __setup_output_field(void)
3588 {
3589 	char *str, *strp;
3590 	int ret = -EINVAL;
3591 
3592 	if (field_order == NULL)
3593 		return 0;
3594 
3595 	strp = str = strdup(field_order);
3596 	if (str == NULL) {
3597 		pr_err("Not enough memory to setup output fields");
3598 		return -ENOMEM;
3599 	}
3600 
3601 	if (!is_strict_order(field_order))
3602 		strp++;
3603 
3604 	if (!strlen(strp)) {
3605 		ui__error("Invalid --fields key: `+'");
3606 		goto out;
3607 	}
3608 
3609 	ret = setup_output_list(&perf_hpp_list, strp);
3610 
3611 out:
3612 	free(str);
3613 	return ret;
3614 }
3615 
3616 int setup_sorting(struct evlist *evlist)
3617 {
3618 	int err;
3619 
3620 	err = __setup_sorting(evlist);
3621 	if (err < 0)
3622 		return err;
3623 
3624 	if (parent_pattern != default_parent_pattern) {
3625 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3626 		if (err < 0)
3627 			return err;
3628 	}
3629 
3630 	reset_dimensions();
3631 
3632 	/*
3633 	 * perf diff doesn't use default hpp output fields.
3634 	 */
3635 	if (sort__mode != SORT_MODE__DIFF)
3636 		perf_hpp__init();
3637 
3638 	err = __setup_output_field();
3639 	if (err < 0)
3640 		return err;
3641 
3642 	/* copy sort keys to output fields */
3643 	perf_hpp__setup_output_field(&perf_hpp_list);
3644 	/* and then copy output fields to sort keys */
3645 	perf_hpp__append_sort_keys(&perf_hpp_list);
3646 
3647 	/* setup hists-specific output fields */
3648 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3649 		return -1;
3650 
3651 	return 0;
3652 }
3653 
3654 void reset_output_field(void)
3655 {
3656 	perf_hpp_list.need_collapse = 0;
3657 	perf_hpp_list.parent = 0;
3658 	perf_hpp_list.sym = 0;
3659 	perf_hpp_list.dso = 0;
3660 
3661 	field_order = NULL;
3662 	sort_order = NULL;
3663 
3664 	reset_dimensions();
3665 	perf_hpp__reset_output_field(&perf_hpp_list);
3666 }
3667 
3668 #define INDENT (3*8 + 1)
3669 
3670 static void add_key(struct strbuf *sb, const char *str, int *llen)
3671 {
3672 	if (!str)
3673 		return;
3674 
3675 	if (*llen >= 75) {
3676 		strbuf_addstr(sb, "\n\t\t\t ");
3677 		*llen = INDENT;
3678 	}
3679 	strbuf_addf(sb, " %s", str);
3680 	*llen += strlen(str) + 1;
3681 }
3682 
3683 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3684 			    int *llen)
3685 {
3686 	int i;
3687 
3688 	for (i = 0; i < n; i++)
3689 		add_key(sb, s[i].name, llen);
3690 }
3691 
3692 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3693 				int *llen)
3694 {
3695 	int i;
3696 
3697 	for (i = 0; i < n; i++)
3698 		add_key(sb, s[i].name, llen);
3699 }
3700 
3701 char *sort_help(const char *prefix)
3702 {
3703 	struct strbuf sb;
3704 	char *s;
3705 	int len = strlen(prefix) + INDENT;
3706 
3707 	strbuf_init(&sb, 300);
3708 	strbuf_addstr(&sb, prefix);
3709 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3710 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3711 	add_sort_string(&sb, common_sort_dimensions,
3712 			    ARRAY_SIZE(common_sort_dimensions), &len);
3713 	add_sort_string(&sb, bstack_sort_dimensions,
3714 			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3715 	add_sort_string(&sb, memory_sort_dimensions,
3716 			    ARRAY_SIZE(memory_sort_dimensions), &len);
3717 	s = strbuf_detach(&sb, NULL);
3718 	strbuf_release(&sb);
3719 	return s;
3720 }
3721