xref: /linux/tools/perf/util/sort.c (revision 9052e9c95d908d6c3d7570aadc8898e1d871c8bb)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include <traceevent/event-parse.h>
26 #include "mem-events.h"
27 #include "annotate.h"
28 #include "event.h"
29 #include "time-utils.h"
30 #include "cgroup.h"
31 #include "machine.h"
32 #include <linux/kernel.h>
33 #include <linux/string.h>
34 
35 regex_t		parent_regex;
36 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
37 const char	*parent_pattern = default_parent_pattern;
38 const char	*default_sort_order = "comm,dso,symbol";
39 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
40 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,p_stage_cyc";
41 const char	default_top_sort_order[] = "dso,symbol";
42 const char	default_diff_sort_order[] = "dso,symbol";
43 const char	default_tracepoint_sort_order[] = "trace";
44 const char	*sort_order;
45 const char	*field_order;
46 regex_t		ignore_callees_regex;
47 int		have_ignore_callees = 0;
48 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
49 const char	*dynamic_headers[] = {"local_ins_lat", "p_stage_cyc"};
50 const char	*arch_specific_sort_keys[] = {"p_stage_cyc"};
51 
52 /*
53  * Replaces all occurrences of a char used with the:
54  *
55  * -t, --field-separator
56  *
57  * option, which uses a special separator character and does not pad with
58  * spaces; every occurrence of that separator in symbol names (and other
59  * output) is replaced with a '.', so it remains the only invalid separator.
60  */
61 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
62 {
63 	int n;
64 	va_list ap;
65 
66 	va_start(ap, fmt);
67 	n = vsnprintf(bf, size, fmt, ap);
68 	if (symbol_conf.field_sep && n > 0) {
69 		char *sep = bf;
70 
71 		while (1) {
72 			sep = strchr(sep, *symbol_conf.field_sep);
73 			if (sep == NULL)
74 				break;
75 			*sep = '.';
76 		}
77 	}
78 	va_end(ap);
79 
80 	if (n >= (int)size)
81 		return size - 1;
82 	return n;
83 }
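
/*
 * Example (for illustration): with "-t ," a C++ symbol such as
 * "operator,()" is emitted as "operator.()", so the chosen separator can
 * never appear inside a field.  Note the return value is clamped to
 * size - 1 when vsnprintf() reports truncation.
 */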
84 
85 static int64_t cmp_null(const void *l, const void *r)
86 {
87 	if (!l && !r)
88 		return 0;
89 	else if (!l)
90 		return -1;
91 	else
92 		return 1;
93 }
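
/*
 * Convention for the *_cmp routines below: when only one side has the
 * data being compared (map, symbol, branch_info, ...), cmp_null() gives
 * a stable ordering so entries with missing data group together.
 */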
94 
95 /* --sort pid */
96 
97 static int64_t
98 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
99 {
100 	return right->thread->tid - left->thread->tid;
101 }
102 
103 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
104 				       size_t size, unsigned int width)
105 {
106 	const char *comm = thread__comm_str(he->thread);
107 
108 	width = max(7U, width) - 8;
109 	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
110 			       width, width, comm ?: "");
111 }
112 
113 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
114 {
115 	const struct thread *th = arg;
116 
117 	if (type != HIST_FILTER__THREAD)
118 		return -1;
119 
120 	return th && he->thread != th;
121 }
122 
123 struct sort_entry sort_thread = {
124 	.se_header	= "    Pid:Command",
125 	.se_cmp		= sort__thread_cmp,
126 	.se_snprintf	= hist_entry__thread_snprintf,
127 	.se_filter	= hist_entry__thread_filter,
128 	.se_width_idx	= HISTC_THREAD,
129 };
130 
131 /* --sort comm */
132 
133 /*
134  * We can't use pointer comparison in functions below,
135  * because it gives different results based on pointer
136  * values, which could break some sorting assumptions.
137  */
138 static int64_t
139 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
140 {
141 	return strcmp(comm__str(right->comm), comm__str(left->comm));
142 }
143 
144 static int64_t
145 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
146 {
147 	return strcmp(comm__str(right->comm), comm__str(left->comm));
148 }
149 
150 static int64_t
151 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
152 {
153 	return strcmp(comm__str(right->comm), comm__str(left->comm));
154 }
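
/*
 * All three hooks compare the comm strings rather than the struct comm
 * pointers: pointer order depends on where the comm happened to be
 * allocated, so identical commands could sort differently between runs.
 */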
155 
156 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
157 				     size_t size, unsigned int width)
158 {
159 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
160 }
161 
162 struct sort_entry sort_comm = {
163 	.se_header	= "Command",
164 	.se_cmp		= sort__comm_cmp,
165 	.se_collapse	= sort__comm_collapse,
166 	.se_sort	= sort__comm_sort,
167 	.se_snprintf	= hist_entry__comm_snprintf,
168 	.se_filter	= hist_entry__thread_filter,
169 	.se_width_idx	= HISTC_COMM,
170 };
171 
172 /* --sort dso */
173 
174 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
175 {
176 	struct dso *dso_l = map_l ? map_l->dso : NULL;
177 	struct dso *dso_r = map_r ? map_r->dso : NULL;
178 	const char *dso_name_l, *dso_name_r;
179 
180 	if (!dso_l || !dso_r)
181 		return cmp_null(dso_r, dso_l);
182 
183 	if (verbose > 0) {
184 		dso_name_l = dso_l->long_name;
185 		dso_name_r = dso_r->long_name;
186 	} else {
187 		dso_name_l = dso_l->short_name;
188 		dso_name_r = dso_r->short_name;
189 	}
190 
191 	return strcmp(dso_name_l, dso_name_r);
192 }
193 
194 static int64_t
195 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
196 {
197 	return _sort__dso_cmp(right->ms.map, left->ms.map);
198 }
199 
200 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
201 				     size_t size, unsigned int width)
202 {
203 	if (map && map->dso) {
204 		const char *dso_name = verbose > 0 ? map->dso->long_name :
205 			map->dso->short_name;
206 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
207 	}
208 
209 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
210 }
211 
212 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
213 				    size_t size, unsigned int width)
214 {
215 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
216 }
217 
218 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
219 {
220 	const struct dso *dso = arg;
221 
222 	if (type != HIST_FILTER__DSO)
223 		return -1;
224 
225 	return dso && (!he->ms.map || he->ms.map->dso != dso);
226 }
227 
228 struct sort_entry sort_dso = {
229 	.se_header	= "Shared Object",
230 	.se_cmp		= sort__dso_cmp,
231 	.se_snprintf	= hist_entry__dso_snprintf,
232 	.se_filter	= hist_entry__dso_filter,
233 	.se_width_idx	= HISTC_DSO,
234 };
235 
236 /* --sort symbol */
237 
238 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
239 {
240 	return (int64_t)(right_ip - left_ip);
241 }
242 
243 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
244 {
245 	if (!sym_l || !sym_r)
246 		return cmp_null(sym_l, sym_r);
247 
248 	if (sym_l == sym_r)
249 		return 0;
250 
251 	if (sym_l->inlined || sym_r->inlined) {
252 		int ret = strcmp(sym_l->name, sym_r->name);
253 
254 		if (ret)
255 			return ret;
256 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
257 			return 0;
258 	}
259 
260 	if (sym_l->start != sym_r->start)
261 		return (int64_t)(sym_r->start - sym_l->start);
262 
263 	return (int64_t)(sym_r->end - sym_l->end);
264 }
265 
266 static int64_t
267 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
268 {
269 	int64_t ret;
270 
271 	if (!left->ms.sym && !right->ms.sym)
272 		return _sort__addr_cmp(left->ip, right->ip);
273 
274 	/*
275 	 * comparing symbol address alone is not enough since it's a
276 	 * relative address within a dso.
277 	 */
278 	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
279 		ret = sort__dso_cmp(left, right);
280 		if (ret != 0)
281 			return ret;
282 	}
283 
284 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
285 }
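
/*
 * In other words: two different DSOs can both contain a symbol starting
 * at the same relative address, so sort__dso_cmp() is consulted before
 * falling back to _sort__sym_cmp(), which then only breaks ties within
 * one object.
 */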
286 
287 static int64_t
288 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
289 {
290 	if (!left->ms.sym || !right->ms.sym)
291 		return cmp_null(left->ms.sym, right->ms.sym);
292 
293 	return strcmp(right->ms.sym->name, left->ms.sym->name);
294 }
295 
296 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
297 				     u64 ip, char level, char *bf, size_t size,
298 				     unsigned int width)
299 {
300 	struct symbol *sym = ms->sym;
301 	struct map *map = ms->map;
302 	size_t ret = 0;
303 
304 	if (verbose > 0) {
305 		char o = map ? dso__symtab_origin(map->dso) : '!';
306 		u64 rip = ip;
307 
308 		if (map && map->dso && map->dso->kernel
309 		    && map->dso->adjust_symbols)
310 			rip = map->unmap_ip(map, ip);
311 
312 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
313 				       BITS_PER_LONG / 4 + 2, rip, o);
314 	}
315 
316 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
317 	if (sym && map) {
318 		if (sym->type == STT_OBJECT) {
319 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
320 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
321 					ip - map->unmap_ip(map, sym->start));
322 		} else {
323 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
324 					       width - ret,
325 					       sym->name);
326 			if (sym->inlined)
327 				ret += repsep_snprintf(bf + ret, size - ret,
328 						       " (inlined)");
329 		}
330 	} else {
331 		size_t len = BITS_PER_LONG / 4;
332 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
333 				       len, ip);
334 	}
335 
336 	return ret;
337 }
338 
339 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
340 {
341 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
342 					 he->level, bf, size, width);
343 }
344 
345 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
346 {
347 	const char *sym = arg;
348 
349 	if (type != HIST_FILTER__SYMBOL)
350 		return -1;
351 
352 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
353 }
354 
355 struct sort_entry sort_sym = {
356 	.se_header	= "Symbol",
357 	.se_cmp		= sort__sym_cmp,
358 	.se_sort	= sort__sym_sort,
359 	.se_snprintf	= hist_entry__sym_snprintf,
360 	.se_filter	= hist_entry__sym_filter,
361 	.se_width_idx	= HISTC_SYMBOL,
362 };
363 
364 /* --sort srcline */
365 
366 char *hist_entry__srcline(struct hist_entry *he)
367 {
368 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
369 }
370 
371 static int64_t
372 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
373 {
374 	if (!left->srcline)
375 		left->srcline = hist_entry__srcline(left);
376 	if (!right->srcline)
377 		right->srcline = hist_entry__srcline(right);
378 
379 	return strcmp(right->srcline, left->srcline);
380 }
381 
382 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
383 					size_t size, unsigned int width)
384 {
385 	if (!he->srcline)
386 		he->srcline = hist_entry__srcline(he);
387 
388 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
389 }
390 
391 struct sort_entry sort_srcline = {
392 	.se_header	= "Source:Line",
393 	.se_cmp		= sort__srcline_cmp,
394 	.se_snprintf	= hist_entry__srcline_snprintf,
395 	.se_width_idx	= HISTC_SRCLINE,
396 };
397 
398 /* --sort srcline_from */
399 
400 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
401 {
402 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
403 }
404 
405 static int64_t
406 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
407 {
408 	if (!left->branch_info->srcline_from)
409 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
410 
411 	if (!right->branch_info->srcline_from)
412 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
413 
414 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
415 }
416 
417 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
418 					size_t size, unsigned int width)
419 {
420 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
421 }
422 
423 struct sort_entry sort_srcline_from = {
424 	.se_header	= "From Source:Line",
425 	.se_cmp		= sort__srcline_from_cmp,
426 	.se_snprintf	= hist_entry__srcline_from_snprintf,
427 	.se_width_idx	= HISTC_SRCLINE_FROM,
428 };
429 
430 /* --sort srcline_to */
431 
432 static int64_t
433 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
434 {
435 	if (!left->branch_info->srcline_to)
436 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
437 
438 	if (!right->branch_info->srcline_to)
439 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
440 
441 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
442 }
443 
444 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
445 					size_t size, unsigned int width)
446 {
447 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
448 }
449 
450 struct sort_entry sort_srcline_to = {
451 	.se_header	= "To Source:Line",
452 	.se_cmp		= sort__srcline_to_cmp,
453 	.se_snprintf	= hist_entry__srcline_to_snprintf,
454 	.se_width_idx	= HISTC_SRCLINE_TO,
455 };
456 
457 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
458 					size_t size, unsigned int width)
459 {
460 
461 	struct symbol *sym = he->ms.sym;
462 	struct annotation *notes;
463 	double ipc = 0.0, coverage = 0.0;
464 	char tmp[64];
465 
466 	if (!sym)
467 		return repsep_snprintf(bf, size, "%-*s", width, "-");
468 
469 	notes = symbol__annotation(sym);
470 
471 	if (notes->hit_cycles)
472 		ipc = notes->hit_insn / ((double)notes->hit_cycles);
473 
474 	if (notes->total_insn) {
475 		coverage = notes->cover_insn * 100.0 /
476 			((double)notes->total_insn);
477 	}
478 
479 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
480 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
481 }
482 
483 struct sort_entry sort_sym_ipc = {
484 	.se_header	= "IPC   [IPC Coverage]",
485 	.se_cmp		= sort__sym_cmp,
486 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
487 	.se_width_idx	= HISTC_SYMBOL_IPC,
488 };
489 
490 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
491 					     __maybe_unused,
492 					     char *bf, size_t size,
493 					     unsigned int width)
494 {
495 	char tmp[64];
496 
497 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
498 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
499 }
500 
501 struct sort_entry sort_sym_ipc_null = {
502 	.se_header	= "IPC   [IPC Coverage]",
503 	.se_cmp		= sort__sym_cmp,
504 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
505 	.se_width_idx	= HISTC_SYMBOL_IPC,
506 };
507 
508 /* --sort srcfile */
509 
510 static char no_srcfile[1];
511 
512 static char *hist_entry__get_srcfile(struct hist_entry *e)
513 {
514 	char *sf, *p;
515 	struct map *map = e->ms.map;
516 
517 	if (!map)
518 		return no_srcfile;
519 
520 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
521 			 e->ms.sym, false, true, true, e->ip);
522 	if (!strcmp(sf, SRCLINE_UNKNOWN))
523 		return no_srcfile;
524 	p = strchr(sf, ':');
525 	if (p && *sf) {
526 		*p = 0;
527 		return sf;
528 	}
529 	free(sf);
530 	return no_srcfile;
531 }
532 
533 static int64_t
534 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
535 {
536 	if (!left->srcfile)
537 		left->srcfile = hist_entry__get_srcfile(left);
538 	if (!right->srcfile)
539 		right->srcfile = hist_entry__get_srcfile(right);
540 
541 	return strcmp(right->srcfile, left->srcfile);
542 }
543 
544 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
545 					size_t size, unsigned int width)
546 {
547 	if (!he->srcfile)
548 		he->srcfile = hist_entry__get_srcfile(he);
549 
550 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
551 }
552 
553 struct sort_entry sort_srcfile = {
554 	.se_header	= "Source File",
555 	.se_cmp		= sort__srcfile_cmp,
556 	.se_snprintf	= hist_entry__srcfile_snprintf,
557 	.se_width_idx	= HISTC_SRCFILE,
558 };
559 
560 /* --sort parent */
561 
562 static int64_t
563 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
564 {
565 	struct symbol *sym_l = left->parent;
566 	struct symbol *sym_r = right->parent;
567 
568 	if (!sym_l || !sym_r)
569 		return cmp_null(sym_l, sym_r);
570 
571 	return strcmp(sym_r->name, sym_l->name);
572 }
573 
574 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
575 				       size_t size, unsigned int width)
576 {
577 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
578 			      he->parent ? he->parent->name : "[other]");
579 }
580 
581 struct sort_entry sort_parent = {
582 	.se_header	= "Parent symbol",
583 	.se_cmp		= sort__parent_cmp,
584 	.se_snprintf	= hist_entry__parent_snprintf,
585 	.se_width_idx	= HISTC_PARENT,
586 };
587 
588 /* --sort cpu */
589 
590 static int64_t
591 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
592 {
593 	return right->cpu - left->cpu;
594 }
595 
596 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
597 				    size_t size, unsigned int width)
598 {
599 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
600 }
601 
602 struct sort_entry sort_cpu = {
603 	.se_header      = "CPU",
604 	.se_cmp	        = sort__cpu_cmp,
605 	.se_snprintf    = hist_entry__cpu_snprintf,
606 	.se_width_idx	= HISTC_CPU,
607 };
608 
609 /* --sort cgroup_id */
610 
611 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
612 {
613 	return (int64_t)(right_dev - left_dev);
614 }
615 
616 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
617 {
618 	return (int64_t)(right_ino - left_ino);
619 }
620 
621 static int64_t
622 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
623 {
624 	int64_t ret;
625 
626 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
627 	if (ret != 0)
628 		return ret;
629 
630 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
631 				       left->cgroup_id.ino);
632 }
633 
634 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
635 					  char *bf, size_t size,
636 					  unsigned int width __maybe_unused)
637 {
638 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
639 			       he->cgroup_id.ino);
640 }
641 
642 struct sort_entry sort_cgroup_id = {
643 	.se_header      = "cgroup id (dev/inode)",
644 	.se_cmp	        = sort__cgroup_id_cmp,
645 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
646 	.se_width_idx	= HISTC_CGROUP_ID,
647 };
648 
649 /* --sort cgroup */
650 
651 static int64_t
652 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
653 {
654 	return right->cgroup - left->cgroup;
655 }
656 
657 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
658 				       char *bf, size_t size,
659 				       unsigned int width __maybe_unused)
660 {
661 	const char *cgrp_name = "N/A";
662 
663 	if (he->cgroup) {
664 		struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
665 						   he->cgroup);
666 		if (cgrp != NULL)
667 			cgrp_name = cgrp->name;
668 		else
669 			cgrp_name = "unknown";
670 	}
671 
672 	return repsep_snprintf(bf, size, "%s", cgrp_name);
673 }
674 
675 struct sort_entry sort_cgroup = {
676 	.se_header      = "Cgroup",
677 	.se_cmp	        = sort__cgroup_cmp,
678 	.se_snprintf    = hist_entry__cgroup_snprintf,
679 	.se_width_idx	= HISTC_CGROUP,
680 };
681 
682 /* --sort socket */
683 
684 static int64_t
685 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
686 {
687 	return right->socket - left->socket;
688 }
689 
690 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
691 				    size_t size, unsigned int width)
692 {
693 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
694 }
695 
696 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
697 {
698 	int sk = *(const int *)arg;
699 
700 	if (type != HIST_FILTER__SOCKET)
701 		return -1;
702 
703 	return sk >= 0 && he->socket != sk;
704 }
705 
706 struct sort_entry sort_socket = {
707 	.se_header      = "Socket",
708 	.se_cmp	        = sort__socket_cmp,
709 	.se_snprintf    = hist_entry__socket_snprintf,
710 	.se_filter      = hist_entry__socket_filter,
711 	.se_width_idx	= HISTC_SOCKET,
712 };
713 
714 /* --sort time */
715 
716 static int64_t
717 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
718 {
719 	return right->time - left->time;
720 }
721 
722 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
723 				    size_t size, unsigned int width)
724 {
725 	char he_time[32];
726 
727 	if (symbol_conf.nanosecs)
728 		timestamp__scnprintf_nsec(he->time, he_time,
729 					  sizeof(he_time));
730 	else
731 		timestamp__scnprintf_usec(he->time, he_time,
732 					  sizeof(he_time));
733 
734 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
735 }
736 
737 struct sort_entry sort_time = {
738 	.se_header      = "Time",
739 	.se_cmp	        = sort__time_cmp,
740 	.se_snprintf    = hist_entry__time_snprintf,
741 	.se_width_idx	= HISTC_TIME,
742 };
743 
744 /* --sort trace */
745 
746 static char *get_trace_output(struct hist_entry *he)
747 {
748 	struct trace_seq seq;
749 	struct evsel *evsel;
750 	struct tep_record rec = {
751 		.data = he->raw_data,
752 		.size = he->raw_size,
753 	};
754 
755 	evsel = hists_to_evsel(he->hists);
756 
757 	trace_seq_init(&seq);
758 	if (symbol_conf.raw_trace) {
759 		tep_print_fields(&seq, he->raw_data, he->raw_size,
760 				 evsel->tp_format);
761 	} else {
762 		tep_print_event(evsel->tp_format->tep,
763 				&seq, &rec, "%s", TEP_PRINT_INFO);
764 	}
765 	/*
766 	 * Trim the buffer: it starts at 4KB and we're not going to
767 	 * add anything more to it.
768 	 */
769 	return realloc(seq.buffer, seq.len + 1);
770 }
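
/*
 * The caller takes ownership of the returned buffer and stores it in
 * hist_entry::trace_output; the realloc() shrinks it from the initial
 * 4KB to the formatted length plus the terminating NUL.
 */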
771 
772 static int64_t
773 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
774 {
775 	struct evsel *evsel;
776 
777 	evsel = hists_to_evsel(left->hists);
778 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
779 		return 0;
780 
781 	if (left->trace_output == NULL)
782 		left->trace_output = get_trace_output(left);
783 	if (right->trace_output == NULL)
784 		right->trace_output = get_trace_output(right);
785 
786 	return strcmp(right->trace_output, left->trace_output);
787 }
788 
789 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
790 				    size_t size, unsigned int width)
791 {
792 	struct evsel *evsel;
793 
794 	evsel = hists_to_evsel(he->hists);
795 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
796 		return scnprintf(bf, size, "%-.*s", width, "N/A");
797 
798 	if (he->trace_output == NULL)
799 		he->trace_output = get_trace_output(he);
800 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
801 }
802 
803 struct sort_entry sort_trace = {
804 	.se_header      = "Trace output",
805 	.se_cmp	        = sort__trace_cmp,
806 	.se_snprintf    = hist_entry__trace_snprintf,
807 	.se_width_idx	= HISTC_TRACE,
808 };
809 
810 /* sort keys for branch stacks */
811 
812 static int64_t
813 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
814 {
815 	if (!left->branch_info || !right->branch_info)
816 		return cmp_null(left->branch_info, right->branch_info);
817 
818 	return _sort__dso_cmp(left->branch_info->from.ms.map,
819 			      right->branch_info->from.ms.map);
820 }
821 
822 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
823 				    size_t size, unsigned int width)
824 {
825 	if (he->branch_info)
826 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
827 						 bf, size, width);
828 	else
829 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
830 }
831 
832 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
833 				       const void *arg)
834 {
835 	const struct dso *dso = arg;
836 
837 	if (type != HIST_FILTER__DSO)
838 		return -1;
839 
840 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
841 		       he->branch_info->from.ms.map->dso != dso);
842 }
843 
844 static int64_t
845 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
846 {
847 	if (!left->branch_info || !right->branch_info)
848 		return cmp_null(left->branch_info, right->branch_info);
849 
850 	return _sort__dso_cmp(left->branch_info->to.ms.map,
851 			      right->branch_info->to.ms.map);
852 }
853 
854 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
855 				       size_t size, unsigned int width)
856 {
857 	if (he->branch_info)
858 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
859 						 bf, size, width);
860 	else
861 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
862 }
863 
864 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
865 				     const void *arg)
866 {
867 	const struct dso *dso = arg;
868 
869 	if (type != HIST_FILTER__DSO)
870 		return -1;
871 
872 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
873 		       he->branch_info->to.ms.map->dso != dso);
874 }
875 
876 static int64_t
877 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
878 {
879 	struct addr_map_symbol *from_l = &left->branch_info->from;
880 	struct addr_map_symbol *from_r = &right->branch_info->from;
881 
882 	if (!left->branch_info || !right->branch_info)
883 		return cmp_null(left->branch_info, right->branch_info);
884 
885 	from_l = &left->branch_info->from;
886 	from_r = &right->branch_info->from;
887 
888 	if (!from_l->ms.sym && !from_r->ms.sym)
889 		return _sort__addr_cmp(from_l->addr, from_r->addr);
890 
891 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
892 }
893 
894 static int64_t
895 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
896 {
897 	struct addr_map_symbol *to_l, *to_r;
898 
899 	if (!left->branch_info || !right->branch_info)
900 		return cmp_null(left->branch_info, right->branch_info);
901 
902 	to_l = &left->branch_info->to;
903 	to_r = &right->branch_info->to;
904 
905 	if (!to_l->ms.sym && !to_r->ms.sym)
906 		return _sort__addr_cmp(to_l->addr, to_r->addr);
907 
908 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
909 }
910 
911 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
912 					 size_t size, unsigned int width)
913 {
914 	if (he->branch_info) {
915 		struct addr_map_symbol *from = &he->branch_info->from;
916 
917 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
918 						 he->level, bf, size, width);
919 	}
920 
921 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
922 }
923 
924 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
925 				       size_t size, unsigned int width)
926 {
927 	if (he->branch_info) {
928 		struct addr_map_symbol *to = &he->branch_info->to;
929 
930 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
931 						 he->level, bf, size, width);
932 	}
933 
934 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
935 }
936 
937 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
938 				       const void *arg)
939 {
940 	const char *sym = arg;
941 
942 	if (type != HIST_FILTER__SYMBOL)
943 		return -1;
944 
945 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
946 			strstr(he->branch_info->from.ms.sym->name, sym));
947 }
948 
949 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
950 				       const void *arg)
951 {
952 	const char *sym = arg;
953 
954 	if (type != HIST_FILTER__SYMBOL)
955 		return -1;
956 
957 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
958 		        strstr(he->branch_info->to.ms.sym->name, sym));
959 }
960 
961 struct sort_entry sort_dso_from = {
962 	.se_header	= "Source Shared Object",
963 	.se_cmp		= sort__dso_from_cmp,
964 	.se_snprintf	= hist_entry__dso_from_snprintf,
965 	.se_filter	= hist_entry__dso_from_filter,
966 	.se_width_idx	= HISTC_DSO_FROM,
967 };
968 
969 struct sort_entry sort_dso_to = {
970 	.se_header	= "Target Shared Object",
971 	.se_cmp		= sort__dso_to_cmp,
972 	.se_snprintf	= hist_entry__dso_to_snprintf,
973 	.se_filter	= hist_entry__dso_to_filter,
974 	.se_width_idx	= HISTC_DSO_TO,
975 };
976 
977 struct sort_entry sort_sym_from = {
978 	.se_header	= "Source Symbol",
979 	.se_cmp		= sort__sym_from_cmp,
980 	.se_snprintf	= hist_entry__sym_from_snprintf,
981 	.se_filter	= hist_entry__sym_from_filter,
982 	.se_width_idx	= HISTC_SYMBOL_FROM,
983 };
984 
985 struct sort_entry sort_sym_to = {
986 	.se_header	= "Target Symbol",
987 	.se_cmp		= sort__sym_to_cmp,
988 	.se_snprintf	= hist_entry__sym_to_snprintf,
989 	.se_filter	= hist_entry__sym_to_filter,
990 	.se_width_idx	= HISTC_SYMBOL_TO,
991 };
992 
993 static int64_t
994 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
995 {
996 	unsigned char mp, p;
997 
998 	if (!left->branch_info || !right->branch_info)
999 		return cmp_null(left->branch_info, right->branch_info);
1000 
1001 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1002 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1003 	return mp || p;
1004 }
1005 
1006 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1007 				    size_t size, unsigned int width){
1008 	static const char *out = "N/A";
1009 
1010 	if (he->branch_info) {
1011 		if (he->branch_info->flags.predicted)
1012 			out = "N";
1013 		else if (he->branch_info->flags.mispred)
1014 			out = "Y";
1015 	}
1016 
1017 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1018 }
1019 
1020 static int64_t
1021 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1022 {
1023 	if (!left->branch_info || !right->branch_info)
1024 		return cmp_null(left->branch_info, right->branch_info);
1025 
1026 	return left->branch_info->flags.cycles -
1027 		right->branch_info->flags.cycles;
1028 }
1029 
1030 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1031 				    size_t size, unsigned int width)
1032 {
1033 	if (!he->branch_info)
1034 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1035 	if (he->branch_info->flags.cycles == 0)
1036 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1037 	return repsep_snprintf(bf, size, "%-*hd", width,
1038 			       he->branch_info->flags.cycles);
1039 }
1040 
1041 struct sort_entry sort_cycles = {
1042 	.se_header	= "Basic Block Cycles",
1043 	.se_cmp		= sort__cycles_cmp,
1044 	.se_snprintf	= hist_entry__cycles_snprintf,
1045 	.se_width_idx	= HISTC_CYCLES,
1046 };
1047 
1048 /* --sort daddr_sym */
1049 int64_t
1050 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1051 {
1052 	uint64_t l = 0, r = 0;
1053 
1054 	if (left->mem_info)
1055 		l = left->mem_info->daddr.addr;
1056 	if (right->mem_info)
1057 		r = right->mem_info->daddr.addr;
1058 
1059 	return (int64_t)(r - l);
1060 }
1061 
1062 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1063 				    size_t size, unsigned int width)
1064 {
1065 	uint64_t addr = 0;
1066 	struct map_symbol *ms = NULL;
1067 
1068 	if (he->mem_info) {
1069 		addr = he->mem_info->daddr.addr;
1070 		ms = &he->mem_info->daddr.ms;
1071 	}
1072 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1073 }
1074 
1075 int64_t
1076 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1077 {
1078 	uint64_t l = 0, r = 0;
1079 
1080 	if (left->mem_info)
1081 		l = left->mem_info->iaddr.addr;
1082 	if (right->mem_info)
1083 		r = right->mem_info->iaddr.addr;
1084 
1085 	return (int64_t)(r - l);
1086 }
1087 
1088 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1089 				    size_t size, unsigned int width)
1090 {
1091 	uint64_t addr = 0;
1092 	struct map_symbol *ms = NULL;
1093 
1094 	if (he->mem_info) {
1095 		addr = he->mem_info->iaddr.addr;
1096 		ms   = &he->mem_info->iaddr.ms;
1097 	}
1098 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1099 }
1100 
1101 static int64_t
1102 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1103 {
1104 	struct map *map_l = NULL;
1105 	struct map *map_r = NULL;
1106 
1107 	if (left->mem_info)
1108 		map_l = left->mem_info->daddr.ms.map;
1109 	if (right->mem_info)
1110 		map_r = right->mem_info->daddr.ms.map;
1111 
1112 	return _sort__dso_cmp(map_l, map_r);
1113 }
1114 
1115 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1116 				    size_t size, unsigned int width)
1117 {
1118 	struct map *map = NULL;
1119 
1120 	if (he->mem_info)
1121 		map = he->mem_info->daddr.ms.map;
1122 
1123 	return _hist_entry__dso_snprintf(map, bf, size, width);
1124 }
1125 
1126 static int64_t
1127 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1128 {
1129 	union perf_mem_data_src data_src_l;
1130 	union perf_mem_data_src data_src_r;
1131 
1132 	if (left->mem_info)
1133 		data_src_l = left->mem_info->data_src;
1134 	else
1135 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1136 
1137 	if (right->mem_info)
1138 		data_src_r = right->mem_info->data_src;
1139 	else
1140 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1141 
1142 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1143 }
1144 
1145 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1146 				    size_t size, unsigned int width)
1147 {
1148 	char out[10];
1149 
1150 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1151 	return repsep_snprintf(bf, size, "%.*s", width, out);
1152 }
1153 
1154 static int64_t
1155 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1156 {
1157 	union perf_mem_data_src data_src_l;
1158 	union perf_mem_data_src data_src_r;
1159 
1160 	if (left->mem_info)
1161 		data_src_l = left->mem_info->data_src;
1162 	else
1163 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1164 
1165 	if (right->mem_info)
1166 		data_src_r = right->mem_info->data_src;
1167 	else
1168 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1169 
1170 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1171 }
1172 
1173 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1174 				    size_t size, unsigned int width)
1175 {
1176 	char out[64];
1177 
1178 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1179 	return repsep_snprintf(bf, size, "%-*s", width, out);
1180 }
1181 
1182 static int64_t
1183 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1184 {
1185 	union perf_mem_data_src data_src_l;
1186 	union perf_mem_data_src data_src_r;
1187 
1188 	if (left->mem_info)
1189 		data_src_l = left->mem_info->data_src;
1190 	else
1191 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1192 
1193 	if (right->mem_info)
1194 		data_src_r = right->mem_info->data_src;
1195 	else
1196 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1197 
1198 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1199 }
1200 
1201 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1202 				    size_t size, unsigned int width)
1203 {
1204 	char out[64];
1205 
1206 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1207 	return repsep_snprintf(bf, size, "%-*s", width, out);
1208 }
1209 
1210 static int64_t
1211 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1212 {
1213 	union perf_mem_data_src data_src_l;
1214 	union perf_mem_data_src data_src_r;
1215 
1216 	if (left->mem_info)
1217 		data_src_l = left->mem_info->data_src;
1218 	else
1219 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1220 
1221 	if (right->mem_info)
1222 		data_src_r = right->mem_info->data_src;
1223 	else
1224 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1225 
1226 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1227 }
1228 
1229 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1230 				    size_t size, unsigned int width)
1231 {
1232 	char out[64];
1233 
1234 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1235 	return repsep_snprintf(bf, size, "%-*s", width, out);
1236 }
1237 
1238 int64_t
1239 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1240 {
1241 	u64 l, r;
1242 	struct map *l_map, *r_map;
1243 	int rc;
1244 
1245 	if (!left->mem_info)  return -1;
1246 	if (!right->mem_info) return 1;
1247 
1248 	/* group event types together */
1249 	if (left->cpumode > right->cpumode) return -1;
1250 	if (left->cpumode < right->cpumode) return 1;
1251 
1252 	l_map = left->mem_info->daddr.ms.map;
1253 	r_map = right->mem_info->daddr.ms.map;
1254 
1255 	/* if both are NULL, jump to sort on al_addr instead */
1256 	if (!l_map && !r_map)
1257 		goto addr;
1258 
1259 	if (!l_map) return -1;
1260 	if (!r_map) return 1;
1261 
1262 	rc = dso__cmp_id(l_map->dso, r_map->dso);
1263 	if (rc)
1264 		return rc;
1265 	/*
1266 	 * Addresses with no major/minor numbers are assumed to be
1267 	 * anonymous in userspace.  Sort those on pid then address.
1268 	 *
1269 	 * The kernel and non-zero major/minor mapped areas are
1270 	 * assumed to be unity mapped.  Sort those on address.
1271 	 */
1272 
1273 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1274 	    (!(l_map->flags & MAP_SHARED)) &&
1275 	    !l_map->dso->id.maj && !l_map->dso->id.min &&
1276 	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1277 		/* userspace anonymous */
1278 
1279 		if (left->thread->pid_ > right->thread->pid_) return -1;
1280 		if (left->thread->pid_ < right->thread->pid_) return 1;
1281 	}
1282 
1283 addr:
1284 	/* al_addr does all the right addr - start + offset calculations */
1285 	l = cl_address(left->mem_info->daddr.al_addr);
1286 	r = cl_address(right->mem_info->daddr.al_addr);
1287 
1288 	if (l > r) return -1;
1289 	if (l < r) return 1;
1290 
1291 	return 0;
1292 }
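
/*
 * cl_address() masks the sample address down to its cache line, so two
 * accesses that hit the same line compare equal here and are collapsed
 * into a single "Data Cacheline" entry.
 */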
1293 
1294 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1295 					  size_t size, unsigned int width)
1296 {
1297 
1298 	uint64_t addr = 0;
1299 	struct map_symbol *ms = NULL;
1300 	char level = he->level;
1301 
1302 	if (he->mem_info) {
1303 		struct map *map = he->mem_info->daddr.ms.map;
1304 
1305 		addr = cl_address(he->mem_info->daddr.al_addr);
1306 		ms = &he->mem_info->daddr.ms;
1307 
1308 		/* print [s] for shared data mmaps */
1309 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1310 		     map && !(map->prot & PROT_EXEC) &&
1311 		    (map->flags & MAP_SHARED) &&
1312 		    (map->dso->id.maj || map->dso->id.min ||
1313 		     map->dso->id.ino || map->dso->id.ino_generation))
1314 			level = 's';
1315 		else if (!map)
1316 			level = 'X';
1317 	}
1318 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1319 }
1320 
1321 struct sort_entry sort_mispredict = {
1322 	.se_header	= "Branch Mispredicted",
1323 	.se_cmp		= sort__mispredict_cmp,
1324 	.se_snprintf	= hist_entry__mispredict_snprintf,
1325 	.se_width_idx	= HISTC_MISPREDICT,
1326 };
1327 
1328 static u64 he_weight(struct hist_entry *he)
1329 {
1330 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1331 }
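
/*
 * Local weight is the per-entry average: the accumulated weight divided
 * by the number of aggregated samples.  The global "weight" key further
 * down sorts on the raw sum instead.
 */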
1332 
1333 static int64_t
1334 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1335 {
1336 	return he_weight(left) - he_weight(right);
1337 }
1338 
1339 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1340 				    size_t size, unsigned int width)
1341 {
1342 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1343 }
1344 
1345 struct sort_entry sort_local_weight = {
1346 	.se_header	= "Local Weight",
1347 	.se_cmp		= sort__local_weight_cmp,
1348 	.se_snprintf	= hist_entry__local_weight_snprintf,
1349 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1350 };
1351 
1352 static int64_t
1353 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1354 {
1355 	return left->stat.weight - right->stat.weight;
1356 }
1357 
1358 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1359 					      size_t size, unsigned int width)
1360 {
1361 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1362 }
1363 
1364 struct sort_entry sort_global_weight = {
1365 	.se_header	= "Weight",
1366 	.se_cmp		= sort__global_weight_cmp,
1367 	.se_snprintf	= hist_entry__global_weight_snprintf,
1368 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1369 };
1370 
1371 static u64 he_ins_lat(struct hist_entry *he)
1372 {
1373 		return he->stat.nr_events ? he->stat.ins_lat / he->stat.nr_events : 0;
1374 }
1375 
1376 static int64_t
1377 sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1378 {
1379 		return he_ins_lat(left) - he_ins_lat(right);
1380 }
1381 
1382 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1383 					      size_t size, unsigned int width)
1384 {
1385 		return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he));
1386 }
1387 
1388 struct sort_entry sort_local_ins_lat = {
1389 	.se_header	= "Local INSTR Latency",
1390 	.se_cmp		= sort__local_ins_lat_cmp,
1391 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1392 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1393 };
1394 
1395 static int64_t
1396 sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1397 {
1398 		return left->stat.ins_lat - right->stat.ins_lat;
1399 }
1400 
1401 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1402 					       size_t size, unsigned int width)
1403 {
1404 		return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat);
1405 }
1406 
1407 struct sort_entry sort_global_ins_lat = {
1408 	.se_header	= "INSTR Latency",
1409 	.se_cmp		= sort__global_ins_lat_cmp,
1410 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1411 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1412 };
1413 
1414 static int64_t
1415 sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1416 {
1417 	return left->stat.p_stage_cyc - right->stat.p_stage_cyc;
1418 }
1419 
1420 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1421 					size_t size, unsigned int width)
1422 {
1423 	return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc);
1424 }
1425 
1426 struct sort_entry sort_p_stage_cyc = {
1427 	.se_header      = "Pipeline Stage Cycle",
1428 	.se_cmp         = sort__global_p_stage_cyc_cmp,
1429 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1430 	.se_width_idx	= HISTC_P_STAGE_CYC,
1431 };
1432 
1433 struct sort_entry sort_mem_daddr_sym = {
1434 	.se_header	= "Data Symbol",
1435 	.se_cmp		= sort__daddr_cmp,
1436 	.se_snprintf	= hist_entry__daddr_snprintf,
1437 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1438 };
1439 
1440 struct sort_entry sort_mem_iaddr_sym = {
1441 	.se_header	= "Code Symbol",
1442 	.se_cmp		= sort__iaddr_cmp,
1443 	.se_snprintf	= hist_entry__iaddr_snprintf,
1444 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1445 };
1446 
1447 struct sort_entry sort_mem_daddr_dso = {
1448 	.se_header	= "Data Object",
1449 	.se_cmp		= sort__dso_daddr_cmp,
1450 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1451 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1452 };
1453 
1454 struct sort_entry sort_mem_locked = {
1455 	.se_header	= "Locked",
1456 	.se_cmp		= sort__locked_cmp,
1457 	.se_snprintf	= hist_entry__locked_snprintf,
1458 	.se_width_idx	= HISTC_MEM_LOCKED,
1459 };
1460 
1461 struct sort_entry sort_mem_tlb = {
1462 	.se_header	= "TLB access",
1463 	.se_cmp		= sort__tlb_cmp,
1464 	.se_snprintf	= hist_entry__tlb_snprintf,
1465 	.se_width_idx	= HISTC_MEM_TLB,
1466 };
1467 
1468 struct sort_entry sort_mem_lvl = {
1469 	.se_header	= "Memory access",
1470 	.se_cmp		= sort__lvl_cmp,
1471 	.se_snprintf	= hist_entry__lvl_snprintf,
1472 	.se_width_idx	= HISTC_MEM_LVL,
1473 };
1474 
1475 struct sort_entry sort_mem_snoop = {
1476 	.se_header	= "Snoop",
1477 	.se_cmp		= sort__snoop_cmp,
1478 	.se_snprintf	= hist_entry__snoop_snprintf,
1479 	.se_width_idx	= HISTC_MEM_SNOOP,
1480 };
1481 
1482 struct sort_entry sort_mem_dcacheline = {
1483 	.se_header	= "Data Cacheline",
1484 	.se_cmp		= sort__dcacheline_cmp,
1485 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1486 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1487 };
1488 
1489 static int64_t
1490 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1491 {
1492 	union perf_mem_data_src data_src_l;
1493 	union perf_mem_data_src data_src_r;
1494 
1495 	if (left->mem_info)
1496 		data_src_l = left->mem_info->data_src;
1497 	else
1498 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1499 
1500 	if (right->mem_info)
1501 		data_src_r = right->mem_info->data_src;
1502 	else
1503 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1504 
1505 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1506 }
1507 
1508 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1509 					size_t size, unsigned int width)
1510 {
1511 	char out[16];
1512 
1513 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1514 	return repsep_snprintf(bf, size, "%.*s", width, out);
1515 }
1516 
1517 struct sort_entry sort_mem_blocked = {
1518 	.se_header	= "Blocked",
1519 	.se_cmp		= sort__blocked_cmp,
1520 	.se_snprintf	= hist_entry__blocked_snprintf,
1521 	.se_width_idx	= HISTC_MEM_BLOCKED,
1522 };
1523 
1524 static int64_t
1525 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1526 {
1527 	uint64_t l = 0, r = 0;
1528 
1529 	if (left->mem_info)
1530 		l = left->mem_info->daddr.phys_addr;
1531 	if (right->mem_info)
1532 		r = right->mem_info->daddr.phys_addr;
1533 
1534 	return (int64_t)(r - l);
1535 }
1536 
1537 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1538 					   size_t size, unsigned int width)
1539 {
1540 	uint64_t addr = 0;
1541 	size_t ret = 0;
1542 	size_t len = BITS_PER_LONG / 4;
1543 
1544 	addr = he->mem_info->daddr.phys_addr;
1545 
1546 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1547 
1548 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1549 
1550 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1551 
1552 	if (ret > width)
1553 		bf[width] = '\0';
1554 
1555 	return width;
1556 }
1557 
1558 struct sort_entry sort_mem_phys_daddr = {
1559 	.se_header	= "Data Physical Address",
1560 	.se_cmp		= sort__phys_daddr_cmp,
1561 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1562 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1563 };
1564 
1565 static int64_t
1566 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1567 {
1568 	uint64_t l = 0, r = 0;
1569 
1570 	if (left->mem_info)
1571 		l = left->mem_info->daddr.data_page_size;
1572 	if (right->mem_info)
1573 		r = right->mem_info->daddr.data_page_size;
1574 
1575 	return (int64_t)(r - l);
1576 }
1577 
1578 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1579 					  size_t size, unsigned int width)
1580 {
1581 	char str[PAGE_SIZE_NAME_LEN];
1582 
1583 	return repsep_snprintf(bf, size, "%-*s", width,
1584 			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1585 }
1586 
1587 struct sort_entry sort_mem_data_page_size = {
1588 	.se_header	= "Data Page Size",
1589 	.se_cmp		= sort__data_page_size_cmp,
1590 	.se_snprintf	= hist_entry__data_page_size_snprintf,
1591 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1592 };
1593 
1594 static int64_t
1595 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1596 {
1597 	uint64_t l = left->code_page_size;
1598 	uint64_t r = right->code_page_size;
1599 
1600 	return (int64_t)(r - l);
1601 }
1602 
1603 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1604 					  size_t size, unsigned int width)
1605 {
1606 	char str[PAGE_SIZE_NAME_LEN];
1607 
1608 	return repsep_snprintf(bf, size, "%-*s", width,
1609 			       get_page_size_name(he->code_page_size, str));
1610 }
1611 
1612 struct sort_entry sort_code_page_size = {
1613 	.se_header	= "Code Page Size",
1614 	.se_cmp		= sort__code_page_size_cmp,
1615 	.se_snprintf	= hist_entry__code_page_size_snprintf,
1616 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1617 };
1618 
1619 static int64_t
1620 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1621 {
1622 	if (!left->branch_info || !right->branch_info)
1623 		return cmp_null(left->branch_info, right->branch_info);
1624 
1625 	return left->branch_info->flags.abort !=
1626 		right->branch_info->flags.abort;
1627 }
1628 
1629 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1630 				    size_t size, unsigned int width)
1631 {
1632 	static const char *out = "N/A";
1633 
1634 	if (he->branch_info) {
1635 		if (he->branch_info->flags.abort)
1636 			out = "A";
1637 		else
1638 			out = ".";
1639 	}
1640 
1641 	return repsep_snprintf(bf, size, "%-*s", width, out);
1642 }
1643 
1644 struct sort_entry sort_abort = {
1645 	.se_header	= "Transaction abort",
1646 	.se_cmp		= sort__abort_cmp,
1647 	.se_snprintf	= hist_entry__abort_snprintf,
1648 	.se_width_idx	= HISTC_ABORT,
1649 };
1650 
1651 static int64_t
1652 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1653 {
1654 	if (!left->branch_info || !right->branch_info)
1655 		return cmp_null(left->branch_info, right->branch_info);
1656 
1657 	return left->branch_info->flags.in_tx !=
1658 		right->branch_info->flags.in_tx;
1659 }
1660 
1661 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1662 				    size_t size, unsigned int width)
1663 {
1664 	static const char *out = "N/A";
1665 
1666 	if (he->branch_info) {
1667 		if (he->branch_info->flags.in_tx)
1668 			out = "T";
1669 		else
1670 			out = ".";
1671 	}
1672 
1673 	return repsep_snprintf(bf, size, "%-*s", width, out);
1674 }
1675 
1676 struct sort_entry sort_in_tx = {
1677 	.se_header	= "Branch in transaction",
1678 	.se_cmp		= sort__in_tx_cmp,
1679 	.se_snprintf	= hist_entry__in_tx_snprintf,
1680 	.se_width_idx	= HISTC_IN_TX,
1681 };
1682 
1683 static int64_t
1684 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1685 {
1686 	return left->transaction - right->transaction;
1687 }
1688 
1689 static inline char *add_str(char *p, const char *str)
1690 {
1691 	strcpy(p, str);
1692 	return p + strlen(str);
1693 }
1694 
1695 static struct txbit {
1696 	unsigned flag;
1697 	const char *name;
1698 	int skip_for_len;
1699 } txbits[] = {
1700 	{ PERF_TXN_ELISION,        "EL ",        0 },
1701 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1702 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1703 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1704 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1705 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1706 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1707 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1708 	{ 0, NULL, 0 }
1709 };
1710 
1711 int hist_entry__transaction_len(void)
1712 {
1713 	int i;
1714 	int len = 0;
1715 
1716 	for (i = 0; txbits[i].name; i++) {
1717 		if (!txbits[i].skip_for_len)
1718 			len += strlen(txbits[i].name);
1719 	}
1720 	len += 4; /* :XX<space> */
1721 	return len;
1722 }
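
/*
 * Only names with skip_for_len == 0 are counted, plus 4 characters for
 * the ":XX " abort-code suffix appended by
 * hist_entry__transaction_snprintf() below.
 */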
1723 
1724 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1725 					    size_t size, unsigned int width)
1726 {
1727 	u64 t = he->transaction;
1728 	char buf[128];
1729 	char *p = buf;
1730 	int i;
1731 
1732 	buf[0] = 0;
1733 	for (i = 0; txbits[i].name; i++)
1734 		if (txbits[i].flag & t)
1735 			p = add_str(p, txbits[i].name);
1736 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1737 		p = add_str(p, "NEITHER ");
1738 	if (t & PERF_TXN_ABORT_MASK) {
1739 		sprintf(p, ":%" PRIx64,
1740 			(t & PERF_TXN_ABORT_MASK) >>
1741 			PERF_TXN_ABORT_SHIFT);
1742 		p += strlen(p);
1743 	}
1744 
1745 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1746 }
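
/*
 * Example (for illustration): a sample with PERF_TXN_TRANSACTION |
 * PERF_TXN_SYNC | PERF_TXN_CONFLICT and abort code 2 is rendered as
 * "TX SYNC CON :2", left-aligned to the column width.
 */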
1747 
1748 struct sort_entry sort_transaction = {
1749 	.se_header	= "Transaction                ",
1750 	.se_cmp		= sort__transaction_cmp,
1751 	.se_snprintf	= hist_entry__transaction_snprintf,
1752 	.se_width_idx	= HISTC_TRANSACTION,
1753 };
1754 
1755 /* --sort symbol_size */
1756 
1757 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1758 {
1759 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1760 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1761 
1762 	return size_l < size_r ? -1 :
1763 		size_l == size_r ? 0 : 1;
1764 }
1765 
1766 static int64_t
1767 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1768 {
1769 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1770 }
1771 
1772 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1773 					  size_t bf_size, unsigned int width)
1774 {
1775 	if (sym)
1776 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1777 
1778 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1779 }
1780 
1781 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1782 					 size_t size, unsigned int width)
1783 {
1784 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1785 }
1786 
1787 struct sort_entry sort_sym_size = {
1788 	.se_header	= "Symbol size",
1789 	.se_cmp		= sort__sym_size_cmp,
1790 	.se_snprintf	= hist_entry__sym_size_snprintf,
1791 	.se_width_idx	= HISTC_SYM_SIZE,
1792 };
1793 
1794 /* --sort dso_size */
1795 
1796 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1797 {
1798 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1799 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1800 
1801 	return size_l < size_r ? -1 :
1802 		size_l == size_r ? 0 : 1;
1803 }
1804 
1805 static int64_t
1806 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1807 {
1808 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1809 }
1810 
1811 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1812 					  size_t bf_size, unsigned int width)
1813 {
1814 	if (map && map->dso)
1815 		return repsep_snprintf(bf, bf_size, "%*d", width,
1816 				       map__size(map));
1817 
1818 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1819 }
1820 
1821 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1822 					 size_t size, unsigned int width)
1823 {
1824 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1825 }
1826 
1827 struct sort_entry sort_dso_size = {
1828 	.se_header	= "DSO size",
1829 	.se_cmp		= sort__dso_size_cmp,
1830 	.se_snprintf	= hist_entry__dso_size_snprintf,
1831 	.se_width_idx	= HISTC_DSO_SIZE,
1832 };
1833 
1834 
1835 struct sort_dimension {
1836 	const char		*name;
1837 	struct sort_entry	*entry;
1838 	int			taken;
1839 };
1840 
1841 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
1842 {
1843 	return 0;
1844 }
1845 
1846 const char * __weak arch_perf_header_entry(const char *se_header)
1847 {
1848 	return se_header;
1849 }
1850 
1851 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
1852 {
1853 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
1854 }
1855 
1856 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1857 
1858 static struct sort_dimension common_sort_dimensions[] = {
1859 	DIM(SORT_PID, "pid", sort_thread),
1860 	DIM(SORT_COMM, "comm", sort_comm),
1861 	DIM(SORT_DSO, "dso", sort_dso),
1862 	DIM(SORT_SYM, "symbol", sort_sym),
1863 	DIM(SORT_PARENT, "parent", sort_parent),
1864 	DIM(SORT_CPU, "cpu", sort_cpu),
1865 	DIM(SORT_SOCKET, "socket", sort_socket),
1866 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
1867 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1868 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1869 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1870 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1871 	DIM(SORT_TRACE, "trace", sort_trace),
1872 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1873 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1874 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
1875 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1876 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1877 	DIM(SORT_TIME, "time", sort_time),
1878 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
1879 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
1880 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
1881 	DIM(SORT_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_p_stage_cyc),
1882 };
1883 
1884 #undef DIM
1885 
1886 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1887 
1888 static struct sort_dimension bstack_sort_dimensions[] = {
1889 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1890 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1891 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1892 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1893 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1894 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1895 	DIM(SORT_ABORT, "abort", sort_abort),
1896 	DIM(SORT_CYCLES, "cycles", sort_cycles),
1897 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1898 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1899 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1900 };
1901 
1902 #undef DIM
1903 
1904 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1905 
1906 static struct sort_dimension memory_sort_dimensions[] = {
1907 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1908 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1909 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1910 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1911 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1912 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1913 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1914 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1915 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1916 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
1917 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
1918 };
1919 
1920 #undef DIM
1921 
1922 struct hpp_dimension {
1923 	const char		*name;
1924 	struct perf_hpp_fmt	*fmt;
1925 	int			taken;
1926 };
1927 
1928 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1929 
1930 static struct hpp_dimension hpp_sort_dimensions[] = {
1931 	DIM(PERF_HPP__OVERHEAD, "overhead"),
1932 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1933 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1934 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1935 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1936 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1937 	DIM(PERF_HPP__SAMPLES, "sample"),
1938 	DIM(PERF_HPP__PERIOD, "period"),
1939 };
1940 
1941 #undef DIM
1942 
1943 struct hpp_sort_entry {
1944 	struct perf_hpp_fmt hpp;
1945 	struct sort_entry *se;
1946 };
1947 
1948 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1949 {
1950 	struct hpp_sort_entry *hse;
1951 
1952 	if (!perf_hpp__is_sort_entry(fmt))
1953 		return;
1954 
1955 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1956 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1957 }
1958 
1959 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1960 			      struct hists *hists, int line __maybe_unused,
1961 			      int *span __maybe_unused)
1962 {
1963 	struct hpp_sort_entry *hse;
1964 	size_t len = fmt->user_len;
1965 
1966 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1967 
1968 	if (!len)
1969 		len = hists__col_len(hists, hse->se->se_width_idx);
1970 
1971 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1972 }
1973 
1974 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1975 			     struct perf_hpp *hpp __maybe_unused,
1976 			     struct hists *hists)
1977 {
1978 	struct hpp_sort_entry *hse;
1979 	size_t len = fmt->user_len;
1980 
1981 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1982 
1983 	if (!len)
1984 		len = hists__col_len(hists, hse->se->se_width_idx);
1985 
1986 	return len;
1987 }
1988 
1989 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1990 			     struct hist_entry *he)
1991 {
1992 	struct hpp_sort_entry *hse;
1993 	size_t len = fmt->user_len;
1994 
1995 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1996 
1997 	if (!len)
1998 		len = hists__col_len(he->hists, hse->se->se_width_idx);
1999 
2000 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2001 }
2002 
2003 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2004 			       struct hist_entry *a, struct hist_entry *b)
2005 {
2006 	struct hpp_sort_entry *hse;
2007 
2008 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2009 	return hse->se->se_cmp(a, b);
2010 }
2011 
2012 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2013 				    struct hist_entry *a, struct hist_entry *b)
2014 {
2015 	struct hpp_sort_entry *hse;
2016 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2017 
2018 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2019 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2020 	return collapse_fn(a, b);
2021 }
2022 
2023 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2024 				struct hist_entry *a, struct hist_entry *b)
2025 {
2026 	struct hpp_sort_entry *hse;
2027 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2028 
2029 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2030 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2031 	return sort_fn(a, b);
2032 }
2033 
2034 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2035 {
2036 	return format->header == __sort__hpp_header;
2037 }
2038 
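/*
 * Generate perf_hpp__is_<key>_entry() helpers which test whether a
 * hpp format is backed by the corresponding sort entry.
 */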
2039 #define MK_SORT_ENTRY_CHK(key)					\
2040 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2041 {								\
2042 	struct hpp_sort_entry *hse;				\
2043 								\
2044 	if (!perf_hpp__is_sort_entry(fmt))			\
2045 		return false;					\
2046 								\
2047 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2048 	return hse->se == &sort_ ## key ;			\
2049 }
2050 
2051 MK_SORT_ENTRY_CHK(trace)
2052 MK_SORT_ENTRY_CHK(srcline)
2053 MK_SORT_ENTRY_CHK(srcfile)
2054 MK_SORT_ENTRY_CHK(thread)
2055 MK_SORT_ENTRY_CHK(comm)
2056 MK_SORT_ENTRY_CHK(dso)
2057 MK_SORT_ENTRY_CHK(sym)
2058 
2059 
2060 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2061 {
2062 	struct hpp_sort_entry *hse_a;
2063 	struct hpp_sort_entry *hse_b;
2064 
2065 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2066 		return false;
2067 
2068 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2069 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2070 
2071 	return hse_a->se == hse_b->se;
2072 }
2073 
2074 static void hse_free(struct perf_hpp_fmt *fmt)
2075 {
2076 	struct hpp_sort_entry *hse;
2077 
2078 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2079 	free(hse);
2080 }
2081 
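/*
 * Wrap a sort_entry in a perf_hpp_fmt so it can act both as an output
 * column and as a sort key; the hpp callbacks simply forward to the
 * sort_entry handlers.
 */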
2082 static struct hpp_sort_entry *
2083 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2084 {
2085 	struct hpp_sort_entry *hse;
2086 
2087 	hse = malloc(sizeof(*hse));
2088 	if (hse == NULL) {
2089 		pr_err("Memory allocation failed\n");
2090 		return NULL;
2091 	}
2092 
2093 	hse->se = sd->entry;
2094 	hse->hpp.name = sd->entry->se_header;
2095 	hse->hpp.header = __sort__hpp_header;
2096 	hse->hpp.width = __sort__hpp_width;
2097 	hse->hpp.entry = __sort__hpp_entry;
2098 	hse->hpp.color = NULL;
2099 
2100 	hse->hpp.cmp = __sort__hpp_cmp;
2101 	hse->hpp.collapse = __sort__hpp_collapse;
2102 	hse->hpp.sort = __sort__hpp_sort;
2103 	hse->hpp.equal = __sort__hpp_equal;
2104 	hse->hpp.free = hse_free;
2105 
2106 	INIT_LIST_HEAD(&hse->hpp.list);
2107 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2108 	hse->hpp.elide = false;
2109 	hse->hpp.len = 0;
2110 	hse->hpp.user_len = 0;
2111 	hse->hpp.level = level;
2112 
2113 	return hse;
2114 }
2115 
2116 static void hpp_free(struct perf_hpp_fmt *fmt)
2117 {
2118 	free(fmt);
2119 }
2120 
2121 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2122 						       int level)
2123 {
2124 	struct perf_hpp_fmt *fmt;
2125 
2126 	fmt = memdup(hd->fmt, sizeof(*fmt));
2127 	if (fmt) {
2128 		INIT_LIST_HEAD(&fmt->list);
2129 		INIT_LIST_HEAD(&fmt->sort_list);
2130 		fmt->free = hpp_free;
2131 		fmt->level = level;
2132 	}
2133 
2134 	return fmt;
2135 }
2136 
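/*
 * Apply a filter of the given type using every sort key on the entry's
 * hpp list.  Returns -1 if no sort key handles this filter type, 0 if
 * the entry passes, and a positive value if it is filtered out.
 */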
2137 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2138 {
2139 	struct perf_hpp_fmt *fmt;
2140 	struct hpp_sort_entry *hse;
2141 	int ret = -1;
2142 	int r;
2143 
2144 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2145 		if (!perf_hpp__is_sort_entry(fmt))
2146 			continue;
2147 
2148 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2149 		if (hse->se->se_filter == NULL)
2150 			continue;
2151 
2152 		/*
2153 		 * A hist entry is filtered if any sort key in the hpp list
2154 		 * applies a filter, but filters of a non-matching type are skipped.
2155 		 */
2156 		r = hse->se->se_filter(he, type, arg);
2157 		if (r >= 0) {
2158 			if (ret < 0)
2159 				ret = 0;
2160 			ret |= r;
2161 		}
2162 	}
2163 
2164 	return ret;
2165 }
2166 
2167 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2168 					  struct perf_hpp_list *list,
2169 					  int level)
2170 {
2171 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2172 
2173 	if (hse == NULL)
2174 		return -1;
2175 
2176 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2177 	return 0;
2178 }
2179 
2180 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2181 					    struct perf_hpp_list *list)
2182 {
2183 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2184 
2185 	if (hse == NULL)
2186 		return -1;
2187 
2188 	perf_hpp_list__column_register(list, &hse->hpp);
2189 	return 0;
2190 }
2191 
2192 struct hpp_dynamic_entry {
2193 	struct perf_hpp_fmt hpp;
2194 	struct evsel *evsel;
2195 	struct tep_format_field *field;
2196 	unsigned dynamic_len;
2197 	bool raw_trace;
2198 };
2199 
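/*
 * Column width for a dynamic (tracepoint field) entry: the widest of
 * the field name, the raw field printed as hex and the longest
 * pretty-printed value seen so far.  The result is cached in hpp.len.
 */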
2200 static int hde_width(struct hpp_dynamic_entry *hde)
2201 {
2202 	if (!hde->hpp.len) {
2203 		int len = hde->dynamic_len;
2204 		int namelen = strlen(hde->field->name);
2205 		int fieldlen = hde->field->size;
2206 
2207 		if (namelen > len)
2208 			len = namelen;
2209 
2210 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2211 			/* length for printing hex numbers */
2212 			fieldlen = hde->field->size * 2 + 2;
2213 		}
2214 		if (fieldlen > len)
2215 			len = fieldlen;
2216 
2217 		hde->hpp.len = len;
2218 	}
2219 	return hde->hpp.len;
2220 }
2221 
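/*
 * Look the field up in the pretty-printed trace output of this entry
 * and remember the length of its value, so the column is made wide
 * enough for the longest value.
 */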
2222 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2223 			       struct hist_entry *he)
2224 {
2225 	char *str, *pos;
2226 	struct tep_format_field *field = hde->field;
2227 	size_t namelen;
2228 	bool last = false;
2229 
2230 	if (hde->raw_trace)
2231 		return;
2232 
2233 	/* parse pretty print result and update max length */
2234 	if (!he->trace_output)
2235 		he->trace_output = get_trace_output(he);
2236 
2237 	namelen = strlen(field->name);
2238 	str = he->trace_output;
2239 
2240 	while (str) {
2241 		pos = strchr(str, ' ');
2242 		if (pos == NULL) {
2243 			last = true;
2244 			pos = str + strlen(str);
2245 		}
2246 
2247 		if (!strncmp(str, field->name, namelen)) {
2248 			size_t len;
2249 
2250 			str += namelen + 1;
2251 			len = pos - str;
2252 
2253 			if (len > hde->dynamic_len)
2254 				hde->dynamic_len = len;
2255 			break;
2256 		}
2257 
2258 		if (last)
2259 			str = NULL;
2260 		else
2261 			str = pos + 1;
2262 	}
2263 }
2264 
2265 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2266 			      struct hists *hists __maybe_unused,
2267 			      int line __maybe_unused,
2268 			      int *span __maybe_unused)
2269 {
2270 	struct hpp_dynamic_entry *hde;
2271 	size_t len = fmt->user_len;
2272 
2273 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2274 
2275 	if (!len)
2276 		len = hde_width(hde);
2277 
2278 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2279 }
2280 
2281 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2282 			     struct perf_hpp *hpp __maybe_unused,
2283 			     struct hists *hists __maybe_unused)
2284 {
2285 	struct hpp_dynamic_entry *hde;
2286 	size_t len = fmt->user_len;
2287 
2288 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2289 
2290 	if (!len)
2291 		len = hde_width(hde);
2292 
2293 	return len;
2294 }
2295 
2296 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2297 {
2298 	struct hpp_dynamic_entry *hde;
2299 
2300 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2301 
2302 	return hists_to_evsel(hists) == hde->evsel;
2303 }
2304 
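/*
 * Print a single tracepoint field: the value is taken from the
 * pretty-printed trace output, falling back to printing the raw data
 * via libtraceevent when raw mode is on or the field is not found.
 */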
2305 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2306 			     struct hist_entry *he)
2307 {
2308 	struct hpp_dynamic_entry *hde;
2309 	size_t len = fmt->user_len;
2310 	char *str, *pos;
2311 	struct tep_format_field *field;
2312 	size_t namelen;
2313 	bool last = false;
2314 	int ret;
2315 
2316 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2317 
2318 	if (!len)
2319 		len = hde_width(hde);
2320 
2321 	if (hde->raw_trace)
2322 		goto raw_field;
2323 
2324 	if (!he->trace_output)
2325 		he->trace_output = get_trace_output(he);
2326 
2327 	field = hde->field;
2328 	namelen = strlen(field->name);
2329 	str = he->trace_output;
2330 
2331 	while (str) {
2332 		pos = strchr(str, ' ');
2333 		if (pos == NULL) {
2334 			last = true;
2335 			pos = str + strlen(str);
2336 		}
2337 
2338 		if (!strncmp(str, field->name, namelen)) {
2339 			str += namelen + 1;
2340 			str = strndup(str, pos - str);
2341 
2342 			if (str == NULL)
2343 				return scnprintf(hpp->buf, hpp->size,
2344 						 "%*.*s", len, len, "ERROR");
2345 			break;
2346 		}
2347 
2348 		if (last)
2349 			str = NULL;
2350 		else
2351 			str = pos + 1;
2352 	}
2353 
2354 	if (str == NULL) {
2355 		struct trace_seq seq;
2356 raw_field:
2357 		trace_seq_init(&seq);
2358 		tep_print_field(&seq, he->raw_data, hde->field);
2359 		str = seq.buffer;
2360 	}
2361 
2362 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2363 	free(str);
2364 	return ret;
2365 }
2366 
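/*
 * Compare the raw trace data of two entries for this field.  A NULL
 * 'b' means the caller only wants the dynamic column width updated for
 * 'a'.  For dynamic fields the value stored in the raw data encodes
 * the payload offset in the low 16 bits and its size in the high 16.
 */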
2367 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2368 			       struct hist_entry *a, struct hist_entry *b)
2369 {
2370 	struct hpp_dynamic_entry *hde;
2371 	struct tep_format_field *field;
2372 	unsigned offset, size;
2373 
2374 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2375 
2376 	if (b == NULL) {
2377 		update_dynamic_len(hde, a);
2378 		return 0;
2379 	}
2380 
2381 	field = hde->field;
2382 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2383 		unsigned long long dyn;
2384 
2385 		tep_read_number_field(field, a->raw_data, &dyn);
2386 		offset = dyn & 0xffff;
2387 		size = (dyn >> 16) & 0xffff;
2388 
2389 		/* record max width for output */
2390 		if (size > hde->dynamic_len)
2391 			hde->dynamic_len = size;
2392 	} else {
2393 		offset = field->offset;
2394 		size = field->size;
2395 	}
2396 
2397 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2398 }
2399 
2400 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2401 {
2402 	return fmt->cmp == __sort__hde_cmp;
2403 }
2404 
2405 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2406 {
2407 	struct hpp_dynamic_entry *hde_a;
2408 	struct hpp_dynamic_entry *hde_b;
2409 
2410 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2411 		return false;
2412 
2413 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2414 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2415 
2416 	return hde_a->field == hde_b->field;
2417 }
2418 
2419 static void hde_free(struct perf_hpp_fmt *fmt)
2420 {
2421 	struct hpp_dynamic_entry *hde;
2422 
2423 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2424 	free(hde);
2425 }
2426 
2427 static struct hpp_dynamic_entry *
2428 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2429 		      int level)
2430 {
2431 	struct hpp_dynamic_entry *hde;
2432 
2433 	hde = malloc(sizeof(*hde));
2434 	if (hde == NULL) {
2435 		pr_debug("Memory allocation failed\n");
2436 		return NULL;
2437 	}
2438 
2439 	hde->evsel = evsel;
2440 	hde->field = field;
2441 	hde->dynamic_len = 0;
2442 
2443 	hde->hpp.name = field->name;
2444 	hde->hpp.header = __sort__hde_header;
2445 	hde->hpp.width  = __sort__hde_width;
2446 	hde->hpp.entry  = __sort__hde_entry;
2447 	hde->hpp.color  = NULL;
2448 
2449 	hde->hpp.cmp = __sort__hde_cmp;
2450 	hde->hpp.collapse = __sort__hde_cmp;
2451 	hde->hpp.sort = __sort__hde_cmp;
2452 	hde->hpp.equal = __sort__hde_equal;
2453 	hde->hpp.free = hde_free;
2454 
2455 	INIT_LIST_HEAD(&hde->hpp.list);
2456 	INIT_LIST_HEAD(&hde->hpp.sort_list);
2457 	hde->hpp.elide = false;
2458 	hde->hpp.len = 0;
2459 	hde->hpp.user_len = 0;
2460 	hde->hpp.level = level;
2461 
2462 	return hde;
2463 }
2464 
2465 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2466 {
2467 	struct perf_hpp_fmt *new_fmt = NULL;
2468 
2469 	if (perf_hpp__is_sort_entry(fmt)) {
2470 		struct hpp_sort_entry *hse, *new_hse;
2471 
2472 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2473 		new_hse = memdup(hse, sizeof(*hse));
2474 		if (new_hse)
2475 			new_fmt = &new_hse->hpp;
2476 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2477 		struct hpp_dynamic_entry *hde, *new_hde;
2478 
2479 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2480 		new_hde = memdup(hde, sizeof(*hde));
2481 		if (new_hde)
2482 			new_fmt = &new_hde->hpp;
2483 	} else {
2484 		new_fmt = memdup(fmt, sizeof(*fmt));
2485 	}
2486 
2487 	INIT_LIST_HEAD(&new_fmt->list);
2488 	INIT_LIST_HEAD(&new_fmt->sort_list);
2489 
2490 	return new_fmt;
2491 }
2492 
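/*
 * Split "[<event>.]<field>[/<option>]" in place.  *event is NULL when
 * no event part is given and *opt is NULL when no option is given.
 */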
2493 static int parse_field_name(char *str, char **event, char **field, char **opt)
2494 {
2495 	char *event_name, *field_name, *opt_name;
2496 
2497 	event_name = str;
2498 	field_name = strchr(str, '.');
2499 
2500 	if (field_name) {
2501 		*field_name++ = '\0';
2502 	} else {
2503 		event_name = NULL;
2504 		field_name = str;
2505 	}
2506 
2507 	opt_name = strchr(field_name, '/');
2508 	if (opt_name)
2509 		*opt_name++ = '\0';
2510 
2511 	*event = event_name;
2512 	*field = field_name;
2513 	*opt   = opt_name;
2514 
2515 	return 0;
2516 }
2517 
2518 /* Find the matching evsel for a given event name.  The event name can be:
2519  *   1. '%' + event index (e.g. '%1' for the first event)
2520  *   2. full event name (e.g. sched:sched_switch)
2521  *   3. partial event name (should not contain ':')
2522  */
2523 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2524 {
2525 	struct evsel *evsel = NULL;
2526 	struct evsel *pos;
2527 	bool full_name;
2528 
2529 	/* case 1 */
2530 	if (event_name[0] == '%') {
2531 		int nr = strtol(event_name+1, NULL, 0);
2532 
2533 		if (nr > evlist->core.nr_entries)
2534 			return NULL;
2535 
2536 		evsel = evlist__first(evlist);
2537 		while (--nr > 0)
2538 			evsel = evsel__next(evsel);
2539 
2540 		return evsel;
2541 	}
2542 
2543 	full_name = !!strchr(event_name, ':');
2544 	evlist__for_each_entry(evlist, pos) {
2545 		/* case 2 */
2546 		if (full_name && !strcmp(pos->name, event_name))
2547 			return pos;
2548 		/* case 3 */
2549 		if (!full_name && strstr(pos->name, event_name)) {
2550 			if (evsel) {
2551 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2552 					 event_name, evsel->name, pos->name);
2553 				return NULL;
2554 			}
2555 			evsel = pos;
2556 		}
2557 	}
2558 
2559 	return evsel;
2560 }
2561 
2562 static int __dynamic_dimension__add(struct evsel *evsel,
2563 				    struct tep_format_field *field,
2564 				    bool raw_trace, int level)
2565 {
2566 	struct hpp_dynamic_entry *hde;
2567 
2568 	hde = __alloc_dynamic_entry(evsel, field, level);
2569 	if (hde == NULL)
2570 		return -ENOMEM;
2571 
2572 	hde->raw_trace = raw_trace;
2573 
2574 	perf_hpp__register_sort_field(&hde->hpp);
2575 	return 0;
2576 }
2577 
2578 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2579 {
2580 	int ret;
2581 	struct tep_format_field *field;
2582 
2583 	field = evsel->tp_format->format.fields;
2584 	while (field) {
2585 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2586 		if (ret < 0)
2587 			return ret;
2588 
2589 		field = field->next;
2590 	}
2591 	return 0;
2592 }
2593 
2594 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2595 				  int level)
2596 {
2597 	int ret;
2598 	struct evsel *evsel;
2599 
2600 	evlist__for_each_entry(evlist, evsel) {
2601 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2602 			continue;
2603 
2604 		ret = add_evsel_fields(evsel, raw_trace, level);
2605 		if (ret < 0)
2606 			return ret;
2607 	}
2608 	return 0;
2609 }
2610 
2611 static int add_all_matching_fields(struct evlist *evlist,
2612 				   char *field_name, bool raw_trace, int level)
2613 {
2614 	int ret = -ESRCH;
2615 	struct evsel *evsel;
2616 	struct tep_format_field *field;
2617 
2618 	evlist__for_each_entry(evlist, evsel) {
2619 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2620 			continue;
2621 
2622 		field = tep_find_any_field(evsel->tp_format, field_name);
2623 		if (field == NULL)
2624 			continue;
2625 
2626 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2627 		if (ret < 0)
2628 			break;
2629 	}
2630 	return ret;
2631 }
2632 
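/*
 * Add sort keys for tracepoint fields.  The token can be
 * '<event>.<field>' (e.g. 'sched:sched_switch.prev_comm'), a bare
 * '<field>' matched against all tracepoint events, '<event>.*' for all
 * fields of one event, or 'trace_fields' for all fields of all
 * tracepoint events.  A '/raw' suffix disables pretty printing.
 */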
2633 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2634 			     int level)
2635 {
2636 	char *str, *event_name, *field_name, *opt_name;
2637 	struct evsel *evsel;
2638 	struct tep_format_field *field;
2639 	bool raw_trace = symbol_conf.raw_trace;
2640 	int ret = 0;
2641 
2642 	if (evlist == NULL)
2643 		return -ENOENT;
2644 
2645 	str = strdup(tok);
2646 	if (str == NULL)
2647 		return -ENOMEM;
2648 
2649 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2650 		ret = -EINVAL;
2651 		goto out;
2652 	}
2653 
2654 	if (opt_name) {
2655 		if (strcmp(opt_name, "raw")) {
2656 			pr_debug("unsupported field option %s\n", opt_name);
2657 			ret = -EINVAL;
2658 			goto out;
2659 		}
2660 		raw_trace = true;
2661 	}
2662 
2663 	if (!strcmp(field_name, "trace_fields")) {
2664 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2665 		goto out;
2666 	}
2667 
2668 	if (event_name == NULL) {
2669 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2670 		goto out;
2671 	}
2672 
2673 	evsel = find_evsel(evlist, event_name);
2674 	if (evsel == NULL) {
2675 		pr_debug("Cannot find event: %s\n", event_name);
2676 		ret = -ENOENT;
2677 		goto out;
2678 	}
2679 
2680 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2681 		pr_debug("%s is not a tracepoint event\n", event_name);
2682 		ret = -EINVAL;
2683 		goto out;
2684 	}
2685 
2686 	if (!strcmp(field_name, "*")) {
2687 		ret = add_evsel_fields(evsel, raw_trace, level);
2688 	} else {
2689 		field = tep_find_any_field(evsel->tp_format, field_name);
2690 		if (field == NULL) {
2691 			pr_debug("Cannot find event field for %s.%s\n",
2692 				 event_name, field_name);
2693 			return -ENOENT;
2694 			ret = -ENOENT;
			goto out;
2695 
2696 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2697 	}
2698 
2699 out:
2700 	free(str);
2701 	return ret;
2702 }
2703 
2704 static int __sort_dimension__add(struct sort_dimension *sd,
2705 				 struct perf_hpp_list *list,
2706 				 int level)
2707 {
2708 	if (sd->taken)
2709 		return 0;
2710 
2711 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2712 		return -1;
2713 
2714 	if (sd->entry->se_collapse)
2715 		list->need_collapse = 1;
2716 
2717 	sd->taken = 1;
2718 
2719 	return 0;
2720 }
2721 
2722 static int __hpp_dimension__add(struct hpp_dimension *hd,
2723 				struct perf_hpp_list *list,
2724 				int level)
2725 {
2726 	struct perf_hpp_fmt *fmt;
2727 
2728 	if (hd->taken)
2729 		return 0;
2730 
2731 	fmt = __hpp_dimension__alloc_hpp(hd, level);
2732 	if (!fmt)
2733 		return -1;
2734 
2735 	hd->taken = 1;
2736 	perf_hpp_list__register_sort_field(list, fmt);
2737 	return 0;
2738 }
2739 
2740 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2741 					struct sort_dimension *sd)
2742 {
2743 	if (sd->taken)
2744 		return 0;
2745 
2746 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
2747 		return -1;
2748 
2749 	sd->taken = 1;
2750 	return 0;
2751 }
2752 
2753 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2754 				       struct hpp_dimension *hd)
2755 {
2756 	struct perf_hpp_fmt *fmt;
2757 
2758 	if (hd->taken)
2759 		return 0;
2760 
2761 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
2762 	if (!fmt)
2763 		return -1;
2764 
2765 	hd->taken = 1;
2766 	perf_hpp_list__column_register(list, fmt);
2767 	return 0;
2768 }
2769 
2770 int hpp_dimension__add_output(unsigned col)
2771 {
2772 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
2773 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2774 }
2775 
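/*
 * Resolve a single --sort token against the common, hpp, branch stack
 * and memory dimension tables, falling back to dynamic tracepoint
 * fields.  Also records which keys (sym, dso, parent, ...) are in use
 * on the given hpp list.
 */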
2776 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2777 			struct evlist *evlist,
2778 			int level)
2779 {
2780 	unsigned int i, j;
2781 
2782 	/*
2783 	 * Check whether this is an arch-specific sort key that is
2784 	 * not applicable to the current architecture.  If so, skip
2785 	 * that sort key since we don't want to display it in the
2786 	 * output fields.
2787 	 */
2788 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
2789 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
2790 				!arch_support_sort_key(tok)) {
2791 			return 0;
2792 		}
2793 	}
2794 
2795 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2796 		struct sort_dimension *sd = &common_sort_dimensions[i];
2797 
2798 		if (strncasecmp(tok, sd->name, strlen(tok)))
2799 			continue;
2800 
2801 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
2802 			if (!strcmp(dynamic_headers[j], sd->name))
2803 				sort_dimension_add_dynamic_header(sd);
2804 		}
2805 
2806 		if (sd->entry == &sort_parent) {
2807 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2808 			if (ret) {
2809 				char err[BUFSIZ];
2810 
2811 				regerror(ret, &parent_regex, err, sizeof(err));
2812 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2813 				return -EINVAL;
2814 			}
2815 			list->parent = 1;
2816 		} else if (sd->entry == &sort_sym) {
2817 			list->sym = 1;
2818 			/*
2819 			 * perf diff displays the performance difference between
2820 			 * two or more perf.data files.  Those files could come
2821 			 * from different binaries, so we should not compare
2822 			 * their IPs, but the symbol names.
2823 			 */
2824 			if (sort__mode == SORT_MODE__DIFF)
2825 				sd->entry->se_collapse = sort__sym_sort;
2826 
2827 		} else if (sd->entry == &sort_dso) {
2828 			list->dso = 1;
2829 		} else if (sd->entry == &sort_socket) {
2830 			list->socket = 1;
2831 		} else if (sd->entry == &sort_thread) {
2832 			list->thread = 1;
2833 		} else if (sd->entry == &sort_comm) {
2834 			list->comm = 1;
2835 		}
2836 
2837 		return __sort_dimension__add(sd, list, level);
2838 	}
2839 
2840 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2841 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2842 
2843 		if (strncasecmp(tok, hd->name, strlen(tok)))
2844 			continue;
2845 
2846 		return __hpp_dimension__add(hd, list, level);
2847 	}
2848 
2849 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2850 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2851 
2852 		if (strncasecmp(tok, sd->name, strlen(tok)))
2853 			continue;
2854 
2855 		if (sort__mode != SORT_MODE__BRANCH)
2856 			return -EINVAL;
2857 
2858 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2859 			list->sym = 1;
2860 
2861 		__sort_dimension__add(sd, list, level);
2862 		return 0;
2863 	}
2864 
2865 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2866 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2867 
2868 		if (strncasecmp(tok, sd->name, strlen(tok)))
2869 			continue;
2870 
2871 		if (sort__mode != SORT_MODE__MEMORY)
2872 			return -EINVAL;
2873 
2874 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2875 			return -EINVAL;
2876 
2877 		if (sd->entry == &sort_mem_daddr_sym)
2878 			list->sym = 1;
2879 
2880 		__sort_dimension__add(sd, list, level);
2881 		return 0;
2882 	}
2883 
2884 	if (!add_dynamic_entry(evlist, tok, level))
2885 		return 0;
2886 
2887 	return -ESRCH;
2888 }
2889 
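/*
 * Split the sort order string on ',', ' ', '{' and '}' and add each
 * key.  Keys wrapped in '{}' are put on the same level, e.g. in
 * 'comm,{dso,symbol}' the dso and symbol keys share one level.
 */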
2890 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2891 			   struct evlist *evlist)
2892 {
2893 	char *tmp, *tok;
2894 	int ret = 0;
2895 	int level = 0;
2896 	int next_level = 1;
2897 	bool in_group = false;
2898 
2899 	do {
2900 		tok = str;
2901 		tmp = strpbrk(str, "{}, ");
2902 		if (tmp) {
2903 			if (in_group)
2904 				next_level = level;
2905 			else
2906 				next_level = level + 1;
2907 
2908 			if (*tmp == '{')
2909 				in_group = true;
2910 			else if (*tmp == '}')
2911 				in_group = false;
2912 
2913 			*tmp = '\0';
2914 			str = tmp + 1;
2915 		}
2916 
2917 		if (*tok) {
2918 			ret = sort_dimension__add(list, tok, evlist, level);
2919 			if (ret == -EINVAL) {
2920 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2921 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2922 				else
2923 					ui__error("Invalid --sort key: `%s'", tok);
2924 				break;
2925 			} else if (ret == -ESRCH) {
2926 				ui__error("Unknown --sort key: `%s'", tok);
2927 				break;
2928 			}
2929 		}
2930 
2931 		level = next_level;
2932 	} while (tmp);
2933 
2934 	return ret;
2935 }
2936 
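/*
 * Pick the default sort order for the current sort mode.  When every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT;
 * with raw trace output the default then becomes 'trace_fields'.
 */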
2937 static const char *get_default_sort_order(struct evlist *evlist)
2938 {
2939 	const char *default_sort_orders[] = {
2940 		default_sort_order,
2941 		default_branch_sort_order,
2942 		default_mem_sort_order,
2943 		default_top_sort_order,
2944 		default_diff_sort_order,
2945 		default_tracepoint_sort_order,
2946 	};
2947 	bool use_trace = true;
2948 	struct evsel *evsel;
2949 
2950 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2951 
2952 	if (evlist == NULL || evlist__empty(evlist))
2953 		goto out_no_evlist;
2954 
2955 	evlist__for_each_entry(evlist, evsel) {
2956 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2957 			use_trace = false;
2958 			break;
2959 		}
2960 	}
2961 
2962 	if (use_trace) {
2963 		sort__mode = SORT_MODE__TRACEPOINT;
2964 		if (symbol_conf.raw_trace)
2965 			return "trace_fields";
2966 	}
2967 out_no_evlist:
2968 	return default_sort_orders[sort__mode];
2969 }
2970 
2971 static int setup_sort_order(struct evlist *evlist)
2972 {
2973 	char *new_sort_order;
2974 
2975 	/*
2976 	 * Append '+'-prefixed sort order to the default sort
2977 	 * order string.
2978 	 */
2979 	if (!sort_order || is_strict_order(sort_order))
2980 		return 0;
2981 
2982 	if (sort_order[1] == '\0') {
2983 		ui__error("Invalid --sort key: `+'");
2984 		return -EINVAL;
2985 	}
2986 
2987 	/*
2988 	 * We allocate a new sort_order string, but never free it,
2989 	 * because it is referenced throughout the rest of the code.
2990 	 */
2991 	if (asprintf(&new_sort_order, "%s,%s",
2992 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
2993 		pr_err("Not enough memory to set up --sort");
2994 		return -ENOMEM;
2995 	}
2996 
2997 	sort_order = new_sort_order;
2998 	return 0;
2999 }
3000 
3001 /*
3002  * Adds the 'pre,' prefix to 'str' if 'pre' is
3003  * not already part of 'str'.
3004  */
3005 static char *prefix_if_not_in(const char *pre, char *str)
3006 {
3007 	char *n;
3008 
3009 	if (!str || strstr(str, pre))
3010 		return str;
3011 
3012 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3013 		n = NULL;
3014 
3015 	free(str);
3016 	return n;
3017 }
3018 
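/*
 * Prepend 'overhead' (and 'overhead_children' when callchain
 * accumulation is enabled) unless already present.  perf diff manages
 * its own columns and is left untouched.
 */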
3019 static char *setup_overhead(char *keys)
3020 {
3021 	if (sort__mode == SORT_MODE__DIFF)
3022 		return keys;
3023 
3024 	keys = prefix_if_not_in("overhead", keys);
3025 
3026 	if (symbol_conf.cumulate_callchain)
3027 		keys = prefix_if_not_in("overhead_children", keys);
3028 
3029 	return keys;
3030 }
3031 
3032 static int __setup_sorting(struct evlist *evlist)
3033 {
3034 	char *str;
3035 	const char *sort_keys;
3036 	int ret = 0;
3037 
3038 	ret = setup_sort_order(evlist);
3039 	if (ret)
3040 		return ret;
3041 
3042 	sort_keys = sort_order;
3043 	if (sort_keys == NULL) {
3044 		if (is_strict_order(field_order)) {
3045 			/*
3046 			 * If the user specified a field order but no sort order,
3047 			 * we'll honor it and not add the default sort orders.
3048 			 */
3049 			return 0;
3050 		}
3051 
3052 		sort_keys = get_default_sort_order(evlist);
3053 	}
3054 
3055 	str = strdup(sort_keys);
3056 	if (str == NULL) {
3057 		pr_err("Not enough memory to setup sort keys");
3058 		return -ENOMEM;
3059 	}
3060 
3061 	/*
3062 	 * Prepend overhead fields for backward compatibility.
3063 	 */
3064 	if (!is_strict_order(field_order)) {
3065 		str = setup_overhead(str);
3066 		if (str == NULL) {
3067 			pr_err("Not enough memory to setup overhead keys");
3068 			return -ENOMEM;
3069 		}
3070 	}
3071 
3072 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3073 
3074 	free(str);
3075 	return ret;
3076 }
3077 
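/*
 * Mark the output column whose sort entry uses width index 'idx' as
 * elided (hidden) or not.
 */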
3078 void perf_hpp__set_elide(int idx, bool elide)
3079 {
3080 	struct perf_hpp_fmt *fmt;
3081 	struct hpp_sort_entry *hse;
3082 
3083 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3084 		if (!perf_hpp__is_sort_entry(fmt))
3085 			continue;
3086 
3087 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3088 		if (hse->se->se_width_idx == idx) {
3089 			fmt->elide = elide;
3090 			break;
3091 		}
3092 	}
3093 }
3094 
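/*
 * A column can be elided when its filter list contains exactly one
 * entry: every row would show the same value, so it is printed once as
 * a "# <name>: <value>" line instead.
 */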
3095 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3096 {
3097 	if (list && strlist__nr_entries(list) == 1) {
3098 		if (fp != NULL)
3099 			fprintf(fp, "# %s: %s\n", list_name,
3100 				strlist__entry(list, 0)->s);
3101 		return true;
3102 	}
3103 	return false;
3104 }
3105 
3106 static bool get_elide(int idx, FILE *output)
3107 {
3108 	switch (idx) {
3109 	case HISTC_SYMBOL:
3110 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3111 	case HISTC_DSO:
3112 		return __get_elide(symbol_conf.dso_list, "dso", output);
3113 	case HISTC_COMM:
3114 		return __get_elide(symbol_conf.comm_list, "comm", output);
3115 	default:
3116 		break;
3117 	}
3118 
3119 	if (sort__mode != SORT_MODE__BRANCH)
3120 		return false;
3121 
3122 	switch (idx) {
3123 	case HISTC_SYMBOL_FROM:
3124 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3125 	case HISTC_SYMBOL_TO:
3126 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3127 	case HISTC_DSO_FROM:
3128 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3129 	case HISTC_DSO_TO:
3130 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3131 	default:
3132 		break;
3133 	}
3134 
3135 	return false;
3136 }
3137 
3138 void sort__setup_elide(FILE *output)
3139 {
3140 	struct perf_hpp_fmt *fmt;
3141 	struct hpp_sort_entry *hse;
3142 
3143 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3144 		if (!perf_hpp__is_sort_entry(fmt))
3145 			continue;
3146 
3147 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3148 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3149 	}
3150 
3151 	/*
3152 	 * It makes no sense to elide all of the sort entries.
3153 	 * Just revert them so they show up again.
3154 	 */
3155 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3156 		if (!perf_hpp__is_sort_entry(fmt))
3157 			continue;
3158 
3159 		if (!fmt->elide)
3160 			return;
3161 	}
3162 
3163 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3164 		if (!perf_hpp__is_sort_entry(fmt))
3165 			continue;
3166 
3167 		fmt->elide = false;
3168 	}
3169 }
3170 
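/*
 * Resolve a single --fields token against the dimension tables and
 * register it as an output column.  Branch and memory keys are only
 * valid in their respective sort modes.
 */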
3171 int output_field_add(struct perf_hpp_list *list, char *tok)
3172 {
3173 	unsigned int i;
3174 
3175 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3176 		struct sort_dimension *sd = &common_sort_dimensions[i];
3177 
3178 		if (strncasecmp(tok, sd->name, strlen(tok)))
3179 			continue;
3180 
3181 		return __sort_dimension__add_output(list, sd);
3182 	}
3183 
3184 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3185 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3186 
3187 		if (strncasecmp(tok, hd->name, strlen(tok)))
3188 			continue;
3189 
3190 		return __hpp_dimension__add_output(list, hd);
3191 	}
3192 
3193 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3194 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3195 
3196 		if (strncasecmp(tok, sd->name, strlen(tok)))
3197 			continue;
3198 
3199 		if (sort__mode != SORT_MODE__BRANCH)
3200 			return -EINVAL;
3201 
3202 		return __sort_dimension__add_output(list, sd);
3203 	}
3204 
3205 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3206 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3207 
3208 		if (strncasecmp(tok, sd->name, strlen(tok)))
3209 			continue;
3210 
3211 		if (sort__mode != SORT_MODE__MEMORY)
3212 			return -EINVAL;
3213 
3214 		return __sort_dimension__add_output(list, sd);
3215 	}
3216 
3217 	return -ESRCH;
3218 }
3219 
3220 static int setup_output_list(struct perf_hpp_list *list, char *str)
3221 {
3222 	char *tmp, *tok;
3223 	int ret = 0;
3224 
3225 	for (tok = strtok_r(str, ", ", &tmp);
3226 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3227 		ret = output_field_add(list, tok);
3228 		if (ret == -EINVAL) {
3229 			ui__error("Invalid --fields key: `%s'", tok);
3230 			break;
3231 		} else if (ret == -ESRCH) {
3232 			ui__error("Unknown --fields key: `%s'", tok);
3233 			break;
3234 		}
3235 	}
3236 
3237 	return ret;
3238 }
3239 
3240 void reset_dimensions(void)
3241 {
3242 	unsigned int i;
3243 
3244 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3245 		common_sort_dimensions[i].taken = 0;
3246 
3247 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3248 		hpp_sort_dimensions[i].taken = 0;
3249 
3250 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3251 		bstack_sort_dimensions[i].taken = 0;
3252 
3253 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3254 		memory_sort_dimensions[i].taken = 0;
3255 }
3256 
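/*
 * A strict order completely replaces the default keys; a '+' prefix
 * (e.g. '+cpu') means the keys are appended to the defaults instead.
 */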
3257 bool is_strict_order(const char *order)
3258 {
3259 	return order && (*order != '+');
3260 }
3261 
3262 static int __setup_output_field(void)
3263 {
3264 	char *str, *strp;
3265 	int ret = -EINVAL;
3266 
3267 	if (field_order == NULL)
3268 		return 0;
3269 
3270 	strp = str = strdup(field_order);
3271 	if (str == NULL) {
3272 		pr_err("Not enough memory to setup output fields");
3273 		return -ENOMEM;
3274 	}
3275 
3276 	if (!is_strict_order(field_order))
3277 		strp++;
3278 
3279 	if (!strlen(strp)) {
3280 		ui__error("Invalid --fields key: `+'");
3281 		goto out;
3282 	}
3283 
3284 	ret = setup_output_list(&perf_hpp_list, strp);
3285 
3286 out:
3287 	free(str);
3288 	return ret;
3289 }
3290 
3291 int setup_sorting(struct evlist *evlist)
3292 {
3293 	int err;
3294 
3295 	err = __setup_sorting(evlist);
3296 	if (err < 0)
3297 		return err;
3298 
3299 	if (parent_pattern != default_parent_pattern) {
3300 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3301 		if (err < 0)
3302 			return err;
3303 	}
3304 
3305 	reset_dimensions();
3306 
3307 	/*
3308 	 * perf diff doesn't use default hpp output fields.
3309 	 */
3310 	if (sort__mode != SORT_MODE__DIFF)
3311 		perf_hpp__init();
3312 
3313 	err = __setup_output_field();
3314 	if (err < 0)
3315 		return err;
3316 
3317 	/* copy sort keys to output fields */
3318 	perf_hpp__setup_output_field(&perf_hpp_list);
3319 	/* and then copy output fields to sort keys */
3320 	perf_hpp__append_sort_keys(&perf_hpp_list);
3321 
3322 	/* setup hists-specific output fields */
3323 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3324 		return -1;
3325 
3326 	return 0;
3327 }
3328 
3329 void reset_output_field(void)
3330 {
3331 	perf_hpp_list.need_collapse = 0;
3332 	perf_hpp_list.parent = 0;
3333 	perf_hpp_list.sym = 0;
3334 	perf_hpp_list.dso = 0;
3335 
3336 	field_order = NULL;
3337 	sort_order = NULL;
3338 
3339 	reset_dimensions();
3340 	perf_hpp__reset_output_field(&perf_hpp_list);
3341 }
3342 
3343 #define INDENT (3*8 + 1)
3344 
3345 static void add_key(struct strbuf *sb, const char *str, int *llen)
3346 {
3347 	if (*llen >= 75) {
3348 		strbuf_addstr(sb, "\n\t\t\t ");
3349 		*llen = INDENT;
3350 	}
3351 	strbuf_addf(sb, " %s", str);
3352 	*llen += strlen(str) + 1;
3353 }
3354 
3355 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3356 			    int *llen)
3357 {
3358 	int i;
3359 
3360 	for (i = 0; i < n; i++)
3361 		add_key(sb, s[i].name, llen);
3362 }
3363 
3364 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3365 				int *llen)
3366 {
3367 	int i;
3368 
3369 	for (i = 0; i < n; i++)
3370 		add_key(sb, s[i].name, llen);
3371 }
3372 
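/*
 * Build the list of all available sort keys for the command line help
 * text, wrapping lines at roughly 75 columns.
 */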
3373 char *sort_help(const char *prefix)
3374 {
3375 	struct strbuf sb;
3376 	char *s;
3377 	int len = strlen(prefix) + INDENT;
3378 
3379 	strbuf_init(&sb, 300);
3380 	strbuf_addstr(&sb, prefix);
3381 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3382 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3383 	add_sort_string(&sb, common_sort_dimensions,
3384 			    ARRAY_SIZE(common_sort_dimensions), &len);
3385 	add_sort_string(&sb, bstack_sort_dimensions,
3386 			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3387 	add_sort_string(&sb, memory_sort_dimensions,
3388 			    ARRAY_SIZE(memory_sort_dimensions), &len);
3389 	s = strbuf_detach(&sb, NULL);
3390 	strbuf_release(&sb);
3391 	return s;
3392 }
3393