xref: /linux/tools/perf/util/sort.c (revision 0d08df6c493898e679d9c517e77ea95c063d40ec)
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9 #include "mem-events.h"
10 
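/*
 * Globals controlling how histogram entries are sorted.  sort_order and
 * field_order hold the user-supplied -s/--sort and -F/--fields strings and
 * fall back to the per-mode defaults declared below when left NULL;
 * parent_pattern and ignore_callees_regex are normally filled in from the
 * --parent and --ignore-callees options.
 */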
11 regex_t		parent_regex;
12 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
13 const char	*parent_pattern = default_parent_pattern;
14 const char	default_sort_order[] = "comm,dso,symbol";
15 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
16 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
17 const char	default_top_sort_order[] = "dso,symbol";
18 const char	default_diff_sort_order[] = "dso,symbol";
19 const char	default_tracepoint_sort_order[] = "trace";
20 const char	*sort_order;
21 const char	*field_order;
22 regex_t		ignore_callees_regex;
23 int		have_ignore_callees = 0;
24 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
25 
26 /*
27  * Replaces all occurrences of the character passed to the
28  *
29  * -t, --field-separator
30  *
31  * option.  That option uses a special separator character and does not
32  * pad with spaces; every occurrence of the separator in symbol names (and
33  * other output) is replaced with a '.' so it can never appear in a field.
34  */
35 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
36 {
37 	int n;
38 	va_list ap;
39 
40 	va_start(ap, fmt);
41 	n = vsnprintf(bf, size, fmt, ap);
42 	if (symbol_conf.field_sep && n > 0) {
43 		char *sep = bf;
44 
45 		while (1) {
46 			sep = strchr(sep, *symbol_conf.field_sep);
47 			if (sep == NULL)
48 				break;
49 			*sep = '.';
50 		}
51 	}
52 	va_end(ap);
53 
54 	if (n >= (int)size)
55 		return size - 1;
56 	return n;
57 }
58 
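/*
 * Shared NULL handling for the cmp routines below: returns 0 when both keys
 * are NULL and -1/1 when only one of them is.  Only meaningful when at least
 * one argument is NULL.
 */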
59 static int64_t cmp_null(const void *l, const void *r)
60 {
61 	if (!l && !r)
62 		return 0;
63 	else if (!l)
64 		return -1;
65 	else
66 		return 1;
67 }
68 
69 /* --sort pid */
70 
71 static int64_t
72 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
73 {
74 	return right->thread->tid - left->thread->tid;
75 }
76 
77 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
78 				       size_t size, unsigned int width)
79 {
80 	const char *comm = thread__comm_str(he->thread);
81 
82 	width = max(7U, width) - 6;
83 	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
84 			       width, width, comm ?: "");
85 }
86 
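/*
 * se_filter convention: return -1 when the filter type does not apply to
 * this sort key, 0 to keep the entry, and non-zero to filter it out.
 * hist_entry__filter() ORs the results from every sort key in the hpp list.
 */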
87 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
88 {
89 	const struct thread *th = arg;
90 
91 	if (type != HIST_FILTER__THREAD)
92 		return -1;
93 
94 	return th && he->thread != th;
95 }
96 
97 struct sort_entry sort_thread = {
98 	.se_header	= "  Pid:Command",
99 	.se_cmp		= sort__thread_cmp,
100 	.se_snprintf	= hist_entry__thread_snprintf,
101 	.se_filter	= hist_entry__thread_filter,
102 	.se_width_idx	= HISTC_THREAD,
103 };
104 
105 /* --sort comm */
106 
107 static int64_t
108 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
109 {
110 	/* Compare the command name strings */
111 	return strcmp(comm__str(right->comm), comm__str(left->comm));
112 }
113 
114 static int64_t
115 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
116 {
117 	/* Compare the command name strings */
118 	return strcmp(comm__str(right->comm), comm__str(left->comm));
119 }
120 
121 static int64_t
122 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
123 {
124 	return strcmp(comm__str(right->comm), comm__str(left->comm));
125 }
126 
127 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
128 				     size_t size, unsigned int width)
129 {
130 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
131 }
132 
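/*
 * A sort_entry provides up to three comparators: se_cmp is used when a new
 * entry is inserted, se_collapse when entries are merged in the collapse
 * pass, and se_sort for the final output ordering.  The hpp wrappers below
 * fall back to se_cmp whenever se_collapse or se_sort is NULL.
 */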
133 struct sort_entry sort_comm = {
134 	.se_header	= "Command",
135 	.se_cmp		= sort__comm_cmp,
136 	.se_collapse	= sort__comm_collapse,
137 	.se_sort	= sort__comm_sort,
138 	.se_snprintf	= hist_entry__comm_snprintf,
139 	.se_filter	= hist_entry__thread_filter,
140 	.se_width_idx	= HISTC_COMM,
141 };
142 
143 /* --sort dso */
144 
145 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
146 {
147 	struct dso *dso_l = map_l ? map_l->dso : NULL;
148 	struct dso *dso_r = map_r ? map_r->dso : NULL;
149 	const char *dso_name_l, *dso_name_r;
150 
151 	if (!dso_l || !dso_r)
152 		return cmp_null(dso_r, dso_l);
153 
154 	if (verbose) {
155 		dso_name_l = dso_l->long_name;
156 		dso_name_r = dso_r->long_name;
157 	} else {
158 		dso_name_l = dso_l->short_name;
159 		dso_name_r = dso_r->short_name;
160 	}
161 
162 	return strcmp(dso_name_l, dso_name_r);
163 }
164 
165 static int64_t
166 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
167 {
168 	return _sort__dso_cmp(right->ms.map, left->ms.map);
169 }
170 
171 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
172 				     size_t size, unsigned int width)
173 {
174 	if (map && map->dso) {
175 		const char *dso_name = !verbose ? map->dso->short_name :
176 			map->dso->long_name;
177 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
178 	}
179 
180 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
181 }
182 
183 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
184 				    size_t size, unsigned int width)
185 {
186 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
187 }
188 
189 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
190 {
191 	const struct dso *dso = arg;
192 
193 	if (type != HIST_FILTER__DSO)
194 		return -1;
195 
196 	return dso && (!he->ms.map || he->ms.map->dso != dso);
197 }
198 
199 struct sort_entry sort_dso = {
200 	.se_header	= "Shared Object",
201 	.se_cmp		= sort__dso_cmp,
202 	.se_snprintf	= hist_entry__dso_snprintf,
203 	.se_filter	= hist_entry__dso_filter,
204 	.se_width_idx	= HISTC_DSO,
205 };
206 
207 /* --sort symbol */
208 
209 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
210 {
211 	return (int64_t)(right_ip - left_ip);
212 }
213 
214 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
215 {
216 	if (!sym_l || !sym_r)
217 		return cmp_null(sym_l, sym_r);
218 
219 	if (sym_l == sym_r)
220 		return 0;
221 
222 	if (sym_l->start != sym_r->start)
223 		return (int64_t)(sym_r->start - sym_l->start);
224 
225 	return (int64_t)(sym_r->end - sym_l->end);
226 }
227 
228 static int64_t
229 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
230 {
231 	int64_t ret;
232 
233 	if (!left->ms.sym && !right->ms.sym)
234 		return _sort__addr_cmp(left->ip, right->ip);
235 
236 	/*
237 	 * comparing symbol address alone is not enough since it's a
238 	 * relative address within a dso.
239 	 */
240 	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
241 		ret = sort__dso_cmp(left, right);
242 		if (ret != 0)
243 			return ret;
244 	}
245 
246 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
247 }
248 
249 static int64_t
250 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
251 {
252 	if (!left->ms.sym || !right->ms.sym)
253 		return cmp_null(left->ms.sym, right->ms.sym);
254 
255 	return strcmp(right->ms.sym->name, left->ms.sym->name);
256 }
257 
258 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
259 				     u64 ip, char level, char *bf, size_t size,
260 				     unsigned int width)
261 {
262 	size_t ret = 0;
263 
264 	if (verbose) {
265 		char o = map ? dso__symtab_origin(map->dso) : '!';
266 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
267 				       BITS_PER_LONG / 4 + 2, ip, o);
268 	}
269 
270 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
271 	if (sym && map) {
272 		if (map->type == MAP__VARIABLE) {
273 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
274 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
275 					ip - map->unmap_ip(map, sym->start));
276 		} else {
277 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
278 					       width - ret,
279 					       sym->name);
280 		}
281 	} else {
282 		size_t len = BITS_PER_LONG / 4;
283 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
284 				       len, ip);
285 	}
286 
287 	return ret;
288 }
289 
290 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
291 				    size_t size, unsigned int width)
292 {
293 	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
294 					 he->level, bf, size, width);
295 }
296 
297 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
298 {
299 	const char *sym = arg;
300 
301 	if (type != HIST_FILTER__SYMBOL)
302 		return -1;
303 
304 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
305 }
306 
307 struct sort_entry sort_sym = {
308 	.se_header	= "Symbol",
309 	.se_cmp		= sort__sym_cmp,
310 	.se_sort	= sort__sym_sort,
311 	.se_snprintf	= hist_entry__sym_snprintf,
312 	.se_filter	= hist_entry__sym_filter,
313 	.se_width_idx	= HISTC_SYMBOL,
314 };
315 
316 /* --sort srcline */
317 
318 static char *hist_entry__get_srcline(struct hist_entry *he)
319 {
320 	struct map *map = he->ms.map;
321 
322 	if (!map)
323 		return SRCLINE_UNKNOWN;
324 
325 	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
326 			   he->ms.sym, true);
327 }
328 
329 static int64_t
330 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
331 {
332 	if (!left->srcline)
333 		left->srcline = hist_entry__get_srcline(left);
334 	if (!right->srcline)
335 		right->srcline = hist_entry__get_srcline(right);
336 
337 	return strcmp(right->srcline, left->srcline);
338 }
339 
340 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
341 					size_t size, unsigned int width)
342 {
343 	if (!he->srcline)
344 		he->srcline = hist_entry__get_srcline(he);
345 
346 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
347 }
348 
349 struct sort_entry sort_srcline = {
350 	.se_header	= "Source:Line",
351 	.se_cmp		= sort__srcline_cmp,
352 	.se_snprintf	= hist_entry__srcline_snprintf,
353 	.se_width_idx	= HISTC_SRCLINE,
354 };
355 
356 /* --sort srcfile */
357 
358 static char no_srcfile[1];
359 
360 static char *hist_entry__get_srcfile(struct hist_entry *e)
361 {
362 	char *sf, *p;
363 	struct map *map = e->ms.map;
364 
365 	if (!map)
366 		return no_srcfile;
367 
368 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
369 			 e->ms.sym, false, true);
370 	if (!strcmp(sf, SRCLINE_UNKNOWN))
371 		return no_srcfile;
372 	p = strchr(sf, ':');
373 	if (p && *sf) {
374 		*p = 0;
375 		return sf;
376 	}
377 	free(sf);
378 	return no_srcfile;
379 }
380 
381 static int64_t
382 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
383 {
384 	if (!left->srcfile)
385 		left->srcfile = hist_entry__get_srcfile(left);
386 	if (!right->srcfile)
387 		right->srcfile = hist_entry__get_srcfile(right);
388 
389 	return strcmp(right->srcfile, left->srcfile);
390 }
391 
392 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
393 					size_t size, unsigned int width)
394 {
395 	if (!he->srcfile)
396 		he->srcfile = hist_entry__get_srcfile(he);
397 
398 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
399 }
400 
401 struct sort_entry sort_srcfile = {
402 	.se_header	= "Source File",
403 	.se_cmp		= sort__srcfile_cmp,
404 	.se_snprintf	= hist_entry__srcfile_snprintf,
405 	.se_width_idx	= HISTC_SRCFILE,
406 };
407 
408 /* --sort parent */
409 
410 static int64_t
411 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
412 {
413 	struct symbol *sym_l = left->parent;
414 	struct symbol *sym_r = right->parent;
415 
416 	if (!sym_l || !sym_r)
417 		return cmp_null(sym_l, sym_r);
418 
419 	return strcmp(sym_r->name, sym_l->name);
420 }
421 
422 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
423 				       size_t size, unsigned int width)
424 {
425 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
426 			      he->parent ? he->parent->name : "[other]");
427 }
428 
429 struct sort_entry sort_parent = {
430 	.se_header	= "Parent symbol",
431 	.se_cmp		= sort__parent_cmp,
432 	.se_snprintf	= hist_entry__parent_snprintf,
433 	.se_width_idx	= HISTC_PARENT,
434 };
435 
436 /* --sort cpu */
437 
438 static int64_t
439 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
440 {
441 	return right->cpu - left->cpu;
442 }
443 
444 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
445 				    size_t size, unsigned int width)
446 {
447 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
448 }
449 
450 struct sort_entry sort_cpu = {
451 	.se_header      = "CPU",
452 	.se_cmp	        = sort__cpu_cmp,
453 	.se_snprintf    = hist_entry__cpu_snprintf,
454 	.se_width_idx	= HISTC_CPU,
455 };
456 
457 /* --sort socket */
458 
459 static int64_t
460 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
461 {
462 	return right->socket - left->socket;
463 }
464 
465 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
466 				    size_t size, unsigned int width)
467 {
468 	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
469 }
470 
471 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
472 {
473 	int sk = *(const int *)arg;
474 
475 	if (type != HIST_FILTER__SOCKET)
476 		return -1;
477 
478 	return sk >= 0 && he->socket != sk;
479 }
480 
481 struct sort_entry sort_socket = {
482 	.se_header      = "Socket",
483 	.se_cmp	        = sort__socket_cmp,
484 	.se_snprintf    = hist_entry__socket_snprintf,
485 	.se_filter      = hist_entry__socket_filter,
486 	.se_width_idx	= HISTC_SOCKET,
487 };
488 
489 /* --sort trace */
490 
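/*
 * Pretty-print the raw tracepoint payload of an entry using libtraceevent:
 * either a plain field dump (--raw-trace) or the event's print format.  The
 * returned buffer is allocated by the trace_seq and must be freed by the
 * caller (it usually ends up cached in he->trace_output).
 */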
491 static char *get_trace_output(struct hist_entry *he)
492 {
493 	struct trace_seq seq;
494 	struct perf_evsel *evsel;
495 	struct pevent_record rec = {
496 		.data = he->raw_data,
497 		.size = he->raw_size,
498 	};
499 
500 	evsel = hists_to_evsel(he->hists);
501 
502 	trace_seq_init(&seq);
503 	if (symbol_conf.raw_trace) {
504 		pevent_print_fields(&seq, he->raw_data, he->raw_size,
505 				    evsel->tp_format);
506 	} else {
507 		pevent_event_info(&seq, evsel->tp_format, &rec);
508 	}
509 	return seq.buffer;
510 }
511 
512 static int64_t
513 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
514 {
515 	struct perf_evsel *evsel;
516 
517 	evsel = hists_to_evsel(left->hists);
518 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
519 		return 0;
520 
521 	if (left->trace_output == NULL)
522 		left->trace_output = get_trace_output(left);
523 	if (right->trace_output == NULL)
524 		right->trace_output = get_trace_output(right);
525 
526 	return strcmp(right->trace_output, left->trace_output);
527 }
528 
529 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
530 				    size_t size, unsigned int width)
531 {
532 	struct perf_evsel *evsel;
533 
534 	evsel = hists_to_evsel(he->hists);
535 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
536 		return scnprintf(bf, size, "%-.*s", width, "N/A");
537 
538 	if (he->trace_output == NULL)
539 		he->trace_output = get_trace_output(he);
540 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
541 }
542 
543 struct sort_entry sort_trace = {
544 	.se_header      = "Trace output",
545 	.se_cmp	        = sort__trace_cmp,
546 	.se_snprintf    = hist_entry__trace_snprintf,
547 	.se_width_idx	= HISTC_TRACE,
548 };
549 
550 /* sort keys for branch stacks */
551 
552 static int64_t
553 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
554 {
555 	if (!left->branch_info || !right->branch_info)
556 		return cmp_null(left->branch_info, right->branch_info);
557 
558 	return _sort__dso_cmp(left->branch_info->from.map,
559 			      right->branch_info->from.map);
560 }
561 
562 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
563 				    size_t size, unsigned int width)
564 {
565 	if (he->branch_info)
566 		return _hist_entry__dso_snprintf(he->branch_info->from.map,
567 						 bf, size, width);
568 	else
569 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
570 }
571 
572 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
573 				       const void *arg)
574 {
575 	const struct dso *dso = arg;
576 
577 	if (type != HIST_FILTER__DSO)
578 		return -1;
579 
580 	return dso && (!he->branch_info || !he->branch_info->from.map ||
581 		       he->branch_info->from.map->dso != dso);
582 }
583 
584 static int64_t
585 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
586 {
587 	if (!left->branch_info || !right->branch_info)
588 		return cmp_null(left->branch_info, right->branch_info);
589 
590 	return _sort__dso_cmp(left->branch_info->to.map,
591 			      right->branch_info->to.map);
592 }
593 
594 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
595 				       size_t size, unsigned int width)
596 {
597 	if (he->branch_info)
598 		return _hist_entry__dso_snprintf(he->branch_info->to.map,
599 						 bf, size, width);
600 	else
601 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
602 }
603 
604 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
605 				     const void *arg)
606 {
607 	const struct dso *dso = arg;
608 
609 	if (type != HIST_FILTER__DSO)
610 		return -1;
611 
612 	return dso && (!he->branch_info || !he->branch_info->to.map ||
613 		       he->branch_info->to.map->dso != dso);
614 }
615 
616 static int64_t
617 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
618 {
619 	struct addr_map_symbol *from_l;
620 	struct addr_map_symbol *from_r;
621 
622 	if (!left->branch_info || !right->branch_info)
623 		return cmp_null(left->branch_info, right->branch_info);
624 
625 	from_l = &left->branch_info->from;
626 	from_r = &right->branch_info->from;
627 
628 	if (!from_l->sym && !from_r->sym)
629 		return _sort__addr_cmp(from_l->addr, from_r->addr);
630 
631 	return _sort__sym_cmp(from_l->sym, from_r->sym);
632 }
633 
634 static int64_t
635 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
636 {
637 	struct addr_map_symbol *to_l, *to_r;
638 
639 	if (!left->branch_info || !right->branch_info)
640 		return cmp_null(left->branch_info, right->branch_info);
641 
642 	to_l = &left->branch_info->to;
643 	to_r = &right->branch_info->to;
644 
645 	if (!to_l->sym && !to_r->sym)
646 		return _sort__addr_cmp(to_l->addr, to_r->addr);
647 
648 	return _sort__sym_cmp(to_l->sym, to_r->sym);
649 }
650 
651 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
652 					 size_t size, unsigned int width)
653 {
654 	if (he->branch_info) {
655 		struct addr_map_symbol *from = &he->branch_info->from;
656 
657 		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
658 						 he->level, bf, size, width);
659 	}
660 
661 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
662 }
663 
664 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
665 				       size_t size, unsigned int width)
666 {
667 	if (he->branch_info) {
668 		struct addr_map_symbol *to = &he->branch_info->to;
669 
670 		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
671 						 he->level, bf, size, width);
672 	}
673 
674 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
675 }
676 
677 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
678 				       const void *arg)
679 {
680 	const char *sym = arg;
681 
682 	if (type != HIST_FILTER__SYMBOL)
683 		return -1;
684 
685 	return sym && !(he->branch_info && he->branch_info->from.sym &&
686 			strstr(he->branch_info->from.sym->name, sym));
687 }
688 
689 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
690 				       const void *arg)
691 {
692 	const char *sym = arg;
693 
694 	if (type != HIST_FILTER__SYMBOL)
695 		return -1;
696 
697 	return sym && !(he->branch_info && he->branch_info->to.sym &&
698 		        strstr(he->branch_info->to.sym->name, sym));
699 }
700 
701 struct sort_entry sort_dso_from = {
702 	.se_header	= "Source Shared Object",
703 	.se_cmp		= sort__dso_from_cmp,
704 	.se_snprintf	= hist_entry__dso_from_snprintf,
705 	.se_filter	= hist_entry__dso_from_filter,
706 	.se_width_idx	= HISTC_DSO_FROM,
707 };
708 
709 struct sort_entry sort_dso_to = {
710 	.se_header	= "Target Shared Object",
711 	.se_cmp		= sort__dso_to_cmp,
712 	.se_snprintf	= hist_entry__dso_to_snprintf,
713 	.se_filter	= hist_entry__dso_to_filter,
714 	.se_width_idx	= HISTC_DSO_TO,
715 };
716 
717 struct sort_entry sort_sym_from = {
718 	.se_header	= "Source Symbol",
719 	.se_cmp		= sort__sym_from_cmp,
720 	.se_snprintf	= hist_entry__sym_from_snprintf,
721 	.se_filter	= hist_entry__sym_from_filter,
722 	.se_width_idx	= HISTC_SYMBOL_FROM,
723 };
724 
725 struct sort_entry sort_sym_to = {
726 	.se_header	= "Target Symbol",
727 	.se_cmp		= sort__sym_to_cmp,
728 	.se_snprintf	= hist_entry__sym_to_snprintf,
729 	.se_filter	= hist_entry__sym_to_filter,
730 	.se_width_idx	= HISTC_SYMBOL_TO,
731 };
732 
733 static int64_t
734 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
735 {
736 	unsigned char mp, p;
737 
738 	if (!left->branch_info || !right->branch_info)
739 		return cmp_null(left->branch_info, right->branch_info);
740 
741 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
742 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
743 	return mp || p;
744 }
745 
746 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
747 				    size_t size, unsigned int width)
{
748 	const char *out = "N/A";
749 
750 	if (he->branch_info) {
751 		if (he->branch_info->flags.predicted)
752 			out = "N";
753 		else if (he->branch_info->flags.mispred)
754 			out = "Y";
755 	}
756 
757 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
758 }
759 
760 static int64_t
761 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
762 {
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

763 	return left->branch_info->flags.cycles -
764 		right->branch_info->flags.cycles;
765 }
766 
767 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
768 				    size_t size, unsigned int width)
769 {
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
770 	if (he->branch_info->flags.cycles == 0)
771 		return repsep_snprintf(bf, size, "%-*s", width, "-");
772 	return repsep_snprintf(bf, size, "%-*hd", width,
773 			       he->branch_info->flags.cycles);
774 }
775 
776 struct sort_entry sort_cycles = {
777 	.se_header	= "Basic Block Cycles",
778 	.se_cmp		= sort__cycles_cmp,
779 	.se_snprintf	= hist_entry__cycles_snprintf,
780 	.se_width_idx	= HISTC_CYCLES,
781 };
782 
783 /* --sort daddr_sym */
784 static int64_t
785 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
786 {
787 	uint64_t l = 0, r = 0;
788 
789 	if (left->mem_info)
790 		l = left->mem_info->daddr.addr;
791 	if (right->mem_info)
792 		r = right->mem_info->daddr.addr;
793 
794 	return (int64_t)(r - l);
795 }
796 
797 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
798 				    size_t size, unsigned int width)
799 {
800 	uint64_t addr = 0;
801 	struct map *map = NULL;
802 	struct symbol *sym = NULL;
803 
804 	if (he->mem_info) {
805 		addr = he->mem_info->daddr.addr;
806 		map = he->mem_info->daddr.map;
807 		sym = he->mem_info->daddr.sym;
808 	}
809 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
810 					 width);
811 }
812 
813 static int64_t
814 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
815 {
816 	uint64_t l = 0, r = 0;
817 
818 	if (left->mem_info)
819 		l = left->mem_info->iaddr.addr;
820 	if (right->mem_info)
821 		r = right->mem_info->iaddr.addr;
822 
823 	return (int64_t)(r - l);
824 }
825 
826 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
827 				    size_t size, unsigned int width)
828 {
829 	uint64_t addr = 0;
830 	struct map *map = NULL;
831 	struct symbol *sym = NULL;
832 
833 	if (he->mem_info) {
834 		addr = he->mem_info->iaddr.addr;
835 		map  = he->mem_info->iaddr.map;
836 		sym  = he->mem_info->iaddr.sym;
837 	}
838 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
839 					 width);
840 }
841 
842 static int64_t
843 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
844 {
845 	struct map *map_l = NULL;
846 	struct map *map_r = NULL;
847 
848 	if (left->mem_info)
849 		map_l = left->mem_info->daddr.map;
850 	if (right->mem_info)
851 		map_r = right->mem_info->daddr.map;
852 
853 	return _sort__dso_cmp(map_l, map_r);
854 }
855 
856 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
857 				    size_t size, unsigned int width)
858 {
859 	struct map *map = NULL;
860 
861 	if (he->mem_info)
862 		map = he->mem_info->daddr.map;
863 
864 	return _hist_entry__dso_snprintf(map, bf, size, width);
865 }
866 
867 static int64_t
868 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
869 {
870 	union perf_mem_data_src data_src_l;
871 	union perf_mem_data_src data_src_r;
872 
873 	if (left->mem_info)
874 		data_src_l = left->mem_info->data_src;
875 	else
876 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
877 
878 	if (right->mem_info)
879 		data_src_r = right->mem_info->data_src;
880 	else
881 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
882 
883 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
884 }
885 
886 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
887 				    size_t size, unsigned int width)
888 {
889 	char out[10];
890 
891 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
892 	return repsep_snprintf(bf, size, "%.*s", width, out);
893 }
894 
895 static int64_t
896 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
897 {
898 	union perf_mem_data_src data_src_l;
899 	union perf_mem_data_src data_src_r;
900 
901 	if (left->mem_info)
902 		data_src_l = left->mem_info->data_src;
903 	else
904 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
905 
906 	if (right->mem_info)
907 		data_src_r = right->mem_info->data_src;
908 	else
909 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
910 
911 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
912 }
913 
914 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
915 				    size_t size, unsigned int width)
916 {
917 	char out[64];
918 
919 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
920 	return repsep_snprintf(bf, size, "%-*s", width, out);
921 }
922 
923 static int64_t
924 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
925 {
926 	union perf_mem_data_src data_src_l;
927 	union perf_mem_data_src data_src_r;
928 
929 	if (left->mem_info)
930 		data_src_l = left->mem_info->data_src;
931 	else
932 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
933 
934 	if (right->mem_info)
935 		data_src_r = right->mem_info->data_src;
936 	else
937 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
938 
939 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
940 }
941 
942 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
943 				    size_t size, unsigned int width)
944 {
945 	char out[64];
946 
947 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
948 	return repsep_snprintf(bf, size, "%-*s", width, out);
949 }
950 
951 static int64_t
952 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
953 {
954 	union perf_mem_data_src data_src_l;
955 	union perf_mem_data_src data_src_r;
956 
957 	if (left->mem_info)
958 		data_src_l = left->mem_info->data_src;
959 	else
960 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
961 
962 	if (right->mem_info)
963 		data_src_r = right->mem_info->data_src;
964 	else
965 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
966 
967 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
968 }
969 
970 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
971 				    size_t size, unsigned int width)
972 {
973 	char out[64];
974 
975 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
976 	return repsep_snprintf(bf, size, "%-*s", width, out);
977 }
978 
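/*
 * Sort by data cache line: group samples by cpumode and by the identity of
 * the backing map (maj/min/ino/generation), fall back to pid for anonymous
 * userspace mappings, and finally compare the cache-line-aligned address.
 * cl_address() masks off the low-order bits so addresses that fall into the
 * same cache line compare equal.
 */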
979 static int64_t
980 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
981 {
982 	u64 l, r;
983 	struct map *l_map, *r_map;
984 
985 	if (!left->mem_info)  return -1;
986 	if (!right->mem_info) return 1;
987 
988 	/* group event types together */
989 	if (left->cpumode > right->cpumode) return -1;
990 	if (left->cpumode < right->cpumode) return 1;
991 
992 	l_map = left->mem_info->daddr.map;
993 	r_map = right->mem_info->daddr.map;
994 
995 	/* if both are NULL, jump to sort on al_addr instead */
996 	if (!l_map && !r_map)
997 		goto addr;
998 
999 	if (!l_map) return -1;
1000 	if (!r_map) return 1;
1001 
1002 	if (l_map->maj > r_map->maj) return -1;
1003 	if (l_map->maj < r_map->maj) return 1;
1004 
1005 	if (l_map->min > r_map->min) return -1;
1006 	if (l_map->min < r_map->min) return 1;
1007 
1008 	if (l_map->ino > r_map->ino) return -1;
1009 	if (l_map->ino < r_map->ino) return 1;
1010 
1011 	if (l_map->ino_generation > r_map->ino_generation) return -1;
1012 	if (l_map->ino_generation < r_map->ino_generation) return 1;
1013 
1014 	/*
1015 	 * Addresses with no major/minor numbers are assumed to be
1016 	 * anonymous in userspace.  Sort those on pid then address.
1017 	 *
1018 	 * The kernel and non-zero major/minor mapped areas are
1019 	 * assumed to be unity mapped.  Sort those on address.
1020 	 */
1021 
1022 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1023 	    (!(l_map->flags & MAP_SHARED)) &&
1024 	    !l_map->maj && !l_map->min && !l_map->ino &&
1025 	    !l_map->ino_generation) {
1026 		/* userspace anonymous */
1027 
1028 		if (left->thread->pid_ > right->thread->pid_) return -1;
1029 		if (left->thread->pid_ < right->thread->pid_) return 1;
1030 	}
1031 
1032 addr:
1033 	/* al_addr does all the right addr - start + offset calculations */
1034 	l = cl_address(left->mem_info->daddr.al_addr);
1035 	r = cl_address(right->mem_info->daddr.al_addr);
1036 
1037 	if (l > r) return -1;
1038 	if (l < r) return 1;
1039 
1040 	return 0;
1041 }
1042 
1043 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1044 					  size_t size, unsigned int width)
1045 {
1046 
1047 	uint64_t addr = 0;
1048 	struct map *map = NULL;
1049 	struct symbol *sym = NULL;
1050 	char level = he->level;
1051 
1052 	if (he->mem_info) {
1053 		addr = cl_address(he->mem_info->daddr.al_addr);
1054 		map = he->mem_info->daddr.map;
1055 		sym = he->mem_info->daddr.sym;
1056 
1057 		/* print [s] for shared data mmaps */
1058 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1059 		     map && (map->type == MAP__VARIABLE) &&
1060 		    (map->flags & MAP_SHARED) &&
1061 		    (map->maj || map->min || map->ino ||
1062 		     map->ino_generation))
1063 			level = 's';
1064 		else if (!map)
1065 			level = 'X';
1066 	}
1067 	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1068 					 width);
1069 }
1070 
1071 struct sort_entry sort_mispredict = {
1072 	.se_header	= "Branch Mispredicted",
1073 	.se_cmp		= sort__mispredict_cmp,
1074 	.se_snprintf	= hist_entry__mispredict_snprintf,
1075 	.se_width_idx	= HISTC_MISPREDICT,
1076 };
1077 
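/*
 * "local_weight" reports the average sample weight per event for an entry,
 * "weight" the accumulated total; both are derived from the
 * PERF_SAMPLE_WEIGHT values summed into he->stat.
 */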
1078 static u64 he_weight(struct hist_entry *he)
1079 {
1080 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1081 }
1082 
1083 static int64_t
1084 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1085 {
1086 	return he_weight(left) - he_weight(right);
1087 }
1088 
1089 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1090 				    size_t size, unsigned int width)
1091 {
1092 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1093 }
1094 
1095 struct sort_entry sort_local_weight = {
1096 	.se_header	= "Local Weight",
1097 	.se_cmp		= sort__local_weight_cmp,
1098 	.se_snprintf	= hist_entry__local_weight_snprintf,
1099 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1100 };
1101 
1102 static int64_t
1103 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1104 {
1105 	return left->stat.weight - right->stat.weight;
1106 }
1107 
1108 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1109 					      size_t size, unsigned int width)
1110 {
1111 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1112 }
1113 
1114 struct sort_entry sort_global_weight = {
1115 	.se_header	= "Weight",
1116 	.se_cmp		= sort__global_weight_cmp,
1117 	.se_snprintf	= hist_entry__global_weight_snprintf,
1118 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1119 };
1120 
1121 struct sort_entry sort_mem_daddr_sym = {
1122 	.se_header	= "Data Symbol",
1123 	.se_cmp		= sort__daddr_cmp,
1124 	.se_snprintf	= hist_entry__daddr_snprintf,
1125 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1126 };
1127 
1128 struct sort_entry sort_mem_iaddr_sym = {
1129 	.se_header	= "Code Symbol",
1130 	.se_cmp		= sort__iaddr_cmp,
1131 	.se_snprintf	= hist_entry__iaddr_snprintf,
1132 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1133 };
1134 
1135 struct sort_entry sort_mem_daddr_dso = {
1136 	.se_header	= "Data Object",
1137 	.se_cmp		= sort__dso_daddr_cmp,
1138 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1139 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1140 };
1141 
1142 struct sort_entry sort_mem_locked = {
1143 	.se_header	= "Locked",
1144 	.se_cmp		= sort__locked_cmp,
1145 	.se_snprintf	= hist_entry__locked_snprintf,
1146 	.se_width_idx	= HISTC_MEM_LOCKED,
1147 };
1148 
1149 struct sort_entry sort_mem_tlb = {
1150 	.se_header	= "TLB access",
1151 	.se_cmp		= sort__tlb_cmp,
1152 	.se_snprintf	= hist_entry__tlb_snprintf,
1153 	.se_width_idx	= HISTC_MEM_TLB,
1154 };
1155 
1156 struct sort_entry sort_mem_lvl = {
1157 	.se_header	= "Memory access",
1158 	.se_cmp		= sort__lvl_cmp,
1159 	.se_snprintf	= hist_entry__lvl_snprintf,
1160 	.se_width_idx	= HISTC_MEM_LVL,
1161 };
1162 
1163 struct sort_entry sort_mem_snoop = {
1164 	.se_header	= "Snoop",
1165 	.se_cmp		= sort__snoop_cmp,
1166 	.se_snprintf	= hist_entry__snoop_snprintf,
1167 	.se_width_idx	= HISTC_MEM_SNOOP,
1168 };
1169 
1170 struct sort_entry sort_mem_dcacheline = {
1171 	.se_header	= "Data Cacheline",
1172 	.se_cmp		= sort__dcacheline_cmp,
1173 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1174 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1175 };
1176 
1177 static int64_t
1178 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1179 {
1180 	if (!left->branch_info || !right->branch_info)
1181 		return cmp_null(left->branch_info, right->branch_info);
1182 
1183 	return left->branch_info->flags.abort !=
1184 		right->branch_info->flags.abort;
1185 }
1186 
1187 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1188 				    size_t size, unsigned int width)
1189 {
1190 	const char *out = "N/A";
1191 
1192 	if (he->branch_info) {
1193 		if (he->branch_info->flags.abort)
1194 			out = "A";
1195 		else
1196 			out = ".";
1197 	}
1198 
1199 	return repsep_snprintf(bf, size, "%-*s", width, out);
1200 }
1201 
1202 struct sort_entry sort_abort = {
1203 	.se_header	= "Transaction abort",
1204 	.se_cmp		= sort__abort_cmp,
1205 	.se_snprintf	= hist_entry__abort_snprintf,
1206 	.se_width_idx	= HISTC_ABORT,
1207 };
1208 
1209 static int64_t
1210 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1211 {
1212 	if (!left->branch_info || !right->branch_info)
1213 		return cmp_null(left->branch_info, right->branch_info);
1214 
1215 	return left->branch_info->flags.in_tx !=
1216 		right->branch_info->flags.in_tx;
1217 }
1218 
1219 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1220 				    size_t size, unsigned int width)
1221 {
1222 	const char *out = "N/A";
1223 
1224 	if (he->branch_info) {
1225 		if (he->branch_info->flags.in_tx)
1226 			out = "T";
1227 		else
1228 			out = ".";
1229 	}
1230 
1231 	return repsep_snprintf(bf, size, "%-*s", width, out);
1232 }
1233 
1234 struct sort_entry sort_in_tx = {
1235 	.se_header	= "Branch in transaction",
1236 	.se_cmp		= sort__in_tx_cmp,
1237 	.se_snprintf	= hist_entry__in_tx_snprintf,
1238 	.se_width_idx	= HISTC_IN_TX,
1239 };
1240 
1241 static int64_t
1242 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1243 {
1244 	return left->transaction - right->transaction;
1245 }
1246 
1247 static inline char *add_str(char *p, const char *str)
1248 {
1249 	strcpy(p, str);
1250 	return p + strlen(str);
1251 }
1252 
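/*
 * Names printed for the transaction flags of the "transaction" sort key.
 * Flags marked skip_for_len are excluded from the column-width calculation
 * in hist_entry__transaction_len(), apparently because they cannot occur
 * together with one of the flags that is already counted.
 */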
1253 static struct txbit {
1254 	unsigned flag;
1255 	const char *name;
1256 	int skip_for_len;
1257 } txbits[] = {
1258 	{ PERF_TXN_ELISION,        "EL ",        0 },
1259 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1260 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1261 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1262 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1263 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1264 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1265 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1266 	{ 0, NULL, 0 }
1267 };
1268 
1269 int hist_entry__transaction_len(void)
1270 {
1271 	int i;
1272 	int len = 0;
1273 
1274 	for (i = 0; txbits[i].name; i++) {
1275 		if (!txbits[i].skip_for_len)
1276 			len += strlen(txbits[i].name);
1277 	}
1278 	len += 4; /* :XX<space> */
1279 	return len;
1280 }
1281 
1282 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1283 					    size_t size, unsigned int width)
1284 {
1285 	u64 t = he->transaction;
1286 	char buf[128];
1287 	char *p = buf;
1288 	int i;
1289 
1290 	buf[0] = 0;
1291 	for (i = 0; txbits[i].name; i++)
1292 		if (txbits[i].flag & t)
1293 			p = add_str(p, txbits[i].name);
1294 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1295 		p = add_str(p, "NEITHER ");
1296 	if (t & PERF_TXN_ABORT_MASK) {
1297 		sprintf(p, ":%" PRIx64,
1298 			(t & PERF_TXN_ABORT_MASK) >>
1299 			PERF_TXN_ABORT_SHIFT);
1300 		p += strlen(p);
1301 	}
1302 
1303 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1304 }
1305 
1306 struct sort_entry sort_transaction = {
1307 	.se_header	= "Transaction                ",
1308 	.se_cmp		= sort__transaction_cmp,
1309 	.se_snprintf	= hist_entry__transaction_snprintf,
1310 	.se_width_idx	= HISTC_TRANSACTION,
1311 };
1312 
1313 struct sort_dimension {
1314 	const char		*name;
1315 	struct sort_entry	*entry;
1316 	int			taken;
1317 };
1318 
1319 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1320 
1321 static struct sort_dimension common_sort_dimensions[] = {
1322 	DIM(SORT_PID, "pid", sort_thread),
1323 	DIM(SORT_COMM, "comm", sort_comm),
1324 	DIM(SORT_DSO, "dso", sort_dso),
1325 	DIM(SORT_SYM, "symbol", sort_sym),
1326 	DIM(SORT_PARENT, "parent", sort_parent),
1327 	DIM(SORT_CPU, "cpu", sort_cpu),
1328 	DIM(SORT_SOCKET, "socket", sort_socket),
1329 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
1330 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1331 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1332 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1333 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1334 	DIM(SORT_TRACE, "trace", sort_trace),
1335 };
1336 
1337 #undef DIM
1338 
1339 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1340 
1341 static struct sort_dimension bstack_sort_dimensions[] = {
1342 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1343 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1344 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1345 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1346 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1347 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1348 	DIM(SORT_ABORT, "abort", sort_abort),
1349 	DIM(SORT_CYCLES, "cycles", sort_cycles),
1350 };
1351 
1352 #undef DIM
1353 
1354 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1355 
1356 static struct sort_dimension memory_sort_dimensions[] = {
1357 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1358 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1359 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1360 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1361 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1362 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1363 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1364 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1365 };
1366 
1367 #undef DIM
1368 
1369 struct hpp_dimension {
1370 	const char		*name;
1371 	struct perf_hpp_fmt	*fmt;
1372 	int			taken;
1373 };
1374 
1375 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1376 
1377 static struct hpp_dimension hpp_sort_dimensions[] = {
1378 	DIM(PERF_HPP__OVERHEAD, "overhead"),
1379 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1380 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1381 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1382 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1383 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1384 	DIM(PERF_HPP__SAMPLES, "sample"),
1385 	DIM(PERF_HPP__PERIOD, "period"),
1386 };
1387 
1388 #undef DIM
1389 
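/*
 * Adapter exposing a classic sort_entry through the generic perf_hpp_fmt
 * interface, so sort keys and hpp output fields (overhead, period, ...) can
 * live in a single column/sort list.
 */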
1390 struct hpp_sort_entry {
1391 	struct perf_hpp_fmt hpp;
1392 	struct sort_entry *se;
1393 };
1394 
1395 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1396 {
1397 	struct hpp_sort_entry *hse;
1398 
1399 	if (!perf_hpp__is_sort_entry(fmt))
1400 		return;
1401 
1402 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1403 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1404 }
1405 
1406 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1407 			      struct perf_evsel *evsel)
1408 {
1409 	struct hpp_sort_entry *hse;
1410 	size_t len = fmt->user_len;
1411 
1412 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1413 
1414 	if (!len)
1415 		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1416 
1417 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1418 }
1419 
1420 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1421 			     struct perf_hpp *hpp __maybe_unused,
1422 			     struct perf_evsel *evsel)
1423 {
1424 	struct hpp_sort_entry *hse;
1425 	size_t len = fmt->user_len;
1426 
1427 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1428 
1429 	if (!len)
1430 		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1431 
1432 	return len;
1433 }
1434 
1435 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1436 			     struct hist_entry *he)
1437 {
1438 	struct hpp_sort_entry *hse;
1439 	size_t len = fmt->user_len;
1440 
1441 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1442 
1443 	if (!len)
1444 		len = hists__col_len(he->hists, hse->se->se_width_idx);
1445 
1446 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1447 }
1448 
1449 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1450 			       struct hist_entry *a, struct hist_entry *b)
1451 {
1452 	struct hpp_sort_entry *hse;
1453 
1454 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1455 	return hse->se->se_cmp(a, b);
1456 }
1457 
1458 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1459 				    struct hist_entry *a, struct hist_entry *b)
1460 {
1461 	struct hpp_sort_entry *hse;
1462 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1463 
1464 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1465 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1466 	return collapse_fn(a, b);
1467 }
1468 
1469 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1470 				struct hist_entry *a, struct hist_entry *b)
1471 {
1472 	struct hpp_sort_entry *hse;
1473 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1474 
1475 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1476 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1477 	return sort_fn(a, b);
1478 }
1479 
1480 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1481 {
1482 	return format->header == __sort__hpp_header;
1483 }
1484 
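/*
 * Generate perf_hpp__is_<key>_entry() helpers that check whether a hpp
 * format is the wrapper around a specific sort_entry.
 */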
1485 #define MK_SORT_ENTRY_CHK(key)					\
1486 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
1487 {								\
1488 	struct hpp_sort_entry *hse;				\
1489 								\
1490 	if (!perf_hpp__is_sort_entry(fmt))			\
1491 		return false;					\
1492 								\
1493 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
1494 	return hse->se == &sort_ ## key ;			\
1495 }
1496 
1497 MK_SORT_ENTRY_CHK(trace)
1498 MK_SORT_ENTRY_CHK(srcline)
1499 MK_SORT_ENTRY_CHK(srcfile)
1500 MK_SORT_ENTRY_CHK(thread)
1501 MK_SORT_ENTRY_CHK(comm)
1502 MK_SORT_ENTRY_CHK(dso)
1503 MK_SORT_ENTRY_CHK(sym)
1504 
1505 
1506 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1507 {
1508 	struct hpp_sort_entry *hse_a;
1509 	struct hpp_sort_entry *hse_b;
1510 
1511 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1512 		return false;
1513 
1514 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
1515 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
1516 
1517 	return hse_a->se == hse_b->se;
1518 }
1519 
1520 static void hse_free(struct perf_hpp_fmt *fmt)
1521 {
1522 	struct hpp_sort_entry *hse;
1523 
1524 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1525 	free(hse);
1526 }
1527 
1528 static struct hpp_sort_entry *
1529 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1530 {
1531 	struct hpp_sort_entry *hse;
1532 
1533 	hse = malloc(sizeof(*hse));
1534 	if (hse == NULL) {
1535 		pr_err("Memory allocation failed\n");
1536 		return NULL;
1537 	}
1538 
1539 	hse->se = sd->entry;
1540 	hse->hpp.name = sd->entry->se_header;
1541 	hse->hpp.header = __sort__hpp_header;
1542 	hse->hpp.width = __sort__hpp_width;
1543 	hse->hpp.entry = __sort__hpp_entry;
1544 	hse->hpp.color = NULL;
1545 
1546 	hse->hpp.cmp = __sort__hpp_cmp;
1547 	hse->hpp.collapse = __sort__hpp_collapse;
1548 	hse->hpp.sort = __sort__hpp_sort;
1549 	hse->hpp.equal = __sort__hpp_equal;
1550 	hse->hpp.free = hse_free;
1551 
1552 	INIT_LIST_HEAD(&hse->hpp.list);
1553 	INIT_LIST_HEAD(&hse->hpp.sort_list);
1554 	hse->hpp.elide = false;
1555 	hse->hpp.len = 0;
1556 	hse->hpp.user_len = 0;
1557 	hse->hpp.level = level;
1558 
1559 	return hse;
1560 }
1561 
1562 static void hpp_free(struct perf_hpp_fmt *fmt)
1563 {
1564 	free(fmt);
1565 }
1566 
1567 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1568 						       int level)
1569 {
1570 	struct perf_hpp_fmt *fmt;
1571 
1572 	fmt = memdup(hd->fmt, sizeof(*fmt));
1573 	if (fmt) {
1574 		INIT_LIST_HEAD(&fmt->list);
1575 		INIT_LIST_HEAD(&fmt->sort_list);
1576 		fmt->free = hpp_free;
1577 		fmt->level = level;
1578 	}
1579 
1580 	return fmt;
1581 }
1582 
1583 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1584 {
1585 	struct perf_hpp_fmt *fmt;
1586 	struct hpp_sort_entry *hse;
1587 	int ret = -1;
1588 	int r;
1589 
1590 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1591 		if (!perf_hpp__is_sort_entry(fmt))
1592 			continue;
1593 
1594 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
1595 		if (hse->se->se_filter == NULL)
1596 			continue;
1597 
1598 		/*
1599 		 * hist entry is filtered if any of sort key in the hpp list
1600 		 * is applied.  But it should skip non-matched filter types.
1601 		 */
1602 		r = hse->se->se_filter(he, type, arg);
1603 		if (r >= 0) {
1604 			if (ret < 0)
1605 				ret = 0;
1606 			ret |= r;
1607 		}
1608 	}
1609 
1610 	return ret;
1611 }
1612 
1613 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1614 					  struct perf_hpp_list *list,
1615 					  int level)
1616 {
1617 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1618 
1619 	if (hse == NULL)
1620 		return -1;
1621 
1622 	perf_hpp_list__register_sort_field(list, &hse->hpp);
1623 	return 0;
1624 }
1625 
1626 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1627 					    struct perf_hpp_list *list)
1628 {
1629 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1630 
1631 	if (hse == NULL)
1632 		return -1;
1633 
1634 	perf_hpp_list__column_register(list, &hse->hpp);
1635 	return 0;
1636 }
1637 
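/*
 * A dynamic entry sorts and prints one field of a tracepoint event, looked
 * up in the event's format description.  dynamic_len records the widest
 * value seen so far so the column can be sized for output.
 */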
1638 struct hpp_dynamic_entry {
1639 	struct perf_hpp_fmt hpp;
1640 	struct perf_evsel *evsel;
1641 	struct format_field *field;
1642 	unsigned dynamic_len;
1643 	bool raw_trace;
1644 };
1645 
1646 static int hde_width(struct hpp_dynamic_entry *hde)
1647 {
1648 	if (!hde->hpp.len) {
1649 		int len = hde->dynamic_len;
1650 		int namelen = strlen(hde->field->name);
1651 		int fieldlen = hde->field->size;
1652 
1653 		if (namelen > len)
1654 			len = namelen;
1655 
1656 		if (!(hde->field->flags & FIELD_IS_STRING)) {
1657 			/* length for print hex numbers */
1658 			fieldlen = hde->field->size * 2 + 2;
1659 		}
1660 		if (fieldlen > len)
1661 			len = fieldlen;
1662 
1663 		hde->hpp.len = len;
1664 	}
1665 	return hde->hpp.len;
1666 }
1667 
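/*
 * Scan the pretty-printed trace output for a "<name>=<value>" token matching
 * this field and remember the longest value seen; the '=' separator is an
 * assumption about libtraceevent's usual output format.  Raw trace output is
 * sized from the field definition in hde_width() instead.
 */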
1668 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1669 			       struct hist_entry *he)
1670 {
1671 	char *str, *pos;
1672 	struct format_field *field = hde->field;
1673 	size_t namelen;
1674 	bool last = false;
1675 
1676 	if (hde->raw_trace)
1677 		return;
1678 
1679 	/* parse pretty print result and update max length */
1680 	if (!he->trace_output)
1681 		he->trace_output = get_trace_output(he);
1682 
1683 	namelen = strlen(field->name);
1684 	str = he->trace_output;
1685 
1686 	while (str) {
1687 		pos = strchr(str, ' ');
1688 		if (pos == NULL) {
1689 			last = true;
1690 			pos = str + strlen(str);
1691 		}
1692 
1693 		if (!strncmp(str, field->name, namelen)) {
1694 			size_t len;
1695 
1696 			str += namelen + 1;
1697 			len = pos - str;
1698 
1699 			if (len > hde->dynamic_len)
1700 				hde->dynamic_len = len;
1701 			break;
1702 		}
1703 
1704 		if (last)
1705 			str = NULL;
1706 		else
1707 			str = pos + 1;
1708 	}
1709 }
1710 
1711 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1712 			      struct perf_evsel *evsel __maybe_unused)
1713 {
1714 	struct hpp_dynamic_entry *hde;
1715 	size_t len = fmt->user_len;
1716 
1717 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1718 
1719 	if (!len)
1720 		len = hde_width(hde);
1721 
1722 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1723 }
1724 
1725 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1726 			     struct perf_hpp *hpp __maybe_unused,
1727 			     struct perf_evsel *evsel __maybe_unused)
1728 {
1729 	struct hpp_dynamic_entry *hde;
1730 	size_t len = fmt->user_len;
1731 
1732 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1733 
1734 	if (!len)
1735 		len = hde_width(hde);
1736 
1737 	return len;
1738 }
1739 
1740 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1741 {
1742 	struct hpp_dynamic_entry *hde;
1743 
1744 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1745 
1746 	return hists_to_evsel(hists) == hde->evsel;
1747 }
1748 
1749 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1750 			     struct hist_entry *he)
1751 {
1752 	struct hpp_dynamic_entry *hde;
1753 	size_t len = fmt->user_len;
1754 	char *str, *pos;
1755 	struct format_field *field;
1756 	size_t namelen;
1757 	bool last = false;
1758 	int ret;
1759 
1760 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1761 
1762 	if (!len)
1763 		len = hde_width(hde);
1764 
1765 	if (hde->raw_trace)
1766 		goto raw_field;
1767 
1768 	if (!he->trace_output)
1769 		he->trace_output = get_trace_output(he);
1770 
1771 	field = hde->field;
1772 	namelen = strlen(field->name);
1773 	str = he->trace_output;
1774 
1775 	while (str) {
1776 		pos = strchr(str, ' ');
1777 		if (pos == NULL) {
1778 			last = true;
1779 			pos = str + strlen(str);
1780 		}
1781 
1782 		if (!strncmp(str, field->name, namelen)) {
1783 			str += namelen + 1;
1784 			str = strndup(str, pos - str);
1785 
1786 			if (str == NULL)
1787 				return scnprintf(hpp->buf, hpp->size,
1788 						 "%*.*s", len, len, "ERROR");
1789 			break;
1790 		}
1791 
1792 		if (last)
1793 			str = NULL;
1794 		else
1795 			str = pos + 1;
1796 	}
1797 
1798 	if (str == NULL) {
1799 		struct trace_seq seq;
1800 raw_field:
1801 		trace_seq_init(&seq);
1802 		pevent_print_field(&seq, he->raw_data, hde->field);
1803 		str = seq.buffer;
1804 	}
1805 
1806 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1807 	free(str);
1808 	return ret;
1809 }
1810 
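/*
 * Compare the raw bytes of the field in two entries.  When called with
 * b == NULL this only updates the running maximum width for entry a (see
 * update_dynamic_len()); the hists output code appears to rely on that to
 * size columns before printing.
 */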
1811 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1812 			       struct hist_entry *a, struct hist_entry *b)
1813 {
1814 	struct hpp_dynamic_entry *hde;
1815 	struct format_field *field;
1816 	unsigned offset, size;
1817 
1818 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1819 
1820 	if (b == NULL) {
1821 		update_dynamic_len(hde, a);
1822 		return 0;
1823 	}
1824 
1825 	field = hde->field;
1826 	if (field->flags & FIELD_IS_DYNAMIC) {
1827 		unsigned long long dyn;
1828 
1829 		pevent_read_number_field(field, a->raw_data, &dyn);
1830 		offset = dyn & 0xffff;
1831 		size = (dyn >> 16) & 0xffff;
1832 
1833 		/* record max width for output */
1834 		if (size > hde->dynamic_len)
1835 			hde->dynamic_len = size;
1836 	} else {
1837 		offset = field->offset;
1838 		size = field->size;
1839 	}
1840 
1841 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1842 }
1843 
1844 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1845 {
1846 	return fmt->cmp == __sort__hde_cmp;
1847 }
1848 
1849 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1850 {
1851 	struct hpp_dynamic_entry *hde_a;
1852 	struct hpp_dynamic_entry *hde_b;
1853 
1854 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1855 		return false;
1856 
1857 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1858 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1859 
1860 	return hde_a->field == hde_b->field;
1861 }
1862 
1863 static void hde_free(struct perf_hpp_fmt *fmt)
1864 {
1865 	struct hpp_dynamic_entry *hde;
1866 
1867 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1868 	free(hde);
1869 }
1870 
1871 static struct hpp_dynamic_entry *
1872 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1873 		      int level)
1874 {
1875 	struct hpp_dynamic_entry *hde;
1876 
1877 	hde = malloc(sizeof(*hde));
1878 	if (hde == NULL) {
1879 		pr_debug("Memory allocation failed\n");
1880 		return NULL;
1881 	}
1882 
1883 	hde->evsel = evsel;
1884 	hde->field = field;
1885 	hde->dynamic_len = 0;
1886 
1887 	hde->hpp.name = field->name;
1888 	hde->hpp.header = __sort__hde_header;
1889 	hde->hpp.width  = __sort__hde_width;
1890 	hde->hpp.entry  = __sort__hde_entry;
1891 	hde->hpp.color  = NULL;
1892 
1893 	hde->hpp.cmp = __sort__hde_cmp;
1894 	hde->hpp.collapse = __sort__hde_cmp;
1895 	hde->hpp.sort = __sort__hde_cmp;
1896 	hde->hpp.equal = __sort__hde_equal;
1897 	hde->hpp.free = hde_free;
1898 
1899 	INIT_LIST_HEAD(&hde->hpp.list);
1900 	INIT_LIST_HEAD(&hde->hpp.sort_list);
1901 	hde->hpp.elide = false;
1902 	hde->hpp.len = 0;
1903 	hde->hpp.user_len = 0;
1904 	hde->hpp.level = level;
1905 
1906 	return hde;
1907 }
1908 
1909 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
1910 {
1911 	struct perf_hpp_fmt *new_fmt = NULL;
1912 
1913 	if (perf_hpp__is_sort_entry(fmt)) {
1914 		struct hpp_sort_entry *hse, *new_hse;
1915 
1916 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
1917 		new_hse = memdup(hse, sizeof(*hse));
1918 		if (new_hse)
1919 			new_fmt = &new_hse->hpp;
1920 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
1921 		struct hpp_dynamic_entry *hde, *new_hde;
1922 
1923 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1924 		new_hde = memdup(hde, sizeof(*hde));
1925 		if (new_hde)
1926 			new_fmt = &new_hde->hpp;
1927 	} else {
1928 		new_fmt = memdup(fmt, sizeof(*fmt));
1929 	}
1930 
	if (new_fmt == NULL)
		return NULL;

1931 	INIT_LIST_HEAD(&new_fmt->list);
1932 	INIT_LIST_HEAD(&new_fmt->sort_list);
1933 
1934 	return new_fmt;
1935 }
1936 
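/*
 * Split a dynamic sort token of the form [<event>.]<field>[/<option>] into
 * its parts; *event is left NULL when no event component is given.
 */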
1937 static int parse_field_name(char *str, char **event, char **field, char **opt)
1938 {
1939 	char *event_name, *field_name, *opt_name;
1940 
1941 	event_name = str;
1942 	field_name = strchr(str, '.');
1943 
1944 	if (field_name) {
1945 		*field_name++ = '\0';
1946 	} else {
1947 		event_name = NULL;
1948 		field_name = str;
1949 	}
1950 
1951 	opt_name = strchr(field_name, '/');
1952 	if (opt_name)
1953 		*opt_name++ = '\0';
1954 
1955 	*event = event_name;
1956 	*field = field_name;
1957 	*opt   = opt_name;
1958 
1959 	return 0;
1960 }
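/*
 * Examples of how a key is split (hypothetical inputs, traced from the
 * code above):
 *
 *   "sched:sched_switch.next_pid"      -> event "sched:sched_switch",
 *                                          field "next_pid", no option
 *   "next_pid"                         -> no event, field "next_pid"
 *   "sched:sched_switch.next_comm/raw" -> option "raw" (see add_dynamic_entry())
 */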
1961 
1962 /* Find a matching evsel using the given event name.  The event name can be:
1963  *   1. '%' + event index (e.g. '%1' for first event)
1964  *   2. full event name (e.g. sched:sched_switch)
1965  *   3. partial event name (should not contain ':')
1966  */
1967 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1968 {
1969 	struct perf_evsel *evsel = NULL;
1970 	struct perf_evsel *pos;
1971 	bool full_name;
1972 
1973 	/* case 1 */
1974 	if (event_name[0] == '%') {
1975 		int nr = strtol(event_name+1, NULL, 0);
1976 
1977 		if (nr > evlist->nr_entries)
1978 			return NULL;
1979 
1980 		evsel = perf_evlist__first(evlist);
1981 		while (--nr > 0)
1982 			evsel = perf_evsel__next(evsel);
1983 
1984 		return evsel;
1985 	}
1986 
1987 	full_name = !!strchr(event_name, ':');
1988 	evlist__for_each(evlist, pos) {
1989 		/* case 2 */
1990 		if (full_name && !strcmp(pos->name, event_name))
1991 			return pos;
1992 		/* case 3 */
1993 		if (!full_name && strstr(pos->name, event_name)) {
1994 			if (evsel) {
1995 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1996 					 event_name, evsel->name, pos->name);
1997 				return NULL;
1998 			}
1999 			evsel = pos;
2000 		}
2001 	}
2002 
2003 	return evsel;
2004 }
2005 
2006 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2007 				    struct format_field *field,
2008 				    bool raw_trace, int level)
2009 {
2010 	struct hpp_dynamic_entry *hde;
2011 
2012 	hde = __alloc_dynamic_entry(evsel, field, level);
2013 	if (hde == NULL)
2014 		return -ENOMEM;
2015 
2016 	hde->raw_trace = raw_trace;
2017 
2018 	perf_hpp__register_sort_field(&hde->hpp);
2019 	return 0;
2020 }
2021 
2022 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2023 {
2024 	int ret;
2025 	struct format_field *field;
2026 
2027 	field = evsel->tp_format->format.fields;
2028 	while (field) {
2029 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2030 		if (ret < 0)
2031 			return ret;
2032 
2033 		field = field->next;
2034 	}
2035 	return 0;
2036 }
2037 
2038 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2039 				  int level)
2040 {
2041 	int ret;
2042 	struct perf_evsel *evsel;
2043 
2044 	evlist__for_each(evlist, evsel) {
2045 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2046 			continue;
2047 
2048 		ret = add_evsel_fields(evsel, raw_trace, level);
2049 		if (ret < 0)
2050 			return ret;
2051 	}
2052 	return 0;
2053 }
2054 
2055 static int add_all_matching_fields(struct perf_evlist *evlist,
2056 				   char *field_name, bool raw_trace, int level)
2057 {
2058 	int ret = -ESRCH;
2059 	struct perf_evsel *evsel;
2060 	struct format_field *field;
2061 
2062 	evlist__for_each(evlist, evsel) {
2063 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2064 			continue;
2065 
2066 		field = pevent_find_any_field(evsel->tp_format, field_name);
2067 		if (field == NULL)
2068 			continue;
2069 
2070 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2071 		if (ret < 0)
2072 			break;
2073 	}
2074 	return ret;
2075 }
2076 
2077 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2078 			     int level)
2079 {
2080 	char *str, *event_name, *field_name, *opt_name;
2081 	struct perf_evsel *evsel;
2082 	struct format_field *field;
2083 	bool raw_trace = symbol_conf.raw_trace;
2084 	int ret = 0;
2085 
2086 	if (evlist == NULL)
2087 		return -ENOENT;
2088 
2089 	str = strdup(tok);
2090 	if (str == NULL)
2091 		return -ENOMEM;
2092 
2093 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2094 		ret = -EINVAL;
2095 		goto out;
2096 	}
2097 
2098 	if (opt_name) {
2099 		if (strcmp(opt_name, "raw")) {
2100 			pr_debug("unsupported field option %s\n", opt_name);
2101 			ret = -EINVAL;
2102 			goto out;
2103 		}
2104 		raw_trace = true;
2105 	}
2106 
2107 	if (!strcmp(field_name, "trace_fields")) {
2108 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2109 		goto out;
2110 	}
2111 
2112 	if (event_name == NULL) {
2113 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2114 		goto out;
2115 	}
2116 
2117 	evsel = find_evsel(evlist, event_name);
2118 	if (evsel == NULL) {
2119 		pr_debug("Cannot find event: %s\n", event_name);
2120 		ret = -ENOENT;
2121 		goto out;
2122 	}
2123 
2124 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2125 		pr_debug("%s is not a tracepoint event\n", event_name);
2126 		ret = -EINVAL;
2127 		goto out;
2128 	}
2129 
2130 	if (!strcmp(field_name, "*")) {
2131 		ret = add_evsel_fields(evsel, raw_trace, level);
2132 	} else {
2133 		field = pevent_find_any_field(evsel->tp_format, field_name);
2134 		if (field == NULL) {
2135 			pr_debug("Cannot find event field for %s.%s\n",
2136 				 event_name, field_name);
2137 			ret = -ENOENT;
			goto out;
2138 		}
2139 
2140 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2141 	}
2142 
2143 out:
2144 	free(str);
2145 	return ret;
2146 }
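/*
 * Sketch of the dynamic --sort keys accepted above (the forms follow the
 * parsing in this function; the event/field names are only examples):
 *
 *   trace_fields           all fields of every tracepoint event
 *   <event>.*              all fields of one event
 *   <event>.<field>        one field of one event
 *   <field>                that field in every event which has it
 *
 * Appending "/raw" to a key requests the raw, untranslated field value.
 */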
2147 
2148 static int __sort_dimension__add(struct sort_dimension *sd,
2149 				 struct perf_hpp_list *list,
2150 				 int level)
2151 {
2152 	if (sd->taken)
2153 		return 0;
2154 
2155 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2156 		return -1;
2157 
2158 	if (sd->entry->se_collapse)
2159 		list->need_collapse = 1;
2160 
2161 	sd->taken = 1;
2162 
2163 	return 0;
2164 }
2165 
2166 static int __hpp_dimension__add(struct hpp_dimension *hd,
2167 				struct perf_hpp_list *list,
2168 				int level)
2169 {
2170 	struct perf_hpp_fmt *fmt;
2171 
2172 	if (hd->taken)
2173 		return 0;
2174 
2175 	fmt = __hpp_dimension__alloc_hpp(hd, level);
2176 	if (!fmt)
2177 		return -1;
2178 
2179 	hd->taken = 1;
2180 	perf_hpp_list__register_sort_field(list, fmt);
2181 	return 0;
2182 }
2183 
2184 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2185 					struct sort_dimension *sd)
2186 {
2187 	if (sd->taken)
2188 		return 0;
2189 
2190 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
2191 		return -1;
2192 
2193 	sd->taken = 1;
2194 	return 0;
2195 }
2196 
2197 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2198 				       struct hpp_dimension *hd)
2199 {
2200 	struct perf_hpp_fmt *fmt;
2201 
2202 	if (hd->taken)
2203 		return 0;
2204 
2205 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
2206 	if (!fmt)
2207 		return -1;
2208 
2209 	hd->taken = 1;
2210 	perf_hpp_list__column_register(list, fmt);
2211 	return 0;
2212 }
2213 
2214 int hpp_dimension__add_output(unsigned col)
2215 {
2216 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
2217 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2218 }
2219 
2220 static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2221 			       struct perf_evlist *evlist,
2222 			       int level)
2223 {
2224 	unsigned int i;
2225 
2226 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2227 		struct sort_dimension *sd = &common_sort_dimensions[i];
2228 
2229 		if (strncasecmp(tok, sd->name, strlen(tok)))
2230 			continue;
2231 
2232 		if (sd->entry == &sort_parent) {
2233 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2234 			if (ret) {
2235 				char err[BUFSIZ];
2236 
2237 				regerror(ret, &parent_regex, err, sizeof(err));
2238 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2239 				return -EINVAL;
2240 			}
2241 			list->parent = 1;
2242 		} else if (sd->entry == &sort_sym) {
2243 			list->sym = 1;
2244 			/*
2245 			 * perf diff displays the performance difference between
2246 			 * two or more perf.data files. Those files could come
2247 			 * from different binaries, so we should not compare
2248 			 * their IPs, but the symbol names.
2249 			 */
2250 			if (sort__mode == SORT_MODE__DIFF)
2251 				sd->entry->se_collapse = sort__sym_sort;
2252 
2253 		} else if (sd->entry == &sort_dso) {
2254 			list->dso = 1;
2255 		} else if (sd->entry == &sort_socket) {
2256 			list->socket = 1;
2257 		} else if (sd->entry == &sort_thread) {
2258 			list->thread = 1;
2259 		} else if (sd->entry == &sort_comm) {
2260 			list->comm = 1;
2261 		}
2262 
2263 		return __sort_dimension__add(sd, list, level);
2264 	}
2265 
2266 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2267 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2268 
2269 		if (strncasecmp(tok, hd->name, strlen(tok)))
2270 			continue;
2271 
2272 		return __hpp_dimension__add(hd, list, level);
2273 	}
2274 
2275 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2276 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2277 
2278 		if (strncasecmp(tok, sd->name, strlen(tok)))
2279 			continue;
2280 
2281 		if (sort__mode != SORT_MODE__BRANCH)
2282 			return -EINVAL;
2283 
2284 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2285 			list->sym = 1;
2286 
2287 		return __sort_dimension__add(sd, list, level);
2289 	}
2290 
2291 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2292 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2293 
2294 		if (strncasecmp(tok, sd->name, strlen(tok)))
2295 			continue;
2296 
2297 		if (sort__mode != SORT_MODE__MEMORY)
2298 			return -EINVAL;
2299 
2300 		if (sd->entry == &sort_mem_daddr_sym)
2301 			list->sym = 1;
2302 
2303 		return __sort_dimension__add(sd, list, level);
2305 	}
2306 
2307 	if (!add_dynamic_entry(evlist, tok, level))
2308 		return 0;
2309 
2310 	return -ESRCH;
2311 }
2312 
2313 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2314 			   struct perf_evlist *evlist)
2315 {
2316 	char *tmp, *tok;
2317 	int ret = 0;
2318 	int level = 0;
2319 	int next_level = 1;
2320 	bool in_group = false;
2321 
2322 	do {
2323 		tok = str;
2324 		tmp = strpbrk(str, "{}, ");
2325 		if (tmp) {
2326 			if (in_group)
2327 				next_level = level;
2328 			else
2329 				next_level = level + 1;
2330 
2331 			if (*tmp == '{')
2332 				in_group = true;
2333 			else if (*tmp == '}')
2334 				in_group = false;
2335 
2336 			*tmp = '\0';
2337 			str = tmp + 1;
2338 		}
2339 
2340 		if (*tok) {
2341 			ret = sort_dimension__add(list, tok, evlist, level);
2342 			if (ret == -EINVAL) {
2343 				error("Invalid --sort key: `%s'", tok);
2344 				break;
2345 			} else if (ret == -ESRCH) {
2346 				error("Unknown --sort key: `%s'", tok);
2347 				break;
2348 			}
2349 		}
2350 
2351 		level = next_level;
2352 	} while (tmp);
2353 
2354 	return ret;
2355 }
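/*
 * Level assignment example (traced from the loop above, keys illustrative):
 * for the sort string "{comm,dso},sym" the braces keep 'comm' and 'dso' on
 * the same hierarchy level and 'sym' goes on the next one, whereas
 * "comm,dso,sym" puts each key on its own level.
 */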
2356 
2357 static const char *get_default_sort_order(struct perf_evlist *evlist)
2358 {
2359 	const char *default_sort_orders[] = {
2360 		default_sort_order,
2361 		default_branch_sort_order,
2362 		default_mem_sort_order,
2363 		default_top_sort_order,
2364 		default_diff_sort_order,
2365 		default_tracepoint_sort_order,
2366 	};
2367 	bool use_trace = true;
2368 	struct perf_evsel *evsel;
2369 
2370 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2371 
2372 	if (evlist == NULL)
2373 		goto out_no_evlist;
2374 
2375 	evlist__for_each(evlist, evsel) {
2376 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2377 			use_trace = false;
2378 			break;
2379 		}
2380 	}
2381 
2382 	if (use_trace) {
2383 		sort__mode = SORT_MODE__TRACEPOINT;
2384 		if (symbol_conf.raw_trace)
2385 			return "trace_fields";
2386 	}
2387 out_no_evlist:
2388 	return default_sort_orders[sort__mode];
2389 }
2390 
2391 static int setup_sort_order(struct perf_evlist *evlist)
2392 {
2393 	char *new_sort_order;
2394 
2395 	/*
2396 	 * Append '+'-prefixed sort order to the default sort
2397 	 * order string.
2398 	 */
2399 	if (!sort_order || is_strict_order(sort_order))
2400 		return 0;
2401 
2402 	if (sort_order[1] == '\0') {
2403 		error("Invalid --sort key: `+'");
2404 		return -EINVAL;
2405 	}
2406 
2407 	/*
2408 	 * We allocate a new sort_order string, but we never free it
2409 	 * because it is used throughout the rest of the code.
2410 	 */
2411 	if (asprintf(&new_sort_order, "%s,%s",
2412 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
2413 		error("Not enough memory to set up --sort");
2414 		return -ENOMEM;
2415 	}
2416 
2417 	sort_order = new_sort_order;
2418 	return 0;
2419 }
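/*
 * For example (hypothetical invocation), "--sort +cpu" keeps the default
 * keys of the current sort mode and appends "cpu", i.e. sort_order ends up
 * as "<default keys>,cpu".
 */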
2420 
2421 /*
2422  * Adds the 'pre,' prefix to 'str' if 'pre' is
2423  * not already part of 'str'.
2424  */
2425 static char *prefix_if_not_in(const char *pre, char *str)
2426 {
2427 	char *n;
2428 
2429 	if (!str || strstr(str, pre))
2430 		return str;
2431 
2432 	if (asprintf(&n, "%s,%s", pre, str) < 0)
2433 		return NULL;
2434 
2435 	free(str);
2436 	return n;
2437 }
2438 
2439 static char *setup_overhead(char *keys)
2440 {
2441 	if (sort__mode == SORT_MODE__DIFF)
2442 		return keys;
2443 
2444 	keys = prefix_if_not_in("overhead", keys);
2445 
2446 	if (symbol_conf.cumulate_callchain)
2447 		keys = prefix_if_not_in("overhead_children", keys);
2448 
2449 	return keys;
2450 }
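/*
 * E.g. (illustrative) with keys "cpu,sym" the result is "overhead,cpu,sym",
 * or "overhead_children,overhead,cpu,sym" when cumulate_callchain is set.
 * A string that already contains the prefix is returned unchanged by
 * prefix_if_not_in().
 */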
2451 
2452 static int __setup_sorting(struct perf_evlist *evlist)
2453 {
2454 	char *str;
2455 	const char *sort_keys;
2456 	int ret = 0;
2457 
2458 	ret = setup_sort_order(evlist);
2459 	if (ret)
2460 		return ret;
2461 
2462 	sort_keys = sort_order;
2463 	if (sort_keys == NULL) {
2464 		if (is_strict_order(field_order)) {
2465 			/*
2466 			 * If the user specified a field order but no sort order,
2467 			 * we'll honor it and not add the default sort orders.
2468 			 */
2469 			return 0;
2470 		}
2471 
2472 		sort_keys = get_default_sort_order(evlist);
2473 	}
2474 
2475 	str = strdup(sort_keys);
2476 	if (str == NULL) {
2477 		error("Not enough memory to setup sort keys");
2478 		return -ENOMEM;
2479 	}
2480 
2481 	/*
2482 	 * Prepend overhead fields for backward compatibility.
2483 	 */
2484 	if (!is_strict_order(field_order)) {
2485 		str = setup_overhead(str);
2486 		if (str == NULL) {
2487 			error("Not enough memory to setup overhead keys");
2488 			return -ENOMEM;
2489 		}
2490 	}
2491 
2492 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
2493 
2494 	free(str);
2495 	return ret;
2496 }
2497 
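/*
 * Set the elide flag of the sort-entry column whose width index matches
 * 'idx'; only the first matching format is updated.
 */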
2498 void perf_hpp__set_elide(int idx, bool elide)
2499 {
2500 	struct perf_hpp_fmt *fmt;
2501 	struct hpp_sort_entry *hse;
2502 
2503 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2504 		if (!perf_hpp__is_sort_entry(fmt))
2505 			continue;
2506 
2507 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2508 		if (hse->se->se_width_idx == idx) {
2509 			fmt->elide = elide;
2510 			break;
2511 		}
2512 	}
2513 }
2514 
2515 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2516 {
2517 	if (list && strlist__nr_entries(list) == 1) {
2518 		if (fp != NULL)
2519 			fprintf(fp, "# %s: %s\n", list_name,
2520 				strlist__entry(list, 0)->s);
2521 		return true;
2522 	}
2523 	return false;
2524 }
2525 
2526 static bool get_elide(int idx, FILE *output)
2527 {
2528 	switch (idx) {
2529 	case HISTC_SYMBOL:
2530 		return __get_elide(symbol_conf.sym_list, "symbol", output);
2531 	case HISTC_DSO:
2532 		return __get_elide(symbol_conf.dso_list, "dso", output);
2533 	case HISTC_COMM:
2534 		return __get_elide(symbol_conf.comm_list, "comm", output);
2535 	default:
2536 		break;
2537 	}
2538 
2539 	if (sort__mode != SORT_MODE__BRANCH)
2540 		return false;
2541 
2542 	switch (idx) {
2543 	case HISTC_SYMBOL_FROM:
2544 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2545 	case HISTC_SYMBOL_TO:
2546 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2547 	case HISTC_DSO_FROM:
2548 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2549 	case HISTC_DSO_TO:
2550 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2551 	default:
2552 		break;
2553 	}
2554 
2555 	return false;
2556 }
2557 
2558 void sort__setup_elide(FILE *output)
2559 {
2560 	struct perf_hpp_fmt *fmt;
2561 	struct hpp_sort_entry *hse;
2562 
2563 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2564 		if (!perf_hpp__is_sort_entry(fmt))
2565 			continue;
2566 
2567 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2568 		fmt->elide = get_elide(hse->se->se_width_idx, output);
2569 	}
2570 
2571 	/*
2572 	 * It makes no sense to elide all of the sort entries.
2573 	 * Just revert them so they show up again.
2574 	 */
2575 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2576 		if (!perf_hpp__is_sort_entry(fmt))
2577 			continue;
2578 
2579 		if (!fmt->elide)
2580 			return;
2581 	}
2582 
2583 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2584 		if (!perf_hpp__is_sort_entry(fmt))
2585 			continue;
2586 
2587 		fmt->elide = false;
2588 	}
2589 }
2590 
2591 static int output_field_add(struct perf_hpp_list *list, char *tok)
2592 {
2593 	unsigned int i;
2594 
2595 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2596 		struct sort_dimension *sd = &common_sort_dimensions[i];
2597 
2598 		if (strncasecmp(tok, sd->name, strlen(tok)))
2599 			continue;
2600 
2601 		return __sort_dimension__add_output(list, sd);
2602 	}
2603 
2604 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2605 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2606 
2607 		if (strncasecmp(tok, hd->name, strlen(tok)))
2608 			continue;
2609 
2610 		return __hpp_dimension__add_output(list, hd);
2611 	}
2612 
2613 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2614 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2615 
2616 		if (strncasecmp(tok, sd->name, strlen(tok)))
2617 			continue;
2618 
2619 		return __sort_dimension__add_output(list, sd);
2620 	}
2621 
2622 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2623 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2624 
2625 		if (strncasecmp(tok, sd->name, strlen(tok)))
2626 			continue;
2627 
2628 		return __sort_dimension__add_output(list, sd);
2629 	}
2630 
2631 	return -ESRCH;
2632 }
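/*
 * The --fields value is looked up in the same dimension tables as --sort,
 * so e.g. (hypothetical) "--fields overhead,comm,sym" would select one hpp
 * column and two sort-entry columns for output.
 */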
2633 
2634 static int setup_output_list(struct perf_hpp_list *list, char *str)
2635 {
2636 	char *tmp, *tok;
2637 	int ret = 0;
2638 
2639 	for (tok = strtok_r(str, ", ", &tmp);
2640 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
2641 		ret = output_field_add(list, tok);
2642 		if (ret == -EINVAL) {
2643 			error("Invalid --fields key: `%s'", tok);
2644 			break;
2645 		} else if (ret == -ESRCH) {
2646 			error("Unknown --fields key: `%s'", tok);
2647 			break;
2648 		}
2649 	}
2650 
2651 	return ret;
2652 }
2653 
2654 static void reset_dimensions(void)
2655 {
2656 	unsigned int i;
2657 
2658 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2659 		common_sort_dimensions[i].taken = 0;
2660 
2661 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2662 		hpp_sort_dimensions[i].taken = 0;
2663 
2664 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2665 		bstack_sort_dimensions[i].taken = 0;
2666 
2667 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2668 		memory_sort_dimensions[i].taken = 0;
2669 }
2670 
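/*
 * An order string is "strict" unless it starts with '+', which requests
 * appending to the default order instead of replacing it (see
 * setup_sort_order() and __setup_output_field()).
 */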
2671 bool is_strict_order(const char *order)
2672 {
2673 	return order && (*order != '+');
2674 }
2675 
2676 static int __setup_output_field(void)
2677 {
2678 	char *str, *strp;
2679 	int ret = -EINVAL;
2680 
2681 	if (field_order == NULL)
2682 		return 0;
2683 
2684 	strp = str = strdup(field_order);
2685 	if (str == NULL) {
2686 		error("Not enough memory to setup output fields");
2687 		return -ENOMEM;
2688 	}
2689 
2690 	if (!is_strict_order(field_order))
2691 		strp++;
2692 
2693 	if (!strlen(strp)) {
2694 		error("Invalid --fields key: `+'");
2695 		goto out;
2696 	}
2697 
2698 	ret = setup_output_list(&perf_hpp_list, strp);
2699 
2700 out:
2701 	free(str);
2702 	return ret;
2703 }
2704 
2705 int setup_sorting(struct perf_evlist *evlist)
2706 {
2707 	int err;
2708 
2709 	err = __setup_sorting(evlist);
2710 	if (err < 0)
2711 		return err;
2712 
2713 	if (parent_pattern != default_parent_pattern) {
2714 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2715 		if (err < 0)
2716 			return err;
2717 	}
2718 
2719 	reset_dimensions();
2720 
2721 	/*
2722 	 * perf diff doesn't use default hpp output fields.
2723 	 */
2724 	if (sort__mode != SORT_MODE__DIFF)
2725 		perf_hpp__init();
2726 
2727 	err = __setup_output_field();
2728 	if (err < 0)
2729 		return err;
2730 
2731 	/* copy sort keys to output fields */
2732 	perf_hpp__setup_output_field(&perf_hpp_list);
2733 	/* and then copy output fields to sort keys */
2734 	perf_hpp__append_sort_keys(&perf_hpp_list);
2735 
2736 	/* setup hists-specific output fields */
2737 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2738 		return -1;
2739 
2740 	return 0;
2741 }
2742 
2743 void reset_output_field(void)
2744 {
2745 	perf_hpp_list.need_collapse = 0;
2746 	perf_hpp_list.parent = 0;
2747 	perf_hpp_list.sym = 0;
2748 	perf_hpp_list.dso = 0;
2749 
2750 	field_order = NULL;
2751 	sort_order = NULL;
2752 
2753 	reset_dimensions();
2754 	perf_hpp__reset_output_field(&perf_hpp_list);
2755 }
2756