xref: /linux/tools/perf/util/sort.c (revision 877a7a11050ee4d465364c57f8fbf78f6b1a2559)
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9 #include "mem-events.h"
10 #include <linux/kernel.h>
11 
/* Compiled form of parent_pattern, used to match "parent" frames. */
regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
/* Default sort-key lists, one per tool/mode. */
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
/* User-selected --sort/--fields strings, NULL when not given. */
const char	*sort_order;
const char	*field_order;
/* --ignore-callees regex and whether it is in effect. */
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
26 
27 /*
28  * Replaces all occurrences of a char used with the:
29  *
30  * -t, --field-separator
31  *
32  * option, that uses a special separator character and don't pad with spaces,
33  * replacing all occurances of this separator in symbol names (and other
34  * output) with a '.' character, that thus it's the only non valid separator.
35 */
36 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
37 {
38 	int n;
39 	va_list ap;
40 
41 	va_start(ap, fmt);
42 	n = vsnprintf(bf, size, fmt, ap);
43 	if (symbol_conf.field_sep && n > 0) {
44 		char *sep = bf;
45 
46 		while (1) {
47 			sep = strchr(sep, *symbol_conf.field_sep);
48 			if (sep == NULL)
49 				break;
50 			*sep = '.';
51 		}
52 	}
53 	va_end(ap);
54 
55 	if (n >= (int)size)
56 		return size - 1;
57 	return n;
58 }
59 
60 static int64_t cmp_null(const void *l, const void *r)
61 {
62 	if (!l && !r)
63 		return 0;
64 	else if (!l)
65 		return -1;
66 	else
67 		return 1;
68 }
69 
/* --sort pid */

/* Order by thread id, higher tids first. */
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

/* Print "    tid:comm", left-aligning the comm in the remaining width. */
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	/* "%7d:" consumes 8 columns; the comm gets whatever is left. */
	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

/*
 * Thread filter: non-zero hides the entry; -1 means "not my filter
 * type", letting other filters handle it.
 */
static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= "    Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};
105 
/* --sort comm */

/* Hashing stage: group entries by their current comm string. */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* comm__str() resolves the comm's current string. */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

/* Collapse stage: same string comparison as the cmp above. */
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

/* Output-sort stage: alphabetical by comm string. */
static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

/* Reuses the thread filter: filtering by thread also filters its comm. */
struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};
143 
/* --sort dso */

/*
 * Compare two maps by their DSO name: long names under -v, short names
 * otherwise.  A missing map/DSO on either side falls back to cmp_null().
 */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Arguments swapped to get descending order out of the helper. */
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

/* Print the DSO name, or "[unknown]" when there is no map/DSO. */
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

/* Non-zero hides the entry; -1 means "not a DSO filter". */
static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};
207 
/* --sort symbol */

/* Descending three-way compare of two instruction addresses. */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

/*
 * Order symbols by start address, then end address; a missing symbol
 * on either side falls back to cmp_null().
 */
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}
228 
229 static int64_t
230 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
231 {
232 	int64_t ret;
233 
234 	if (!left->ms.sym && !right->ms.sym)
235 		return _sort__addr_cmp(left->ip, right->ip);
236 
237 	/*
238 	 * comparing symbol address alone is not enough since it's a
239 	 * relative address within a dso.
240 	 */
241 	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
242 		ret = sort__dso_cmp(left, right);
243 		if (ret != 0)
244 			return ret;
245 	}
246 
247 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
248 }
249 
/* Output-sort stage: alphabetical by symbol name. */
static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

/*
 * Print "[level] name" (text symbols) or "[level] name+0xoff" (data
 * symbols), falling back to the raw address when map/symbol are
 * missing.  Under -v the address and symtab origin char are prepended.
 */
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			/* Data symbol: append the offset into the symbol. */
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

/* Hide entries whose symbol name does not contain the filter substring. */
static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};
316 
/* --sort srcline */

/* Resolve "file:line" for the entry's ip; SRCLINE_UNKNOWN without a map. */
char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true, true);
}

/* Compare srcline strings, resolving and caching them on first use. */
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};
356 
/* --sort srcline_from */

/*
 * Compare branch-source srclines, resolving and caching them on first
 * use.  NOTE(review): branch_info is dereferenced without a NULL check
 * here; presumably branch mode guarantees it is set -- confirm before
 * reusing this key elsewhere.
 */
static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					   map__rip_2objdump(map,
							     left->branch_info->from.al_addr),
							 left->branch_info->from.sym,
							 true, true);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					     map__rip_2objdump(map,
							       right->branch_info->from.al_addr),
						     right->branch_info->from.sym,
						     true, true);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
};
399 
400 /* --sort srcline_to */
401 
402 static int64_t
403 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
404 {
405 	if (!left->branch_info->srcline_to) {
406 		struct map *map = left->branch_info->to.map;
407 		if (!map)
408 			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
409 		else
410 			left->branch_info->srcline_to = get_srcline(map->dso,
411 					   map__rip_2objdump(map,
412 							     left->branch_info->to.al_addr),
413 							 left->branch_info->from.sym,
414 							 true, true);
415 	}
416 	if (!right->branch_info->srcline_to) {
417 		struct map *map = right->branch_info->to.map;
418 		if (!map)
419 			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
420 		else
421 			right->branch_info->srcline_to = get_srcline(map->dso,
422 					     map__rip_2objdump(map,
423 							       right->branch_info->to.al_addr),
424 						     right->branch_info->to.sym,
425 						     true, true);
426 	}
427 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
428 }
429 
/* Print the cached branch-target srcline (resolved by the cmp above). */
static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
};
442 
/* --sort srcfile */

/* Shared sentinel (empty string) returned when no source file resolves. */
static char no_srcfile[1];

/*
 * Resolve the source file of an entry: take the "file" part of the
 * file:line srcline.  Returns no_srcfile when unresolvable, otherwise
 * a heap string the caller/entry owns.
 */
static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true, true);
	/* SRCLINE_UNKNOWN is a static string, no free needed for it. */
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		/* Truncate at the ':' to keep only the file name. */
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

/* Compare srcfile strings, resolving and caching them on first use. */
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};
494 
/* --sort parent */

/* Order by the name of the matched parent symbol (see parent_regex). */
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

/* Print the parent symbol name, "[other]" when none matched. */
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			      he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};
522 
/* --sort cpu */

/* Order by CPU number, higher CPUs first. */
static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header      = "CPU",
	.se_cmp	        = sort__cpu_cmp,
	.se_snprintf    = hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};
543 
/* --sort cgroup_id */

/* Descending compare of cgroup namespace device numbers. */
static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

/* Descending compare of cgroup namespace inode numbers. */
static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

/* Order by device number first, inode number as tiebreak. */
static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}
568 
569 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
570 					  char *bf, size_t size,
571 					  unsigned int width __maybe_unused)
572 {
573 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
574 			       he->cgroup_id.ino);
575 }
576 
/* Sort key definition for --sort cgroup_id. */
struct sort_entry sort_cgroup_id = {
	.se_header      = "cgroup id (dev/inode)",
	.se_cmp	        = sort__cgroup_id_cmp,
	.se_snprintf    = hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};
583 
/* --sort socket */

/* Order by processor socket, higher socket ids first. */
static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	/* width-3 trims the precision to leave room for column padding. */
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

/* Hide entries not on the filtered socket; sk < 0 disables the filter. */
static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header      = "Socket",
	.se_cmp	        = sort__socket_cmp,
	.se_snprintf    = hist_entry__socket_snprintf,
	.se_filter      = hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};
615 
/* --sort trace */

/*
 * Render a tracepoint sample to a malloc'ed string: a raw field dump
 * with --raw-trace, otherwise the event's pretty-print format.
 */
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 *
	 * NOTE(review): realloc() failure returns NULL and callers
	 * strcmp()/print the result without checking -- verify.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

/*
 * Compare trace output strings, resolving and caching them lazily.
 * Non-tracepoint events all compare equal.
 */
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

/* Print the trace output, "N/A" for non-tracepoint events. */
static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header      = "Trace output",
	.se_cmp	        = sort__trace_cmp,
	.se_snprintf    = hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
680 
/* sort keys for branch stacks */

/* Order by the DSO of the branch source address. */
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* DSO filter against the branch source map; non-zero hides the entry. */
static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

/* Order by the DSO of the branch target address. */
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* DSO filter against the branch target map; non-zero hides the entry. */
static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}
746 
747 static int64_t
748 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
749 {
750 	struct addr_map_symbol *from_l = &left->branch_info->from;
751 	struct addr_map_symbol *from_r = &right->branch_info->from;
752 
753 	if (!left->branch_info || !right->branch_info)
754 		return cmp_null(left->branch_info, right->branch_info);
755 
756 	from_l = &left->branch_info->from;
757 	from_r = &right->branch_info->from;
758 
759 	if (!from_l->sym && !from_r->sym)
760 		return _sort__addr_cmp(from_l->addr, from_r->addr);
761 
762 	return _sort__sym_cmp(from_l->sym, from_r->sym);
763 }
764 
/*
 * Order by the symbol at the branch target; entries without symbols
 * fall back to comparing the raw target addresses.
 */
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

/* Print the branch source symbol, "N/A" when there is no branch info. */
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Print the branch target symbol, "N/A" when there is no branch info. */
static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Hide entries whose branch source symbol lacks the filter substring. */
static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

/* Hide entries whose branch target symbol lacks the filter substring. */
static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
		        strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};
863 
/*
 * Group by prediction outcome: returns non-zero when the two entries'
 * mispredicted/predicted flags differ (a "different" test, not a full
 * ordering).
 */
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}
876 
877 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
878 				    size_t size, unsigned int width){
879 	static const char *out = "N/A";
880 
881 	if (he->branch_info) {
882 		if (he->branch_info->flags.predicted)
883 			out = "N";
884 		else if (he->branch_info->flags.mispred)
885 			out = "Y";
886 	}
887 
888 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
889 }
890 
/* Order by the cycle count recorded for the branch's basic block. */
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

/* Print the cycle count; "-" when unrecorded, "N/A" without branch info. */
static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};
918 
/* --sort daddr_sym */
/* Order by data access address; entries without mem_info count as 0. */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

/* Print the data-address symbol via the shared symbol formatter. */
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

/* Order by instruction address of the access; no mem_info counts as 0. */
int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

/* Print the instruction-address symbol via the shared formatter. */
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map  = he->mem_info->iaddr.map;
		sym  = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}
977 
/* Order by the DSO containing the data address. */
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}
1002 
/* Order by the memory access' lock flag; N/A when mem_info is missing. */
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	/* 10 bytes is enough for the short strings lck_scnprintf emits. */
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}
1030 
/* Order by the access' dTLB hit/miss encoding; N/A without mem_info. */
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1058 
/* Order by the memory hierarchy level of the access; N/A without mem_info. */
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1086 
1087 static int64_t
1088 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1089 {
1090 	union perf_mem_data_src data_src_l;
1091 	union perf_mem_data_src data_src_r;
1092 
1093 	if (left->mem_info)
1094 		data_src_l = left->mem_info->data_src;
1095 	else
1096 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1097 
1098 	if (right->mem_info)
1099 		data_src_r = right->mem_info->data_src;
1100 	else
1101 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1102 
1103 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1104 }
1105 
1106 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1107 				    size_t size, unsigned int width)
1108 {
1109 	char out[64];
1110 
1111 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1112 	return repsep_snprintf(bf, size, "%-*s", width, out);
1113 }
1114 
/*
 * Order entries by the data cacheline they touched: first group by
 * cpumode, then by the identity of the backing file of the mapping
 * (maj/min/ino/ino_generation), then - for userspace anonymous
 * memory - by pid, and finally by cacheline address.
 */
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	/* entries without memory info cannot be compared by cacheline */
	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	/* compare the file identity of the two mappings */
	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
1178 
/*
 * Format the "Data Cacheline" column: the cacheline address plus its
 * symbol/map, with the level character overridden to 's' for shared
 * file-backed data mmaps and 'X' when there is no map at all.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		     map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}
1206 
/* Branch-stack sort key: whether the branch was mispredicted. */
struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};
1213 
1214 static u64 he_weight(struct hist_entry *he)
1215 {
1216 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1217 }
1218 
1219 static int64_t
1220 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1221 {
1222 	return he_weight(left) - he_weight(right);
1223 }
1224 
1225 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1226 				    size_t size, unsigned int width)
1227 {
1228 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1229 }
1230 
/* Sort key: per-entry average sample weight ("local_weight"). */
struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};
1237 
1238 static int64_t
1239 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1240 {
1241 	return left->stat.weight - right->stat.weight;
1242 }
1243 
1244 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1245 					      size_t size, unsigned int width)
1246 {
1247 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1248 }
1249 
/* Sort key: total accumulated sample weight ("weight"). */
struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};
1256 
/*
 * Memory-mode sort keys; these back the entries listed in
 * memory_sort_dimensions[] further down in this file.
 */
struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};
1312 
1313 static int64_t
1314 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1315 {
1316 	if (!left->branch_info || !right->branch_info)
1317 		return cmp_null(left->branch_info, right->branch_info);
1318 
1319 	return left->branch_info->flags.abort !=
1320 		right->branch_info->flags.abort;
1321 }
1322 
1323 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1324 				    size_t size, unsigned int width)
1325 {
1326 	static const char *out = "N/A";
1327 
1328 	if (he->branch_info) {
1329 		if (he->branch_info->flags.abort)
1330 			out = "A";
1331 		else
1332 			out = ".";
1333 	}
1334 
1335 	return repsep_snprintf(bf, size, "%-*s", width, out);
1336 }
1337 
/* Branch-stack sort key: transaction abort flag. */
struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};
1344 
1345 static int64_t
1346 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1347 {
1348 	if (!left->branch_info || !right->branch_info)
1349 		return cmp_null(left->branch_info, right->branch_info);
1350 
1351 	return left->branch_info->flags.in_tx !=
1352 		right->branch_info->flags.in_tx;
1353 }
1354 
1355 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1356 				    size_t size, unsigned int width)
1357 {
1358 	static const char *out = "N/A";
1359 
1360 	if (he->branch_info) {
1361 		if (he->branch_info->flags.in_tx)
1362 			out = "T";
1363 		else
1364 			out = ".";
1365 	}
1366 
1367 	return repsep_snprintf(bf, size, "%-*s", width, out);
1368 }
1369 
/* Branch-stack sort key: branch-in-transaction flag. */
struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};
1376 
1377 static int64_t
1378 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1379 {
1380 	return left->transaction - right->transaction;
1381 }
1382 
/* Append 'str' (including its NUL) at 'p'; return the new end of string. */
static inline char *add_str(char *p, const char *str)
{
	size_t len = strlen(str);

	memcpy(p, str, len + 1);
	return p + len;
}
1388 
/*
 * Human-readable names for the PERF_TXN_* flag bits.  Entries marked
 * skip_for_len are excluded from the column-width calculation in
 * hist_entry__transaction_len().  The table is NULL-terminated.
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};
1404 
1405 int hist_entry__transaction_len(void)
1406 {
1407 	int i;
1408 	int len = 0;
1409 
1410 	for (i = 0; txbits[i].name; i++) {
1411 		if (!txbits[i].skip_for_len)
1412 			len += strlen(txbits[i].name);
1413 	}
1414 	len += 4; /* :XX<space> */
1415 	return len;
1416 }
1417 
/*
 * Format the transaction flag word as a list of flag names (from
 * txbits[]) optionally followed by ":<hex abort code>".  "NEITHER " is
 * emitted when flags are set but neither SYNC nor ASYNC is.
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}
1441 
/* Sort key: transaction flags ("transaction"). */
struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
1448 
1449 /* --sort symbol_size */
1450 
1451 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1452 {
1453 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1454 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1455 
1456 	return size_l < size_r ? -1 :
1457 		size_l == size_r ? 0 : 1;
1458 }
1459 
/*
 * Sort entries by symbol size.  Note the operands are deliberately
 * swapped when delegating to _sort__sym_size_cmp().
 */
static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}
1465 
1466 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1467 					  size_t bf_size, unsigned int width)
1468 {
1469 	if (sym)
1470 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1471 
1472 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1473 }
1474 
/* Format the "Symbol size" column for one hist entry. */
static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}
1480 
/* Sort key: size of the sampled symbol ("symbol_size"). */
struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};
1487 
1488 
/* Maps a sort-key name to its sort_entry; 'taken' marks keys already used. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};
1494 
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

/* Sort keys available in every sort mode, indexed by enum value. */
static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM
1516 
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

/* Sort keys for branch-stack mode, indexed relative to __SORT_BRANCH_STACK. */
static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM
1533 
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

/* Sort keys for memory mode, indexed relative to __SORT_MEMORY_MODE. */
static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM
1548 
/* Maps an output-field name to its perf_hpp format; 'taken' marks used ones. */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};
1554 
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

/* Output fields backed by the generic perf_hpp__format[] table. */
static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
1569 
/* A perf_hpp format wrapping a sort_entry; hpp must stay the first member
 * so container_of() works in the __sort__hpp_* callbacks. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};
1574 
/*
 * Reset the column width of a sort-entry format to the width of its
 * header name.  No-op for formats that are not sort entries.
 */
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
1585 
1586 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1587 			      struct hists *hists, int line __maybe_unused,
1588 			      int *span __maybe_unused)
1589 {
1590 	struct hpp_sort_entry *hse;
1591 	size_t len = fmt->user_len;
1592 
1593 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1594 
1595 	if (!len)
1596 		len = hists__col_len(hists, hse->se->se_width_idx);
1597 
1598 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1599 }
1600 
1601 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1602 			     struct perf_hpp *hpp __maybe_unused,
1603 			     struct hists *hists)
1604 {
1605 	struct hpp_sort_entry *hse;
1606 	size_t len = fmt->user_len;
1607 
1608 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1609 
1610 	if (!len)
1611 		len = hists__col_len(hists, hse->se->se_width_idx);
1612 
1613 	return len;
1614 }
1615 
1616 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1617 			     struct hist_entry *he)
1618 {
1619 	struct hpp_sort_entry *hse;
1620 	size_t len = fmt->user_len;
1621 
1622 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1623 
1624 	if (!len)
1625 		len = hists__col_len(he->hists, hse->se->se_width_idx);
1626 
1627 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1628 }
1629 
1630 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1631 			       struct hist_entry *a, struct hist_entry *b)
1632 {
1633 	struct hpp_sort_entry *hse;
1634 
1635 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1636 	return hse->se->se_cmp(a, b);
1637 }
1638 
1639 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1640 				    struct hist_entry *a, struct hist_entry *b)
1641 {
1642 	struct hpp_sort_entry *hse;
1643 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1644 
1645 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1646 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1647 	return collapse_fn(a, b);
1648 }
1649 
1650 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1651 				struct hist_entry *a, struct hist_entry *b)
1652 {
1653 	struct hpp_sort_entry *hse;
1654 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1655 
1656 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1657 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1658 	return sort_fn(a, b);
1659 }
1660 
/* A format is a sort entry iff its header callback is __sort__hpp_header. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
1665 
/*
 * Generate perf_hpp__is_<key>_entry(): true when the format wraps the
 * matching sort_<key> entry defined in this file.
 */
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
1685 
1686 
1687 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1688 {
1689 	struct hpp_sort_entry *hse_a;
1690 	struct hpp_sort_entry *hse_b;
1691 
1692 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1693 		return false;
1694 
1695 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
1696 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
1697 
1698 	return hse_a->se == hse_b->se;
1699 }
1700 
1701 static void hse_free(struct perf_hpp_fmt *fmt)
1702 {
1703 	struct hpp_sort_entry *hse;
1704 
1705 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1706 	free(hse);
1707 }
1708 
/*
 * Allocate a hpp_sort_entry wrapping the given sort dimension and wire
 * up all perf_hpp_fmt callbacks to the __sort__hpp_* adapters above.
 * Returns NULL on allocation failure.
 */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}
1742 
/* Release a perf_hpp_fmt duplicated by __hpp_dimension__alloc_hpp(). */
static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}
1747 
1748 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1749 						       int level)
1750 {
1751 	struct perf_hpp_fmt *fmt;
1752 
1753 	fmt = memdup(hd->fmt, sizeof(*fmt));
1754 	if (fmt) {
1755 		INIT_LIST_HEAD(&fmt->list);
1756 		INIT_LIST_HEAD(&fmt->sort_list);
1757 		fmt->free = hpp_free;
1758 		fmt->level = level;
1759 	}
1760 
1761 	return fmt;
1762 }
1763 
/*
 * Run every applicable se_filter of the entry's hpp list against the
 * given filter type/argument.
 *
 * Returns -1 when no filter of this type applied, otherwise the OR of
 * all matching filters' results (so any positive match filters the
 * entry).
 */
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}
1793 
/*
 * Allocate a hpp entry for the sort dimension and register it as a
 * sort field on the given list.  Returns -1 on allocation failure.
 */
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}
1806 
/*
 * Allocate a hpp entry for the sort dimension and register it as an
 * output column (level 0) on the list.  Returns -1 on failure.
 */
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}
1818 
/*
 * A perf_hpp format for one tracepoint field of one evsel.  hpp must
 * stay the first member for container_of() in the __sort__hde_* hooks.
 * dynamic_len tracks the widest value seen so far for column sizing.
 */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};
1826 
1827 static int hde_width(struct hpp_dynamic_entry *hde)
1828 {
1829 	if (!hde->hpp.len) {
1830 		int len = hde->dynamic_len;
1831 		int namelen = strlen(hde->field->name);
1832 		int fieldlen = hde->field->size;
1833 
1834 		if (namelen > len)
1835 			len = namelen;
1836 
1837 		if (!(hde->field->flags & FIELD_IS_STRING)) {
1838 			/* length for print hex numbers */
1839 			fieldlen = hde->field->size * 2 + 2;
1840 		}
1841 		if (fieldlen > len)
1842 			len = fieldlen;
1843 
1844 		hde->hpp.len = len;
1845 	}
1846 	return hde->hpp.len;
1847 }
1848 
/*
 * Scan the pretty-printed trace output for "<field-name> <value>" and
 * grow hde->dynamic_len to fit the widest value seen.  Raw-trace mode
 * skips this since raw values have a fixed width.
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	/* walk space-separated tokens until the field name matches */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
1891 
1892 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1893 			      struct hists *hists __maybe_unused,
1894 			      int line __maybe_unused,
1895 			      int *span __maybe_unused)
1896 {
1897 	struct hpp_dynamic_entry *hde;
1898 	size_t len = fmt->user_len;
1899 
1900 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1901 
1902 	if (!len)
1903 		len = hde_width(hde);
1904 
1905 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1906 }
1907 
1908 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1909 			     struct perf_hpp *hpp __maybe_unused,
1910 			     struct hists *hists __maybe_unused)
1911 {
1912 	struct hpp_dynamic_entry *hde;
1913 	size_t len = fmt->user_len;
1914 
1915 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1916 
1917 	if (!len)
1918 		len = hde_width(hde);
1919 
1920 	return len;
1921 }
1922 
1923 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1924 {
1925 	struct hpp_dynamic_entry *hde;
1926 
1927 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1928 
1929 	return hists_to_evsel(hists) == hde->evsel;
1930 }
1931 
/*
 * Render one entry's value for a dynamic (tracepoint field) column.
 * In pretty mode the value is cut out of the "<name> <value>" pairs in
 * the trace output; in raw mode (or when the field is not found) the
 * field is printed from raw_data via libtraceevent.  The printed
 * string is heap-allocated either way and freed before returning.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	/* walk space-separated tokens looking for the field name */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	/* field not found in the pretty output: fall back to the raw value */
	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}
1993 
/*
 * Compare two entries by the raw bytes of the tracepoint field.  A
 * NULL 'b' is a width-update pass: only refresh the dynamic length.
 * For FIELD_IS_DYNAMIC fields the stored value packs offset in the low
 * 16 bits and size in the next 16 bits.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
2026 
/* A format is a dynamic entry iff its cmp callback is __sort__hde_cmp. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}
2031 
2032 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2033 {
2034 	struct hpp_dynamic_entry *hde_a;
2035 	struct hpp_dynamic_entry *hde_b;
2036 
2037 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2038 		return false;
2039 
2040 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2041 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2042 
2043 	return hde_a->field == hde_b->field;
2044 }
2045 
2046 static void hde_free(struct perf_hpp_fmt *fmt)
2047 {
2048 	struct hpp_dynamic_entry *hde;
2049 
2050 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2051 	free(hde);
2052 }
2053 
/*
 * Allocate a dynamic entry for one tracepoint field of one evsel and
 * wire up all perf_hpp_fmt callbacks to the __sort__hde_* adapters.
 * Returns NULL on allocation failure.
 */
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width  = __sort__hde_width;
	hde->hpp.entry  = __sort__hde_entry;
	hde->hpp.color  = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}
2091 
2092 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2093 {
2094 	struct perf_hpp_fmt *new_fmt = NULL;
2095 
2096 	if (perf_hpp__is_sort_entry(fmt)) {
2097 		struct hpp_sort_entry *hse, *new_hse;
2098 
2099 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2100 		new_hse = memdup(hse, sizeof(*hse));
2101 		if (new_hse)
2102 			new_fmt = &new_hse->hpp;
2103 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2104 		struct hpp_dynamic_entry *hde, *new_hde;
2105 
2106 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2107 		new_hde = memdup(hde, sizeof(*hde));
2108 		if (new_hde)
2109 			new_fmt = &new_hde->hpp;
2110 	} else {
2111 		new_fmt = memdup(fmt, sizeof(*fmt));
2112 	}
2113 
2114 	INIT_LIST_HEAD(&new_fmt->list);
2115 	INIT_LIST_HEAD(&new_fmt->sort_list);
2116 
2117 	return new_fmt;
2118 }
2119 
/*
 * Split "event.field/opt" in place: '.' separates an optional event
 * name from the field, '/' separates an optional option suffix.
 * Missing parts come back as NULL.  Always returns 0.
 */
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *dot = strchr(str, '.');
	char *slash;

	if (dot) {
		*dot++ = '\0';
		*event = str;
		*field = dot;
	} else {
		*event = NULL;
		*field = str;
	}

	slash = strchr(*field, '/');
	if (slash)
		*slash++ = '\0';
	*opt = slash;

	return 0;
}
2144 
2145 /* find match evsel using a given event name.  The event name can be:
2146  *   1. '%' + event index (e.g. '%1' for first event)
2147  *   2. full event name (e.g. sched:sched_switch)
2148  *   3. partial event name (should not contain ':')
2149  */
2150 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2151 {
2152 	struct perf_evsel *evsel = NULL;
2153 	struct perf_evsel *pos;
2154 	bool full_name;
2155 
2156 	/* case 1 */
2157 	if (event_name[0] == '%') {
2158 		int nr = strtol(event_name+1, NULL, 0);
2159 
2160 		if (nr > evlist->nr_entries)
2161 			return NULL;
2162 
2163 		evsel = perf_evlist__first(evlist);
2164 		while (--nr > 0)
2165 			evsel = perf_evsel__next(evsel);
2166 
2167 		return evsel;
2168 	}
2169 
2170 	full_name = !!strchr(event_name, ':');
2171 	evlist__for_each_entry(evlist, pos) {
2172 		/* case 2 */
2173 		if (full_name && !strcmp(pos->name, event_name))
2174 			return pos;
2175 		/* case 3 */
2176 		if (!full_name && strstr(pos->name, event_name)) {
2177 			if (evsel) {
2178 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2179 					 event_name, evsel->name, pos->name);
2180 				return NULL;
2181 			}
2182 			evsel = pos;
2183 		}
2184 	}
2185 
2186 	return evsel;
2187 }
2188 
2189 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2190 				    struct format_field *field,
2191 				    bool raw_trace, int level)
2192 {
2193 	struct hpp_dynamic_entry *hde;
2194 
2195 	hde = __alloc_dynamic_entry(evsel, field, level);
2196 	if (hde == NULL)
2197 		return -ENOMEM;
2198 
2199 	hde->raw_trace = raw_trace;
2200 
2201 	perf_hpp__register_sort_field(&hde->hpp);
2202 	return 0;
2203 }
2204 
2205 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2206 {
2207 	int ret;
2208 	struct format_field *field;
2209 
2210 	field = evsel->tp_format->format.fields;
2211 	while (field) {
2212 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2213 		if (ret < 0)
2214 			return ret;
2215 
2216 		field = field->next;
2217 	}
2218 	return 0;
2219 }
2220 
2221 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2222 				  int level)
2223 {
2224 	int ret;
2225 	struct perf_evsel *evsel;
2226 
2227 	evlist__for_each_entry(evlist, evsel) {
2228 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2229 			continue;
2230 
2231 		ret = add_evsel_fields(evsel, raw_trace, level);
2232 		if (ret < 0)
2233 			return ret;
2234 	}
2235 	return 0;
2236 }
2237 
2238 static int add_all_matching_fields(struct perf_evlist *evlist,
2239 				   char *field_name, bool raw_trace, int level)
2240 {
2241 	int ret = -ESRCH;
2242 	struct perf_evsel *evsel;
2243 	struct format_field *field;
2244 
2245 	evlist__for_each_entry(evlist, evsel) {
2246 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2247 			continue;
2248 
2249 		field = pevent_find_any_field(evsel->tp_format, field_name);
2250 		if (field == NULL)
2251 			continue;
2252 
2253 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2254 		if (ret < 0)
2255 			break;
2256 	}
2257 	return ret;
2258 }
2259 
2260 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2261 			     int level)
2262 {
2263 	char *str, *event_name, *field_name, *opt_name;
2264 	struct perf_evsel *evsel;
2265 	struct format_field *field;
2266 	bool raw_trace = symbol_conf.raw_trace;
2267 	int ret = 0;
2268 
2269 	if (evlist == NULL)
2270 		return -ENOENT;
2271 
2272 	str = strdup(tok);
2273 	if (str == NULL)
2274 		return -ENOMEM;
2275 
2276 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2277 		ret = -EINVAL;
2278 		goto out;
2279 	}
2280 
2281 	if (opt_name) {
2282 		if (strcmp(opt_name, "raw")) {
2283 			pr_debug("unsupported field option %s\n", opt_name);
2284 			ret = -EINVAL;
2285 			goto out;
2286 		}
2287 		raw_trace = true;
2288 	}
2289 
2290 	if (!strcmp(field_name, "trace_fields")) {
2291 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2292 		goto out;
2293 	}
2294 
2295 	if (event_name == NULL) {
2296 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2297 		goto out;
2298 	}
2299 
2300 	evsel = find_evsel(evlist, event_name);
2301 	if (evsel == NULL) {
2302 		pr_debug("Cannot find event: %s\n", event_name);
2303 		ret = -ENOENT;
2304 		goto out;
2305 	}
2306 
2307 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2308 		pr_debug("%s is not a tracepoint event\n", event_name);
2309 		ret = -EINVAL;
2310 		goto out;
2311 	}
2312 
2313 	if (!strcmp(field_name, "*")) {
2314 		ret = add_evsel_fields(evsel, raw_trace, level);
2315 	} else {
2316 		field = pevent_find_any_field(evsel->tp_format, field_name);
2317 		if (field == NULL) {
2318 			pr_debug("Cannot find event field for %s.%s\n",
2319 				 event_name, field_name);
2320 			return -ENOENT;
2321 		}
2322 
2323 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2324 	}
2325 
2326 out:
2327 	free(str);
2328 	return ret;
2329 }
2330 
2331 static int __sort_dimension__add(struct sort_dimension *sd,
2332 				 struct perf_hpp_list *list,
2333 				 int level)
2334 {
2335 	if (sd->taken)
2336 		return 0;
2337 
2338 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2339 		return -1;
2340 
2341 	if (sd->entry->se_collapse)
2342 		list->need_collapse = 1;
2343 
2344 	sd->taken = 1;
2345 
2346 	return 0;
2347 }
2348 
2349 static int __hpp_dimension__add(struct hpp_dimension *hd,
2350 				struct perf_hpp_list *list,
2351 				int level)
2352 {
2353 	struct perf_hpp_fmt *fmt;
2354 
2355 	if (hd->taken)
2356 		return 0;
2357 
2358 	fmt = __hpp_dimension__alloc_hpp(hd, level);
2359 	if (!fmt)
2360 		return -1;
2361 
2362 	hd->taken = 1;
2363 	perf_hpp_list__register_sort_field(list, fmt);
2364 	return 0;
2365 }
2366 
2367 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2368 					struct sort_dimension *sd)
2369 {
2370 	if (sd->taken)
2371 		return 0;
2372 
2373 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
2374 		return -1;
2375 
2376 	sd->taken = 1;
2377 	return 0;
2378 }
2379 
2380 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2381 				       struct hpp_dimension *hd)
2382 {
2383 	struct perf_hpp_fmt *fmt;
2384 
2385 	if (hd->taken)
2386 		return 0;
2387 
2388 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
2389 	if (!fmt)
2390 		return -1;
2391 
2392 	hd->taken = 1;
2393 	perf_hpp_list__column_register(list, fmt);
2394 	return 0;
2395 }
2396 
/*
 * Add the hpp dimension at index @col (a PERF_HPP__* value) to the
 * global output field list.  Returns 0 on success, -1 on failure.
 */
int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
2402 
/*
 * Parse one sort key token and register the matching dimension on @list
 * at hierarchy @level.  The tables are searched in order: common sort
 * dimensions, hpp dimensions, branch stack dimensions (branch mode
 * only), memory dimensions (memory mode only).  As a last resort the
 * token is tried as a dynamic tracepoint field (add_dynamic_entry()).
 *
 * Returns 0 on success, -EINVAL when the key is not valid in the
 * current mode (or parent regex compilation fails), -ESRCH when the
 * key is unknown.
 */
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		/* case-insensitive prefix match on the key name */
		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			/* compile the --parent filter pattern up front */
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* branch stack keys are only meaningful in branch mode */
		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* memory keys are only meaningful in memory mode */
		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		/* dcacheline sorting requires a known cacheline size */
		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
2498 
/*
 * Split the sort order string @str (modified in place) into tokens and
 * add each as a sort dimension.  Keys wrapped in '{...}' share one
 * hierarchy level; every other key gets the next level.  Returns 0 on
 * success or the first error reported by sort_dimension__add().
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			/* inside a '{}' group, keys stay on the same level */
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			/* terminate the current token, continue after it */
			*tmp = '\0';
			str = tmp + 1;
		}

		/* skip empty tokens (consecutive separators) */
		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
2545 
/*
 * Pick the default sort order string for the current sort mode.  When
 * every event in @evlist is a tracepoint, the mode is switched to
 * SORT_MODE__TRACEPOINT as a side effect (and "trace_fields" is used
 * when raw trace output was requested).
 */
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	/* indexed by enum sort_mode - keep in sync with it */
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	/* use the tracepoint order only if every event is a tracepoint */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
2579 
/*
 * Expand a '+'-prefixed --sort argument: replace the global sort_order
 * with "<mode default order>,<user keys>".  A strict (no '+') or unset
 * sort order is left untouched.  Returns 0 or a negative error code.
 */
static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	/* a bare "+" carries no keys to append */
	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
2609 
2610 /*
2611  * Adds 'pre,' prefix into 'str' is 'pre' is
2612  * not already part of 'str'.
2613  */
2614 static char *prefix_if_not_in(const char *pre, char *str)
2615 {
2616 	char *n;
2617 
2618 	if (!str || strstr(str, pre))
2619 		return str;
2620 
2621 	if (asprintf(&n, "%s,%s", pre, str) < 0)
2622 		return NULL;
2623 
2624 	free(str);
2625 	return n;
2626 }
2627 
2628 static char *setup_overhead(char *keys)
2629 {
2630 	if (sort__mode == SORT_MODE__DIFF)
2631 		return keys;
2632 
2633 	keys = prefix_if_not_in("overhead", keys);
2634 
2635 	if (symbol_conf.cumulate_callchain)
2636 		keys = prefix_if_not_in("overhead_children", keys);
2637 
2638 	return keys;
2639 }
2640 
/*
 * Build the sort key list from the global sort_order (or the mode's
 * default) and register each key on perf_hpp_list.
 */
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	/* expand a '+'-prefixed sort order into the global first */
	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	/* setup_sort_list() cuts the string in place, so take a copy */
	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			/*
			 * NOTE(review): the original 'str' appears to leak on
			 * this path - setup_overhead() already lost track of
			 * it, so it cannot safely be freed here; confirm.
			 */
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}
2686 
2687 void perf_hpp__set_elide(int idx, bool elide)
2688 {
2689 	struct perf_hpp_fmt *fmt;
2690 	struct hpp_sort_entry *hse;
2691 
2692 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2693 		if (!perf_hpp__is_sort_entry(fmt))
2694 			continue;
2695 
2696 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2697 		if (hse->se->se_width_idx == idx) {
2698 			fmt->elide = elide;
2699 			break;
2700 		}
2701 	}
2702 }
2703 
2704 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2705 {
2706 	if (list && strlist__nr_entries(list) == 1) {
2707 		if (fp != NULL)
2708 			fprintf(fp, "# %s: %s\n", list_name,
2709 				strlist__entry(list, 0)->s);
2710 		return true;
2711 	}
2712 	return false;
2713 }
2714 
2715 static bool get_elide(int idx, FILE *output)
2716 {
2717 	switch (idx) {
2718 	case HISTC_SYMBOL:
2719 		return __get_elide(symbol_conf.sym_list, "symbol", output);
2720 	case HISTC_DSO:
2721 		return __get_elide(symbol_conf.dso_list, "dso", output);
2722 	case HISTC_COMM:
2723 		return __get_elide(symbol_conf.comm_list, "comm", output);
2724 	default:
2725 		break;
2726 	}
2727 
2728 	if (sort__mode != SORT_MODE__BRANCH)
2729 		return false;
2730 
2731 	switch (idx) {
2732 	case HISTC_SYMBOL_FROM:
2733 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2734 	case HISTC_SYMBOL_TO:
2735 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2736 	case HISTC_DSO_FROM:
2737 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2738 	case HISTC_DSO_TO:
2739 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2740 	default:
2741 		break;
2742 	}
2743 
2744 	return false;
2745 }
2746 
/*
 * Compute the elide flag for every sort entry from the symbol/dso/comm
 * filter lists, announcing elided columns on @output.  If that would
 * hide every sort entry, revert them all so something stays visible.
 */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		/* at least one visible entry - nothing to revert */
		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
2779 
2780 int output_field_add(struct perf_hpp_list *list, char *tok)
2781 {
2782 	unsigned int i;
2783 
2784 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2785 		struct sort_dimension *sd = &common_sort_dimensions[i];
2786 
2787 		if (strncasecmp(tok, sd->name, strlen(tok)))
2788 			continue;
2789 
2790 		return __sort_dimension__add_output(list, sd);
2791 	}
2792 
2793 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2794 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2795 
2796 		if (strncasecmp(tok, hd->name, strlen(tok)))
2797 			continue;
2798 
2799 		return __hpp_dimension__add_output(list, hd);
2800 	}
2801 
2802 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2803 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2804 
2805 		if (strncasecmp(tok, sd->name, strlen(tok)))
2806 			continue;
2807 
2808 		return __sort_dimension__add_output(list, sd);
2809 	}
2810 
2811 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2812 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2813 
2814 		if (strncasecmp(tok, sd->name, strlen(tok)))
2815 			continue;
2816 
2817 		return __sort_dimension__add_output(list, sd);
2818 	}
2819 
2820 	return -ESRCH;
2821 }
2822 
/*
 * Tokenize the output field string @str (modified in place) on commas
 * and spaces and add each token as an output field.  Stops at the first
 * invalid or unknown key and returns its error code.
 */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *saveptr;
	char *tok = strtok_r(str, ", ", &saveptr);
	int ret = 0;

	while (tok) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		}
		if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
		tok = strtok_r(NULL, ", ", &saveptr);
	}

	return ret;
}
2842 
2843 void reset_dimensions(void)
2844 {
2845 	unsigned int i;
2846 
2847 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2848 		common_sort_dimensions[i].taken = 0;
2849 
2850 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2851 		hpp_sort_dimensions[i].taken = 0;
2852 
2853 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2854 		bstack_sort_dimensions[i].taken = 0;
2855 
2856 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2857 		memory_sort_dimensions[i].taken = 0;
2858 }
2859 
/*
 * A "strict" order is a non-NULL string that does not start with '+'
 * (a leading '+' means "append to the default order").
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return *order != '+';
}
2864 
2865 static int __setup_output_field(void)
2866 {
2867 	char *str, *strp;
2868 	int ret = -EINVAL;
2869 
2870 	if (field_order == NULL)
2871 		return 0;
2872 
2873 	strp = str = strdup(field_order);
2874 	if (str == NULL) {
2875 		error("Not enough memory to setup output fields");
2876 		return -ENOMEM;
2877 	}
2878 
2879 	if (!is_strict_order(field_order))
2880 		strp++;
2881 
2882 	if (!strlen(strp)) {
2883 		error("Invalid --fields key: `+'");
2884 		goto out;
2885 	}
2886 
2887 	ret = setup_output_list(&perf_hpp_list, strp);
2888 
2889 out:
2890 	free(str);
2891 	return ret;
2892 }
2893 
/*
 * Top-level sort/output setup: parse the sort keys, the optional
 * "parent" key, and the output fields, then wire sort keys and output
 * fields into each other.  Returns 0 on success, a negative error code
 * otherwise.
 */
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	/* a non-default --parent pattern implies the "parent" sort key */
	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}
2931 
2932 void reset_output_field(void)
2933 {
2934 	perf_hpp_list.need_collapse = 0;
2935 	perf_hpp_list.parent = 0;
2936 	perf_hpp_list.sym = 0;
2937 	perf_hpp_list.dso = 0;
2938 
2939 	field_order = NULL;
2940 	sort_order = NULL;
2941 
2942 	reset_dimensions();
2943 	perf_hpp__reset_output_field(&perf_hpp_list);
2944 }
2945