xref: /linux/tools/perf/util/sort.c (revision 607bfbd7ffc60156ae0831c917497dc91a57dd8d)
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9 
10 regex_t		parent_regex;
11 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
12 const char	*parent_pattern = default_parent_pattern;
13 const char	default_sort_order[] = "comm,dso,symbol";
14 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char	default_top_sort_order[] = "dso,symbol";
17 const char	default_diff_sort_order[] = "dso,symbol";
18 const char	default_tracepoint_sort_order[] = "trace";
19 const char	*sort_order;
20 const char	*field_order;
21 regex_t		ignore_callees_regex;
22 int		have_ignore_callees = 0;
23 int		sort__need_collapse = 0;
24 int		sort__has_parent = 0;
25 int		sort__has_sym = 0;
26 int		sort__has_dso = 0;
27 int		sort__has_socket = 0;
28 int		sort__has_thread = 0;
29 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
30 
31 /*
32  * Replaces all occurrences of the character given with the:
33  *
34  * -t, --field-separator
35  *
36  * option, which selects a special separator character and disables padding
37  * with spaces. Every occurrence of this separator in symbol names (and
38  * other output) is replaced with a '.', making it the only invalid separator.
39  */
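/*
 * Illustrative example (not from the original source): with "-t ," a symbol
 * name such as "foo,bar" is emitted as "foo.bar", so a ',' in the output can
 * only ever mean "next field".
 */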
40 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
41 {
42 	int n;
43 	va_list ap;
44 
45 	va_start(ap, fmt);
46 	n = vsnprintf(bf, size, fmt, ap);
47 	if (symbol_conf.field_sep && n > 0) {
48 		char *sep = bf;
49 
50 		while (1) {
51 			sep = strchr(sep, *symbol_conf.field_sep);
52 			if (sep == NULL)
53 				break;
54 			*sep = '.';
55 		}
56 	}
57 	va_end(ap);
58 
59 	if (n >= (int)size)
60 		return size - 1;
61 	return n;
62 }
63 
64 static int64_t cmp_null(const void *l, const void *r)
65 {
66 	if (!l && !r)
67 		return 0;
68 	else if (!l)
69 		return -1;
70 	else
71 		return 1;
72 }
73 
74 /* --sort pid */
75 
76 static int64_t
77 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
78 {
79 	return right->thread->tid - left->thread->tid;
80 }
81 
82 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
83 				       size_t size, unsigned int width)
84 {
85 	const char *comm = thread__comm_str(he->thread);
86 
87 	width = max(7U, width) - 6;
88 	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
89 			       width, width, comm ?: "");
90 }
91 
92 struct sort_entry sort_thread = {
93 	.se_header	= "  Pid:Command",
94 	.se_cmp		= sort__thread_cmp,
95 	.se_snprintf	= hist_entry__thread_snprintf,
96 	.se_width_idx	= HISTC_THREAD,
97 };
98 
99 /* --sort comm */
100 
101 static int64_t
102 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
103 {
104 	/* Compare the command name strings */
105 	return strcmp(comm__str(right->comm), comm__str(left->comm));
106 }
107 
108 static int64_t
109 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
110 {
111 	/* Compare the command name strings */
112 	return strcmp(comm__str(right->comm), comm__str(left->comm));
113 }
114 
115 static int64_t
116 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
117 {
118 	return strcmp(comm__str(right->comm), comm__str(left->comm));
119 }
120 
121 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
122 				     size_t size, unsigned int width)
123 {
124 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
125 }
126 
127 struct sort_entry sort_comm = {
128 	.se_header	= "Command",
129 	.se_cmp		= sort__comm_cmp,
130 	.se_collapse	= sort__comm_collapse,
131 	.se_sort	= sort__comm_sort,
132 	.se_snprintf	= hist_entry__comm_snprintf,
133 	.se_width_idx	= HISTC_COMM,
134 };
135 
136 /* --sort dso */
137 
138 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
139 {
140 	struct dso *dso_l = map_l ? map_l->dso : NULL;
141 	struct dso *dso_r = map_r ? map_r->dso : NULL;
142 	const char *dso_name_l, *dso_name_r;
143 
144 	if (!dso_l || !dso_r)
145 		return cmp_null(dso_r, dso_l);
146 
147 	if (verbose) {
148 		dso_name_l = dso_l->long_name;
149 		dso_name_r = dso_r->long_name;
150 	} else {
151 		dso_name_l = dso_l->short_name;
152 		dso_name_r = dso_r->short_name;
153 	}
154 
155 	return strcmp(dso_name_l, dso_name_r);
156 }
157 
158 static int64_t
159 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
160 {
161 	return _sort__dso_cmp(right->ms.map, left->ms.map);
162 }
163 
164 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
165 				     size_t size, unsigned int width)
166 {
167 	if (map && map->dso) {
168 		const char *dso_name = !verbose ? map->dso->short_name :
169 			map->dso->long_name;
170 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
171 	}
172 
173 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
174 }
175 
176 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
177 				    size_t size, unsigned int width)
178 {
179 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
180 }
181 
182 struct sort_entry sort_dso = {
183 	.se_header	= "Shared Object",
184 	.se_cmp		= sort__dso_cmp,
185 	.se_snprintf	= hist_entry__dso_snprintf,
186 	.se_width_idx	= HISTC_DSO,
187 };
188 
189 /* --sort symbol */
190 
191 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
192 {
193 	return (int64_t)(right_ip - left_ip);
194 }
195 
196 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
197 {
198 	if (!sym_l || !sym_r)
199 		return cmp_null(sym_l, sym_r);
200 
201 	if (sym_l == sym_r)
202 		return 0;
203 
204 	if (sym_l->start != sym_r->start)
205 		return (int64_t)(sym_r->start - sym_l->start);
206 
207 	return (int64_t)(sym_r->end - sym_l->end);
208 }
209 
210 static int64_t
211 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
212 {
213 	int64_t ret;
214 
215 	if (!left->ms.sym && !right->ms.sym)
216 		return _sort__addr_cmp(left->ip, right->ip);
217 
218 	/*
219 	 * comparing symbol address alone is not enough since it's a
220 	 * relative address within a dso.
221 	 */
222 	if (!sort__has_dso) {
223 		ret = sort__dso_cmp(left, right);
224 		if (ret != 0)
225 			return ret;
226 	}
227 
228 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
229 }
230 
231 static int64_t
232 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
233 {
234 	if (!left->ms.sym || !right->ms.sym)
235 		return cmp_null(left->ms.sym, right->ms.sym);
236 
237 	return strcmp(right->ms.sym->name, left->ms.sym->name);
238 }
239 
240 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
241 				     u64 ip, char level, char *bf, size_t size,
242 				     unsigned int width)
243 {
244 	size_t ret = 0;
245 
246 	if (verbose) {
247 		char o = map ? dso__symtab_origin(map->dso) : '!';
248 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
249 				       BITS_PER_LONG / 4 + 2, ip, o);
250 	}
251 
252 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
253 	if (sym && map) {
254 		if (map->type == MAP__VARIABLE) {
255 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
256 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
257 					ip - map->unmap_ip(map, sym->start));
258 		} else {
259 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
260 					       width - ret,
261 					       sym->name);
262 		}
263 	} else {
264 		size_t len = BITS_PER_LONG / 4;
265 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
266 				       len, ip);
267 	}
268 
269 	return ret;
270 }
271 
272 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
273 				    size_t size, unsigned int width)
274 {
275 	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
276 					 he->level, bf, size, width);
277 }
278 
279 struct sort_entry sort_sym = {
280 	.se_header	= "Symbol",
281 	.se_cmp		= sort__sym_cmp,
282 	.se_sort	= sort__sym_sort,
283 	.se_snprintf	= hist_entry__sym_snprintf,
284 	.se_width_idx	= HISTC_SYMBOL,
285 };
286 
287 /* --sort srcline */
288 
289 static int64_t
290 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
291 {
292 	if (!left->srcline) {
293 		if (!left->ms.map)
294 			left->srcline = SRCLINE_UNKNOWN;
295 		else {
296 			struct map *map = left->ms.map;
297 			left->srcline = get_srcline(map->dso,
298 					   map__rip_2objdump(map, left->ip),
299 						    left->ms.sym, true);
300 		}
301 	}
302 	if (!right->srcline) {
303 		if (!right->ms.map)
304 			right->srcline = SRCLINE_UNKNOWN;
305 		else {
306 			struct map *map = right->ms.map;
307 			right->srcline = get_srcline(map->dso,
308 					     map__rip_2objdump(map, right->ip),
309 						     right->ms.sym, true);
310 		}
311 	}
312 	return strcmp(right->srcline, left->srcline);
313 }
314 
315 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
316 					size_t size, unsigned int width)
317 {
318 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
319 }
320 
321 struct sort_entry sort_srcline = {
322 	.se_header	= "Source:Line",
323 	.se_cmp		= sort__srcline_cmp,
324 	.se_snprintf	= hist_entry__srcline_snprintf,
325 	.se_width_idx	= HISTC_SRCLINE,
326 };
327 
328 /* --sort srcfile */
329 
330 static char no_srcfile[1];
331 
332 static char *get_srcfile(struct hist_entry *e)
333 {
334 	char *sf, *p;
335 	struct map *map = e->ms.map;
336 
337 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
338 			 e->ms.sym, false, true);
339 	if (!strcmp(sf, SRCLINE_UNKNOWN))
340 		return no_srcfile;
341 	p = strchr(sf, ':');
342 	if (p && *sf) {
343 		*p = 0;
344 		return sf;
345 	}
346 	free(sf);
347 	return no_srcfile;
348 }
349 
350 static int64_t
351 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
352 {
353 	if (!left->srcfile) {
354 		if (!left->ms.map)
355 			left->srcfile = no_srcfile;
356 		else
357 			left->srcfile = get_srcfile(left);
358 	}
359 	if (!right->srcfile) {
360 		if (!right->ms.map)
361 			right->srcfile = no_srcfile;
362 		else
363 			right->srcfile = get_srcfile(right);
364 	}
365 	return strcmp(right->srcfile, left->srcfile);
366 }
367 
368 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
369 					size_t size, unsigned int width)
370 {
371 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
372 }
373 
374 struct sort_entry sort_srcfile = {
375 	.se_header	= "Source File",
376 	.se_cmp		= sort__srcfile_cmp,
377 	.se_snprintf	= hist_entry__srcfile_snprintf,
378 	.se_width_idx	= HISTC_SRCFILE,
379 };
380 
381 /* --sort parent */
382 
383 static int64_t
384 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
385 {
386 	struct symbol *sym_l = left->parent;
387 	struct symbol *sym_r = right->parent;
388 
389 	if (!sym_l || !sym_r)
390 		return cmp_null(sym_l, sym_r);
391 
392 	return strcmp(sym_r->name, sym_l->name);
393 }
394 
395 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
396 				       size_t size, unsigned int width)
397 {
398 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
399 			      he->parent ? he->parent->name : "[other]");
400 }
401 
402 struct sort_entry sort_parent = {
403 	.se_header	= "Parent symbol",
404 	.se_cmp		= sort__parent_cmp,
405 	.se_snprintf	= hist_entry__parent_snprintf,
406 	.se_width_idx	= HISTC_PARENT,
407 };
408 
409 /* --sort cpu */
410 
411 static int64_t
412 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
413 {
414 	return right->cpu - left->cpu;
415 }
416 
417 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
418 				    size_t size, unsigned int width)
419 {
420 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
421 }
422 
423 struct sort_entry sort_cpu = {
424 	.se_header      = "CPU",
425 	.se_cmp	        = sort__cpu_cmp,
426 	.se_snprintf    = hist_entry__cpu_snprintf,
427 	.se_width_idx	= HISTC_CPU,
428 };
429 
430 /* --sort socket */
431 
432 static int64_t
433 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
434 {
435 	return right->socket - left->socket;
436 }
437 
438 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
439 				    size_t size, unsigned int width)
440 {
441 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
442 }
443 
444 struct sort_entry sort_socket = {
445 	.se_header      = "Socket",
446 	.se_cmp	        = sort__socket_cmp,
447 	.se_snprintf    = hist_entry__socket_snprintf,
448 	.se_width_idx	= HISTC_SOCKET,
449 };
450 
451 /* --sort trace */
452 
453 static char *get_trace_output(struct hist_entry *he)
454 {
455 	struct trace_seq seq;
456 	struct perf_evsel *evsel;
457 	struct pevent_record rec = {
458 		.data = he->raw_data,
459 		.size = he->raw_size,
460 	};
461 
462 	evsel = hists_to_evsel(he->hists);
463 
464 	trace_seq_init(&seq);
465 	if (symbol_conf.raw_trace) {
466 		pevent_print_fields(&seq, he->raw_data, he->raw_size,
467 				    evsel->tp_format);
468 	} else {
469 		pevent_event_info(&seq, evsel->tp_format, &rec);
470 	}
471 	return seq.buffer;
472 }
473 
474 static int64_t
475 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
476 {
477 	struct perf_evsel *evsel;
478 
479 	evsel = hists_to_evsel(left->hists);
480 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
481 		return 0;
482 
483 	if (left->trace_output == NULL)
484 		left->trace_output = get_trace_output(left);
485 	if (right->trace_output == NULL)
486 		right->trace_output = get_trace_output(right);
487 
488 	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
489 	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
490 
491 	return strcmp(right->trace_output, left->trace_output);
492 }
493 
494 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
495 				    size_t size, unsigned int width)
496 {
497 	struct perf_evsel *evsel;
498 
499 	evsel = hists_to_evsel(he->hists);
500 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
501 		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
502 
503 	if (he->trace_output == NULL)
504 		he->trace_output = get_trace_output(he);
505 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
506 }
507 
508 struct sort_entry sort_trace = {
509 	.se_header      = "Trace output",
510 	.se_cmp	        = sort__trace_cmp,
511 	.se_snprintf    = hist_entry__trace_snprintf,
512 	.se_width_idx	= HISTC_TRACE,
513 };
514 
515 /* sort keys for branch stacks */
516 
517 static int64_t
518 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
519 {
520 	if (!left->branch_info || !right->branch_info)
521 		return cmp_null(left->branch_info, right->branch_info);
522 
523 	return _sort__dso_cmp(left->branch_info->from.map,
524 			      right->branch_info->from.map);
525 }
526 
527 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
528 				    size_t size, unsigned int width)
529 {
530 	if (he->branch_info)
531 		return _hist_entry__dso_snprintf(he->branch_info->from.map,
532 						 bf, size, width);
533 	else
534 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
535 }
536 
537 static int64_t
538 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
539 {
540 	if (!left->branch_info || !right->branch_info)
541 		return cmp_null(left->branch_info, right->branch_info);
542 
543 	return _sort__dso_cmp(left->branch_info->to.map,
544 			      right->branch_info->to.map);
545 }
546 
547 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
548 				       size_t size, unsigned int width)
549 {
550 	if (he->branch_info)
551 		return _hist_entry__dso_snprintf(he->branch_info->to.map,
552 						 bf, size, width);
553 	else
554 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
555 }
556 
557 static int64_t
558 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
559 {
560 	struct addr_map_symbol *from_l;
561 	struct addr_map_symbol *from_r;
562 
563 	if (!left->branch_info || !right->branch_info)
564 		return cmp_null(left->branch_info, right->branch_info);
565 
566 	from_l = &left->branch_info->from;
567 	from_r = &right->branch_info->from;
568 
569 	if (!from_l->sym && !from_r->sym)
570 		return _sort__addr_cmp(from_l->addr, from_r->addr);
571 
572 	return _sort__sym_cmp(from_l->sym, from_r->sym);
573 }
574 
575 static int64_t
576 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
577 {
578 	struct addr_map_symbol *to_l, *to_r;
579 
580 	if (!left->branch_info || !right->branch_info)
581 		return cmp_null(left->branch_info, right->branch_info);
582 
583 	to_l = &left->branch_info->to;
584 	to_r = &right->branch_info->to;
585 
586 	if (!to_l->sym && !to_r->sym)
587 		return _sort__addr_cmp(to_l->addr, to_r->addr);
588 
589 	return _sort__sym_cmp(to_l->sym, to_r->sym);
590 }
591 
592 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
593 					 size_t size, unsigned int width)
594 {
595 	if (he->branch_info) {
596 		struct addr_map_symbol *from = &he->branch_info->from;
597 
598 		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
599 						 he->level, bf, size, width);
600 	}
601 
602 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
603 }
604 
605 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
606 				       size_t size, unsigned int width)
607 {
608 	if (he->branch_info) {
609 		struct addr_map_symbol *to = &he->branch_info->to;
610 
611 		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
612 						 he->level, bf, size, width);
613 	}
614 
615 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
616 }
617 
618 struct sort_entry sort_dso_from = {
619 	.se_header	= "Source Shared Object",
620 	.se_cmp		= sort__dso_from_cmp,
621 	.se_snprintf	= hist_entry__dso_from_snprintf,
622 	.se_width_idx	= HISTC_DSO_FROM,
623 };
624 
625 struct sort_entry sort_dso_to = {
626 	.se_header	= "Target Shared Object",
627 	.se_cmp		= sort__dso_to_cmp,
628 	.se_snprintf	= hist_entry__dso_to_snprintf,
629 	.se_width_idx	= HISTC_DSO_TO,
630 };
631 
632 struct sort_entry sort_sym_from = {
633 	.se_header	= "Source Symbol",
634 	.se_cmp		= sort__sym_from_cmp,
635 	.se_snprintf	= hist_entry__sym_from_snprintf,
636 	.se_width_idx	= HISTC_SYMBOL_FROM,
637 };
638 
639 struct sort_entry sort_sym_to = {
640 	.se_header	= "Target Symbol",
641 	.se_cmp		= sort__sym_to_cmp,
642 	.se_snprintf	= hist_entry__sym_to_snprintf,
643 	.se_width_idx	= HISTC_SYMBOL_TO,
644 };
645 
646 static int64_t
647 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
648 {
649 	unsigned char mp, p;
650 
651 	if (!left->branch_info || !right->branch_info)
652 		return cmp_null(left->branch_info, right->branch_info);
653 
654 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
655 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
656 	return mp || p;
657 }
658 
659 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
660 				    size_t size, unsigned int width){
661 	const char *out = "N/A";
662 
663 	if (he->branch_info) {
664 		if (he->branch_info->flags.predicted)
665 			out = "N";
666 		else if (he->branch_info->flags.mispred)
667 			out = "Y";
668 	}
669 
670 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
671 }
672 
673 static int64_t
674 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
675 {
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

676 	return left->branch_info->flags.cycles -
677 		right->branch_info->flags.cycles;
678 }
679 
680 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
681 				    size_t size, unsigned int width)
682 {
	if (!he->branch_info)
		return repsep_snprintf(bf, size, "%-*s", width, "N/A");
683 	if (he->branch_info->flags.cycles == 0)
684 		return repsep_snprintf(bf, size, "%-*s", width, "-");
685 	return repsep_snprintf(bf, size, "%-*hd", width,
686 			       he->branch_info->flags.cycles);
687 }
688 
689 struct sort_entry sort_cycles = {
690 	.se_header	= "Basic Block Cycles",
691 	.se_cmp		= sort__cycles_cmp,
692 	.se_snprintf	= hist_entry__cycles_snprintf,
693 	.se_width_idx	= HISTC_CYCLES,
694 };
695 
696 /* --sort daddr_sym */
697 static int64_t
698 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
699 {
700 	uint64_t l = 0, r = 0;
701 
702 	if (left->mem_info)
703 		l = left->mem_info->daddr.addr;
704 	if (right->mem_info)
705 		r = right->mem_info->daddr.addr;
706 
707 	return (int64_t)(r - l);
708 }
709 
710 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
711 				    size_t size, unsigned int width)
712 {
713 	uint64_t addr = 0;
714 	struct map *map = NULL;
715 	struct symbol *sym = NULL;
716 
717 	if (he->mem_info) {
718 		addr = he->mem_info->daddr.addr;
719 		map = he->mem_info->daddr.map;
720 		sym = he->mem_info->daddr.sym;
721 	}
722 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
723 					 width);
724 }
725 
726 static int64_t
727 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
728 {
729 	uint64_t l = 0, r = 0;
730 
731 	if (left->mem_info)
732 		l = left->mem_info->iaddr.addr;
733 	if (right->mem_info)
734 		r = right->mem_info->iaddr.addr;
735 
736 	return (int64_t)(r - l);
737 }
738 
739 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
740 				    size_t size, unsigned int width)
741 {
742 	uint64_t addr = 0;
743 	struct map *map = NULL;
744 	struct symbol *sym = NULL;
745 
746 	if (he->mem_info) {
747 		addr = he->mem_info->iaddr.addr;
748 		map  = he->mem_info->iaddr.map;
749 		sym  = he->mem_info->iaddr.sym;
750 	}
751 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
752 					 width);
753 }
754 
755 static int64_t
756 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
757 {
758 	struct map *map_l = NULL;
759 	struct map *map_r = NULL;
760 
761 	if (left->mem_info)
762 		map_l = left->mem_info->daddr.map;
763 	if (right->mem_info)
764 		map_r = right->mem_info->daddr.map;
765 
766 	return _sort__dso_cmp(map_l, map_r);
767 }
768 
769 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
770 				    size_t size, unsigned int width)
771 {
772 	struct map *map = NULL;
773 
774 	if (he->mem_info)
775 		map = he->mem_info->daddr.map;
776 
777 	return _hist_entry__dso_snprintf(map, bf, size, width);
778 }
779 
780 static int64_t
781 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
782 {
783 	union perf_mem_data_src data_src_l;
784 	union perf_mem_data_src data_src_r;
785 
786 	if (left->mem_info)
787 		data_src_l = left->mem_info->data_src;
788 	else
789 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
790 
791 	if (right->mem_info)
792 		data_src_r = right->mem_info->data_src;
793 	else
794 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
795 
796 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
797 }
798 
799 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
800 				    size_t size, unsigned int width)
801 {
802 	const char *out;
803 	u64 mask = PERF_MEM_LOCK_NA;
804 
805 	if (he->mem_info)
806 		mask = he->mem_info->data_src.mem_lock;
807 
808 	if (mask & PERF_MEM_LOCK_NA)
809 		out = "N/A";
810 	else if (mask & PERF_MEM_LOCK_LOCKED)
811 		out = "Yes";
812 	else
813 		out = "No";
814 
815 	return repsep_snprintf(bf, size, "%.*s", width, out);
816 }
817 
818 static int64_t
819 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
820 {
821 	union perf_mem_data_src data_src_l;
822 	union perf_mem_data_src data_src_r;
823 
824 	if (left->mem_info)
825 		data_src_l = left->mem_info->data_src;
826 	else
827 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
828 
829 	if (right->mem_info)
830 		data_src_r = right->mem_info->data_src;
831 	else
832 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
833 
834 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
835 }
836 
837 static const char * const tlb_access[] = {
838 	"N/A",
839 	"HIT",
840 	"MISS",
841 	"L1",
842 	"L2",
843 	"Walker",
844 	"Fault",
845 };
846 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
847 
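/*
 * Builds a human readable string from the TLB access bit mask, e.g. a sample
 * with PERF_MEM_TLB_L1|PERF_MEM_TLB_L2|PERF_MEM_TLB_HIT prints as
 * "L1 or L2 hit" (illustrative; the HIT/MISS bits are appended last).  The
 * memory level and snoop printers below follow the same pattern.
 */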
848 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
849 				    size_t size, unsigned int width)
850 {
851 	char out[64];
852 	size_t sz = sizeof(out) - 1; /* -1 for null termination */
853 	size_t l = 0, i;
854 	u64 m = PERF_MEM_TLB_NA;
855 	u64 hit, miss;
856 
857 	out[0] = '\0';
858 
859 	if (he->mem_info)
860 		m = he->mem_info->data_src.mem_dtlb;
861 
862 	hit = m & PERF_MEM_TLB_HIT;
863 	miss = m & PERF_MEM_TLB_MISS;
864 
865 	/* already taken care of */
866 	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
867 
868 	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
869 		if (!(m & 0x1))
870 			continue;
871 		if (l) {
872 			strcat(out, " or ");
873 			l += 4;
874 		}
875 		strncat(out, tlb_access[i], sz - l);
876 		l += strlen(tlb_access[i]);
877 	}
878 	if (*out == '\0')
879 		strcpy(out, "N/A");
880 	if (hit)
881 		strncat(out, " hit", sz - l);
882 	if (miss)
883 		strncat(out, " miss", sz - l);
884 
885 	return repsep_snprintf(bf, size, "%-*s", width, out);
886 }
887 
888 static int64_t
889 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
890 {
891 	union perf_mem_data_src data_src_l;
892 	union perf_mem_data_src data_src_r;
893 
894 	if (left->mem_info)
895 		data_src_l = left->mem_info->data_src;
896 	else
897 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
898 
899 	if (right->mem_info)
900 		data_src_r = right->mem_info->data_src;
901 	else
902 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
903 
904 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
905 }
906 
907 static const char * const mem_lvl[] = {
908 	"N/A",
909 	"HIT",
910 	"MISS",
911 	"L1",
912 	"LFB",
913 	"L2",
914 	"L3",
915 	"Local RAM",
916 	"Remote RAM (1 hop)",
917 	"Remote RAM (2 hops)",
918 	"Remote Cache (1 hop)",
919 	"Remote Cache (2 hops)",
920 	"I/O",
921 	"Uncached",
922 };
923 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
924 
925 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
926 				    size_t size, unsigned int width)
927 {
928 	char out[64];
929 	size_t sz = sizeof(out) - 1; /* -1 for null termination */
930 	size_t i, l = 0;
931 	u64 m =  PERF_MEM_LVL_NA;
932 	u64 hit, miss;
933 
934 	if (he->mem_info)
935 		m  = he->mem_info->data_src.mem_lvl;
936 
937 	out[0] = '\0';
938 
939 	hit = m & PERF_MEM_LVL_HIT;
940 	miss = m & PERF_MEM_LVL_MISS;
941 
942 	/* already taken care of */
943 	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
944 
945 	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
946 		if (!(m & 0x1))
947 			continue;
948 		if (l) {
949 			strcat(out, " or ");
950 			l += 4;
951 		}
952 		strncat(out, mem_lvl[i], sz - l);
953 		l += strlen(mem_lvl[i]);
954 	}
955 	if (*out == '\0')
956 		strcpy(out, "N/A");
957 	if (hit)
958 		strncat(out, " hit", sz - l);
959 	if (miss)
960 		strncat(out, " miss", sz - l);
961 
962 	return repsep_snprintf(bf, size, "%-*s", width, out);
963 }
964 
965 static int64_t
966 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
967 {
968 	union perf_mem_data_src data_src_l;
969 	union perf_mem_data_src data_src_r;
970 
971 	if (left->mem_info)
972 		data_src_l = left->mem_info->data_src;
973 	else
974 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
975 
976 	if (right->mem_info)
977 		data_src_r = right->mem_info->data_src;
978 	else
979 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
980 
981 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
982 }
983 
984 static const char * const snoop_access[] = {
985 	"N/A",
986 	"None",
987 	"Miss",
988 	"Hit",
989 	"HitM",
990 };
991 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
992 
993 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
994 				    size_t size, unsigned int width)
995 {
996 	char out[64];
997 	size_t sz = sizeof(out) - 1; /* -1 for null termination */
998 	size_t i, l = 0;
999 	u64 m = PERF_MEM_SNOOP_NA;
1000 
1001 	out[0] = '\0';
1002 
1003 	if (he->mem_info)
1004 		m = he->mem_info->data_src.mem_snoop;
1005 
1006 	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
1007 		if (!(m & 0x1))
1008 			continue;
1009 		if (l) {
1010 			strcat(out, " or ");
1011 			l += 4;
1012 		}
1013 		strncat(out, snoop_access[i], sz - l);
1014 		l += strlen(snoop_access[i]);
1015 	}
1016 
1017 	if (*out == '\0')
1018 		strcpy(out, "N/A");
1019 
1020 	return repsep_snprintf(bf, size, "%-*s", width, out);
1021 }
1022 
1023 static inline u64 cl_address(u64 address)
1024 {
1025 	/* return the cacheline of the address */
1026 	return (address & ~(cacheline_size - 1));
1027 }
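/*
 * e.g. with cacheline_size == 64, cl_address(0x1234) == 0x1200
 * (illustrative values; cacheline_size comes from the running system).
 */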
1028 
1029 static int64_t
1030 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1031 {
1032 	u64 l, r;
1033 	struct map *l_map, *r_map;
1034 
1035 	if (!left->mem_info)  return -1;
1036 	if (!right->mem_info) return 1;
1037 
1038 	/* group event types together */
1039 	if (left->cpumode > right->cpumode) return -1;
1040 	if (left->cpumode < right->cpumode) return 1;
1041 
1042 	l_map = left->mem_info->daddr.map;
1043 	r_map = right->mem_info->daddr.map;
1044 
1045 	/* if both are NULL, jump to sort on al_addr instead */
1046 	if (!l_map && !r_map)
1047 		goto addr;
1048 
1049 	if (!l_map) return -1;
1050 	if (!r_map) return 1;
1051 
1052 	if (l_map->maj > r_map->maj) return -1;
1053 	if (l_map->maj < r_map->maj) return 1;
1054 
1055 	if (l_map->min > r_map->min) return -1;
1056 	if (l_map->min < r_map->min) return 1;
1057 
1058 	if (l_map->ino > r_map->ino) return -1;
1059 	if (l_map->ino < r_map->ino) return 1;
1060 
1061 	if (l_map->ino_generation > r_map->ino_generation) return -1;
1062 	if (l_map->ino_generation < r_map->ino_generation) return 1;
1063 
1064 	/*
1065 	 * Addresses with no major/minor numbers are assumed to be
1066 	 * anonymous in userspace.  Sort those on pid then address.
1067 	 *
1068 	 * The kernel and non-zero major/minor mapped areas are
1069 	 * assumed to be unity mapped.  Sort those on address.
1070 	 */
1071 
1072 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1073 	    (!(l_map->flags & MAP_SHARED)) &&
1074 	    !l_map->maj && !l_map->min && !l_map->ino &&
1075 	    !l_map->ino_generation) {
1076 		/* userspace anonymous */
1077 
1078 		if (left->thread->pid_ > right->thread->pid_) return -1;
1079 		if (left->thread->pid_ < right->thread->pid_) return 1;
1080 	}
1081 
1082 addr:
1083 	/* al_addr does all the right addr - start + offset calculations */
1084 	l = cl_address(left->mem_info->daddr.al_addr);
1085 	r = cl_address(right->mem_info->daddr.al_addr);
1086 
1087 	if (l > r) return -1;
1088 	if (l < r) return 1;
1089 
1090 	return 0;
1091 }
1092 
1093 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1094 					  size_t size, unsigned int width)
1095 {
1096 
1097 	uint64_t addr = 0;
1098 	struct map *map = NULL;
1099 	struct symbol *sym = NULL;
1100 	char level = he->level;
1101 
1102 	if (he->mem_info) {
1103 		addr = cl_address(he->mem_info->daddr.al_addr);
1104 		map = he->mem_info->daddr.map;
1105 		sym = he->mem_info->daddr.sym;
1106 
1107 		/* print [s] for shared data mmaps */
1108 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1109 		     map && (map->type == MAP__VARIABLE) &&
1110 		    (map->flags & MAP_SHARED) &&
1111 		    (map->maj || map->min || map->ino ||
1112 		     map->ino_generation))
1113 			level = 's';
1114 		else if (!map)
1115 			level = 'X';
1116 	}
1117 	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1118 					 width);
1119 }
1120 
1121 struct sort_entry sort_mispredict = {
1122 	.se_header	= "Branch Mispredicted",
1123 	.se_cmp		= sort__mispredict_cmp,
1124 	.se_snprintf	= hist_entry__mispredict_snprintf,
1125 	.se_width_idx	= HISTC_MISPREDICT,
1126 };
1127 
1128 static u64 he_weight(struct hist_entry *he)
1129 {
1130 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1131 }
1132 
1133 static int64_t
1134 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1135 {
1136 	return he_weight(left) - he_weight(right);
1137 }
1138 
1139 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1140 				    size_t size, unsigned int width)
1141 {
1142 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1143 }
1144 
1145 struct sort_entry sort_local_weight = {
1146 	.se_header	= "Local Weight",
1147 	.se_cmp		= sort__local_weight_cmp,
1148 	.se_snprintf	= hist_entry__local_weight_snprintf,
1149 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1150 };
1151 
1152 static int64_t
1153 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1154 {
1155 	return left->stat.weight - right->stat.weight;
1156 }
1157 
1158 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1159 					      size_t size, unsigned int width)
1160 {
1161 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1162 }
1163 
1164 struct sort_entry sort_global_weight = {
1165 	.se_header	= "Weight",
1166 	.se_cmp		= sort__global_weight_cmp,
1167 	.se_snprintf	= hist_entry__global_weight_snprintf,
1168 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1169 };
1170 
1171 struct sort_entry sort_mem_daddr_sym = {
1172 	.se_header	= "Data Symbol",
1173 	.se_cmp		= sort__daddr_cmp,
1174 	.se_snprintf	= hist_entry__daddr_snprintf,
1175 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1176 };
1177 
1178 struct sort_entry sort_mem_iaddr_sym = {
1179 	.se_header	= "Code Symbol",
1180 	.se_cmp		= sort__iaddr_cmp,
1181 	.se_snprintf	= hist_entry__iaddr_snprintf,
1182 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1183 };
1184 
1185 struct sort_entry sort_mem_daddr_dso = {
1186 	.se_header	= "Data Object",
1187 	.se_cmp		= sort__dso_daddr_cmp,
1188 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1189 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1190 };
1191 
1192 struct sort_entry sort_mem_locked = {
1193 	.se_header	= "Locked",
1194 	.se_cmp		= sort__locked_cmp,
1195 	.se_snprintf	= hist_entry__locked_snprintf,
1196 	.se_width_idx	= HISTC_MEM_LOCKED,
1197 };
1198 
1199 struct sort_entry sort_mem_tlb = {
1200 	.se_header	= "TLB access",
1201 	.se_cmp		= sort__tlb_cmp,
1202 	.se_snprintf	= hist_entry__tlb_snprintf,
1203 	.se_width_idx	= HISTC_MEM_TLB,
1204 };
1205 
1206 struct sort_entry sort_mem_lvl = {
1207 	.se_header	= "Memory access",
1208 	.se_cmp		= sort__lvl_cmp,
1209 	.se_snprintf	= hist_entry__lvl_snprintf,
1210 	.se_width_idx	= HISTC_MEM_LVL,
1211 };
1212 
1213 struct sort_entry sort_mem_snoop = {
1214 	.se_header	= "Snoop",
1215 	.se_cmp		= sort__snoop_cmp,
1216 	.se_snprintf	= hist_entry__snoop_snprintf,
1217 	.se_width_idx	= HISTC_MEM_SNOOP,
1218 };
1219 
1220 struct sort_entry sort_mem_dcacheline = {
1221 	.se_header	= "Data Cacheline",
1222 	.se_cmp		= sort__dcacheline_cmp,
1223 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1224 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1225 };
1226 
1227 static int64_t
1228 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1229 {
1230 	if (!left->branch_info || !right->branch_info)
1231 		return cmp_null(left->branch_info, right->branch_info);
1232 
1233 	return left->branch_info->flags.abort !=
1234 		right->branch_info->flags.abort;
1235 }
1236 
1237 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1238 				    size_t size, unsigned int width)
1239 {
1240 	const char *out = "N/A";
1241 
1242 	if (he->branch_info) {
1243 		if (he->branch_info->flags.abort)
1244 			out = "A";
1245 		else
1246 			out = ".";
1247 	}
1248 
1249 	return repsep_snprintf(bf, size, "%-*s", width, out);
1250 }
1251 
1252 struct sort_entry sort_abort = {
1253 	.se_header	= "Transaction abort",
1254 	.se_cmp		= sort__abort_cmp,
1255 	.se_snprintf	= hist_entry__abort_snprintf,
1256 	.se_width_idx	= HISTC_ABORT,
1257 };
1258 
1259 static int64_t
1260 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1261 {
1262 	if (!left->branch_info || !right->branch_info)
1263 		return cmp_null(left->branch_info, right->branch_info);
1264 
1265 	return left->branch_info->flags.in_tx !=
1266 		right->branch_info->flags.in_tx;
1267 }
1268 
1269 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1270 				    size_t size, unsigned int width)
1271 {
1272 	const char *out = "N/A";
1273 
1274 	if (he->branch_info) {
1275 		if (he->branch_info->flags.in_tx)
1276 			out = "T";
1277 		else
1278 			out = ".";
1279 	}
1280 
1281 	return repsep_snprintf(bf, size, "%-*s", width, out);
1282 }
1283 
1284 struct sort_entry sort_in_tx = {
1285 	.se_header	= "Branch in transaction",
1286 	.se_cmp		= sort__in_tx_cmp,
1287 	.se_snprintf	= hist_entry__in_tx_snprintf,
1288 	.se_width_idx	= HISTC_IN_TX,
1289 };
1290 
1291 static int64_t
1292 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1293 {
1294 	return left->transaction - right->transaction;
1295 }
1296 
1297 static inline char *add_str(char *p, const char *str)
1298 {
1299 	strcpy(p, str);
1300 	return p + strlen(str);
1301 }
1302 
1303 static struct txbit {
1304 	unsigned flag;
1305 	const char *name;
1306 	int skip_for_len;
1307 } txbits[] = {
1308 	{ PERF_TXN_ELISION,        "EL ",        0 },
1309 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1310 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1311 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1312 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1313 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1314 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1315 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1316 	{ 0, NULL, 0 }
1317 };
1318 
1319 int hist_entry__transaction_len(void)
1320 {
1321 	int i;
1322 	int len = 0;
1323 
1324 	for (i = 0; txbits[i].name; i++) {
1325 		if (!txbits[i].skip_for_len)
1326 			len += strlen(txbits[i].name);
1327 	}
1328 	len += 4; /* :XX<space> */
1329 	return len;
1330 }
1331 
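/*
 * Renders the transaction flags as a short string, e.g. (illustrative)
 * PERF_TXN_TRANSACTION|PERF_TXN_SYNC|PERF_TXN_CONFLICT with abort code 5
 * prints as "TX SYNC CON :5".
 */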
1332 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1333 					    size_t size, unsigned int width)
1334 {
1335 	u64 t = he->transaction;
1336 	char buf[128];
1337 	char *p = buf;
1338 	int i;
1339 
1340 	buf[0] = 0;
1341 	for (i = 0; txbits[i].name; i++)
1342 		if (txbits[i].flag & t)
1343 			p = add_str(p, txbits[i].name);
1344 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1345 		p = add_str(p, "NEITHER ");
1346 	if (t & PERF_TXN_ABORT_MASK) {
1347 		sprintf(p, ":%" PRIx64,
1348 			(t & PERF_TXN_ABORT_MASK) >>
1349 			PERF_TXN_ABORT_SHIFT);
1350 		p += strlen(p);
1351 	}
1352 
1353 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1354 }
1355 
1356 struct sort_entry sort_transaction = {
1357 	.se_header	= "Transaction                ",
1358 	.se_cmp		= sort__transaction_cmp,
1359 	.se_snprintf	= hist_entry__transaction_snprintf,
1360 	.se_width_idx	= HISTC_TRANSACTION,
1361 };
1362 
1363 struct sort_dimension {
1364 	const char		*name;
1365 	struct sort_entry	*entry;
1366 	int			taken;
1367 };
1368 
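/*
 * The DIM() tables below map the sort/field key names accepted on the
 * command line to their sort_entry implementations; the branch-stack and
 * memory tables are indexed relative to __SORT_BRANCH_STACK and
 * __SORT_MEMORY_MODE respectively.
 */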
1369 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1370 
1371 static struct sort_dimension common_sort_dimensions[] = {
1372 	DIM(SORT_PID, "pid", sort_thread),
1373 	DIM(SORT_COMM, "comm", sort_comm),
1374 	DIM(SORT_DSO, "dso", sort_dso),
1375 	DIM(SORT_SYM, "symbol", sort_sym),
1376 	DIM(SORT_PARENT, "parent", sort_parent),
1377 	DIM(SORT_CPU, "cpu", sort_cpu),
1378 	DIM(SORT_SOCKET, "socket", sort_socket),
1379 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
1380 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1381 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1382 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1383 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1384 	DIM(SORT_TRACE, "trace", sort_trace),
1385 };
1386 
1387 #undef DIM
1388 
1389 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1390 
1391 static struct sort_dimension bstack_sort_dimensions[] = {
1392 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1393 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1394 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1395 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1396 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1397 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1398 	DIM(SORT_ABORT, "abort", sort_abort),
1399 	DIM(SORT_CYCLES, "cycles", sort_cycles),
1400 };
1401 
1402 #undef DIM
1403 
1404 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1405 
1406 static struct sort_dimension memory_sort_dimensions[] = {
1407 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1408 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1409 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1410 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1411 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1412 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1413 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1414 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1415 };
1416 
1417 #undef DIM
1418 
1419 struct hpp_dimension {
1420 	const char		*name;
1421 	struct perf_hpp_fmt	*fmt;
1422 	int			taken;
1423 };
1424 
1425 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1426 
1427 static struct hpp_dimension hpp_sort_dimensions[] = {
1428 	DIM(PERF_HPP__OVERHEAD, "overhead"),
1429 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1430 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1431 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1432 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1433 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1434 	DIM(PERF_HPP__SAMPLES, "sample"),
1435 	DIM(PERF_HPP__PERIOD, "period"),
1436 };
1437 
1438 #undef DIM
1439 
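/*
 * Adapter exposing a classic sort_entry through the perf_hpp_fmt column
 * interface; the callbacks below simply delegate to the wrapped entry.
 */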
1440 struct hpp_sort_entry {
1441 	struct perf_hpp_fmt hpp;
1442 	struct sort_entry *se;
1443 };
1444 
1445 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1446 {
1447 	struct hpp_sort_entry *hse;
1448 
1449 	if (!perf_hpp__is_sort_entry(fmt))
1450 		return;
1451 
1452 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1453 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1454 }
1455 
1456 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1457 			      struct perf_evsel *evsel)
1458 {
1459 	struct hpp_sort_entry *hse;
1460 	size_t len = fmt->user_len;
1461 
1462 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1463 
1464 	if (!len)
1465 		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1466 
1467 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1468 }
1469 
1470 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1471 			     struct perf_hpp *hpp __maybe_unused,
1472 			     struct perf_evsel *evsel)
1473 {
1474 	struct hpp_sort_entry *hse;
1475 	size_t len = fmt->user_len;
1476 
1477 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1478 
1479 	if (!len)
1480 		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1481 
1482 	return len;
1483 }
1484 
1485 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1486 			     struct hist_entry *he)
1487 {
1488 	struct hpp_sort_entry *hse;
1489 	size_t len = fmt->user_len;
1490 
1491 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1492 
1493 	if (!len)
1494 		len = hists__col_len(he->hists, hse->se->se_width_idx);
1495 
1496 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1497 }
1498 
1499 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1500 			       struct hist_entry *a, struct hist_entry *b)
1501 {
1502 	struct hpp_sort_entry *hse;
1503 
1504 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1505 	return hse->se->se_cmp(a, b);
1506 }
1507 
1508 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1509 				    struct hist_entry *a, struct hist_entry *b)
1510 {
1511 	struct hpp_sort_entry *hse;
1512 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1513 
1514 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1515 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1516 	return collapse_fn(a, b);
1517 }
1518 
1519 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1520 				struct hist_entry *a, struct hist_entry *b)
1521 {
1522 	struct hpp_sort_entry *hse;
1523 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1524 
1525 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1526 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1527 	return sort_fn(a, b);
1528 }
1529 
1530 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1531 {
1532 	return format->header == __sort__hpp_header;
1533 }
1534 
1535 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1536 {
1537 	struct hpp_sort_entry *hse_a;
1538 	struct hpp_sort_entry *hse_b;
1539 
1540 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1541 		return false;
1542 
1543 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
1544 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
1545 
1546 	return hse_a->se == hse_b->se;
1547 }
1548 
1549 static void hse_free(struct perf_hpp_fmt *fmt)
1550 {
1551 	struct hpp_sort_entry *hse;
1552 
1553 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1554 	free(hse);
1555 }
1556 
1557 static struct hpp_sort_entry *
1558 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1559 {
1560 	struct hpp_sort_entry *hse;
1561 
1562 	hse = malloc(sizeof(*hse));
1563 	if (hse == NULL) {
1564 		pr_err("Memory allocation failed\n");
1565 		return NULL;
1566 	}
1567 
1568 	hse->se = sd->entry;
1569 	hse->hpp.name = sd->entry->se_header;
1570 	hse->hpp.header = __sort__hpp_header;
1571 	hse->hpp.width = __sort__hpp_width;
1572 	hse->hpp.entry = __sort__hpp_entry;
1573 	hse->hpp.color = NULL;
1574 
1575 	hse->hpp.cmp = __sort__hpp_cmp;
1576 	hse->hpp.collapse = __sort__hpp_collapse;
1577 	hse->hpp.sort = __sort__hpp_sort;
1578 	hse->hpp.equal = __sort__hpp_equal;
1579 	hse->hpp.free = hse_free;
1580 
1581 	INIT_LIST_HEAD(&hse->hpp.list);
1582 	INIT_LIST_HEAD(&hse->hpp.sort_list);
1583 	hse->hpp.elide = false;
1584 	hse->hpp.len = 0;
1585 	hse->hpp.user_len = 0;
1586 
1587 	return hse;
1588 }
1589 
1590 static void hpp_free(struct perf_hpp_fmt *fmt)
1591 {
1592 	free(fmt);
1593 }
1594 
1595 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1596 {
1597 	struct perf_hpp_fmt *fmt;
1598 
1599 	fmt = memdup(hd->fmt, sizeof(*fmt));
1600 	if (fmt) {
1601 		INIT_LIST_HEAD(&fmt->list);
1602 		INIT_LIST_HEAD(&fmt->sort_list);
1603 		fmt->free = hpp_free;
1604 	}
1605 
1606 	return fmt;
1607 }
1608 
1609 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1610 {
1611 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1612 
1613 	if (hse == NULL)
1614 		return -1;
1615 
1616 	perf_hpp__register_sort_field(&hse->hpp);
1617 	return 0;
1618 }
1619 
1620 static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
1621 					    struct sort_dimension *sd)
1622 {
1623 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1624 
1625 	if (hse == NULL)
1626 		return -1;
1627 
1628 	perf_hpp_list__column_register(list, &hse->hpp);
1629 	return 0;
1630 }
1631 
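/*
 * Column built at runtime from a tracepoint format field (dynamic sort
 * keys); dynamic_len tracks the widest value seen so the column can size
 * itself.
 */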
1632 struct hpp_dynamic_entry {
1633 	struct perf_hpp_fmt hpp;
1634 	struct perf_evsel *evsel;
1635 	struct format_field *field;
1636 	unsigned dynamic_len;
1637 	bool raw_trace;
1638 };
1639 
1640 static int hde_width(struct hpp_dynamic_entry *hde)
1641 {
1642 	if (!hde->hpp.len) {
1643 		int len = hde->dynamic_len;
1644 		int namelen = strlen(hde->field->name);
1645 		int fieldlen = hde->field->size;
1646 
1647 		if (namelen > len)
1648 			len = namelen;
1649 
1650 		if (!(hde->field->flags & FIELD_IS_STRING)) {
1651 			/* length for print hex numbers */
1652 			fieldlen = hde->field->size * 2 + 2;
1653 		}
1654 		if (fieldlen > len)
1655 			len = fieldlen;
1656 
1657 		hde->hpp.len = len;
1658 	}
1659 	return hde->hpp.len;
1660 }
1661 
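/*
 * The pretty-printed trace output is assumed to be a space separated list of
 * "name=value" tokens (e.g. "prev_comm=swapper prev_pid=0 ..."); that is
 * what the parsing in update_dynamic_len() and __sort__hde_entry() relies on.
 */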
1662 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1663 			       struct hist_entry *he)
1664 {
1665 	char *str, *pos;
1666 	struct format_field *field = hde->field;
1667 	size_t namelen;
1668 	bool last = false;
1669 
1670 	if (hde->raw_trace)
1671 		return;
1672 
1673 	/* parse pretty print result and update max length */
1674 	if (!he->trace_output)
1675 		he->trace_output = get_trace_output(he);
1676 
1677 	namelen = strlen(field->name);
1678 	str = he->trace_output;
1679 
1680 	while (str) {
1681 		pos = strchr(str, ' ');
1682 		if (pos == NULL) {
1683 			last = true;
1684 			pos = str + strlen(str);
1685 		}
1686 
1687 		if (!strncmp(str, field->name, namelen)) {
1688 			size_t len;
1689 
1690 			str += namelen + 1;
1691 			len = pos - str;
1692 
1693 			if (len > hde->dynamic_len)
1694 				hde->dynamic_len = len;
1695 			break;
1696 		}
1697 
1698 		if (last)
1699 			str = NULL;
1700 		else
1701 			str = pos + 1;
1702 	}
1703 }
1704 
1705 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1706 			      struct perf_evsel *evsel __maybe_unused)
1707 {
1708 	struct hpp_dynamic_entry *hde;
1709 	size_t len = fmt->user_len;
1710 
1711 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1712 
1713 	if (!len)
1714 		len = hde_width(hde);
1715 
1716 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1717 }
1718 
1719 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1720 			     struct perf_hpp *hpp __maybe_unused,
1721 			     struct perf_evsel *evsel __maybe_unused)
1722 {
1723 	struct hpp_dynamic_entry *hde;
1724 	size_t len = fmt->user_len;
1725 
1726 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1727 
1728 	if (!len)
1729 		len = hde_width(hde);
1730 
1731 	return len;
1732 }
1733 
1734 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1735 {
1736 	struct hpp_dynamic_entry *hde;
1737 
1738 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1739 
1740 	return hists_to_evsel(hists) == hde->evsel;
1741 }
1742 
1743 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1744 			     struct hist_entry *he)
1745 {
1746 	struct hpp_dynamic_entry *hde;
1747 	size_t len = fmt->user_len;
1748 	char *str, *pos;
1749 	struct format_field *field;
1750 	size_t namelen;
1751 	bool last = false;
1752 	int ret;
1753 
1754 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1755 
1756 	if (!len)
1757 		len = hde_width(hde);
1758 
1759 	if (hde->raw_trace)
1760 		goto raw_field;
1761 
1762 	field = hde->field;
1763 	namelen = strlen(field->name);
1764 	str = he->trace_output;
1765 
1766 	while (str) {
1767 		pos = strchr(str, ' ');
1768 		if (pos == NULL) {
1769 			last = true;
1770 			pos = str + strlen(str);
1771 		}
1772 
1773 		if (!strncmp(str, field->name, namelen)) {
1774 			str += namelen + 1;
1775 			str = strndup(str, pos - str);
1776 
1777 			if (str == NULL)
1778 				return scnprintf(hpp->buf, hpp->size,
1779 						 "%*.*s", len, len, "ERROR");
1780 			break;
1781 		}
1782 
1783 		if (last)
1784 			str = NULL;
1785 		else
1786 			str = pos + 1;
1787 	}
1788 
1789 	if (str == NULL) {
1790 		struct trace_seq seq;
1791 raw_field:
1792 		trace_seq_init(&seq);
1793 		pevent_print_field(&seq, he->raw_data, hde->field);
1794 		str = seq.buffer;
1795 	}
1796 
1797 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1798 	free(str);
1799 	return ret;
1800 }
1801 
1802 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1803 			       struct hist_entry *a, struct hist_entry *b)
1804 {
1805 	struct hpp_dynamic_entry *hde;
1806 	struct format_field *field;
1807 	unsigned offset, size;
1808 
1809 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1810 
1811 	field = hde->field;
1812 	if (field->flags & FIELD_IS_DYNAMIC) {
1813 		unsigned long long dyn;
1814 
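		/*
		 * Dynamic (__data_loc) fields store a 32-bit descriptor in
		 * the record: low 16 bits are the data offset, high 16 bits
		 * its length, as decoded below.
		 */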
1815 		pevent_read_number_field(field, a->raw_data, &dyn);
1816 		offset = dyn & 0xffff;
1817 		size = (dyn >> 16) & 0xffff;
1818 
1819 		/* record max width for output */
1820 		if (size > hde->dynamic_len)
1821 			hde->dynamic_len = size;
1822 	} else {
1823 		offset = field->offset;
1824 		size = field->size;
1825 
1826 		update_dynamic_len(hde, a);
1827 		update_dynamic_len(hde, b);
1828 	}
1829 
1830 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1831 }
1832 
1833 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1834 {
1835 	return fmt->cmp == __sort__hde_cmp;
1836 }
1837 
1838 static void hde_free(struct perf_hpp_fmt *fmt)
1839 {
1840 	struct hpp_dynamic_entry *hde;
1841 
1842 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1843 	free(hde);
1844 }
1845 
1846 static struct hpp_dynamic_entry *
1847 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1848 {
1849 	struct hpp_dynamic_entry *hde;
1850 
1851 	hde = malloc(sizeof(*hde));
1852 	if (hde == NULL) {
1853 		pr_debug("Memory allocation failed\n");
1854 		return NULL;
1855 	}
1856 
1857 	hde->evsel = evsel;
1858 	hde->field = field;
1859 	hde->dynamic_len = 0;
1860 
1861 	hde->hpp.name = field->name;
1862 	hde->hpp.header = __sort__hde_header;
1863 	hde->hpp.width  = __sort__hde_width;
1864 	hde->hpp.entry  = __sort__hde_entry;
1865 	hde->hpp.color  = NULL;
1866 
1867 	hde->hpp.cmp = __sort__hde_cmp;
1868 	hde->hpp.collapse = __sort__hde_cmp;
1869 	hde->hpp.sort = __sort__hde_cmp;
1870 	hde->hpp.free = hde_free;
1871 
1872 	INIT_LIST_HEAD(&hde->hpp.list);
1873 	INIT_LIST_HEAD(&hde->hpp.sort_list);
1874 	hde->hpp.elide = false;
1875 	hde->hpp.len = 0;
1876 	hde->hpp.user_len = 0;
1877 
1878 	return hde;
1879 }
1880 
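/*
 * Splits a --sort/--fields token of the form "[event.]field[/opt]", e.g.
 * (illustrative) "sched:sched_switch.prev_pid/raw" yields
 * event="sched:sched_switch", field="prev_pid", opt="raw".
 */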
1881 static int parse_field_name(char *str, char **event, char **field, char **opt)
1882 {
1883 	char *event_name, *field_name, *opt_name;
1884 
1885 	event_name = str;
1886 	field_name = strchr(str, '.');
1887 
1888 	if (field_name) {
1889 		*field_name++ = '\0';
1890 	} else {
1891 		event_name = NULL;
1892 		field_name = str;
1893 	}
1894 
1895 	opt_name = strchr(field_name, '/');
1896 	if (opt_name)
1897 		*opt_name++ = '\0';
1898 
1899 	*event = event_name;
1900 	*field = field_name;
1901 	*opt   = opt_name;
1902 
1903 	return 0;
1904 }
1905 
1906 /* Find the matching evsel for a given event name.  The event name can be:
1907  *   1. '%' + event index (e.g. '%1' for first event)
1908  *   2. full event name (e.g. sched:sched_switch)
1909  *   3. partial event name (should not contain ':')
1910  */
1911 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1912 {
1913 	struct perf_evsel *evsel = NULL;
1914 	struct perf_evsel *pos;
1915 	bool full_name;
1916 
1917 	/* case 1 */
1918 	if (event_name[0] == '%') {
1919 		int nr = strtol(event_name+1, NULL, 0);
1920 
1921 		if (nr > evlist->nr_entries)
1922 			return NULL;
1923 
1924 		evsel = perf_evlist__first(evlist);
1925 		while (--nr > 0)
1926 			evsel = perf_evsel__next(evsel);
1927 
1928 		return evsel;
1929 	}
1930 
1931 	full_name = !!strchr(event_name, ':');
1932 	evlist__for_each(evlist, pos) {
1933 		/* case 2 */
1934 		if (full_name && !strcmp(pos->name, event_name))
1935 			return pos;
1936 		/* case 3 */
1937 		if (!full_name && strstr(pos->name, event_name)) {
1938 			if (evsel) {
1939 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1940 					 event_name, evsel->name, pos->name);
1941 				return NULL;
1942 			}
1943 			evsel = pos;
1944 		}
1945 	}
1946 
1947 	return evsel;
1948 }
1949 
1950 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1951 				    struct format_field *field,
1952 				    bool raw_trace)
1953 {
1954 	struct hpp_dynamic_entry *hde;
1955 
1956 	hde = __alloc_dynamic_entry(evsel, field);
1957 	if (hde == NULL)
1958 		return -ENOMEM;
1959 
1960 	hde->raw_trace = raw_trace;
1961 
1962 	perf_hpp__register_sort_field(&hde->hpp);
1963 	return 0;
1964 }
1965 
1966 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1967 {
1968 	int ret;
1969 	struct format_field *field;
1970 
1971 	field = evsel->tp_format->format.fields;
1972 	while (field) {
1973 		ret = __dynamic_dimension__add(evsel, field, raw_trace);
1974 		if (ret < 0)
1975 			return ret;
1976 
1977 		field = field->next;
1978 	}
1979 	return 0;
1980 }
1981 
1982 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1983 {
1984 	int ret;
1985 	struct perf_evsel *evsel;
1986 
1987 	evlist__for_each(evlist, evsel) {
1988 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1989 			continue;
1990 
1991 		ret = add_evsel_fields(evsel, raw_trace);
1992 		if (ret < 0)
1993 			return ret;
1994 	}
1995 	return 0;
1996 }
1997 
1998 static int add_all_matching_fields(struct perf_evlist *evlist,
1999 				   char *field_name, bool raw_trace)
2000 {
2001 	int ret = -ESRCH;
2002 	struct perf_evsel *evsel;
2003 	struct format_field *field;
2004 
2005 	evlist__for_each(evlist, evsel) {
2006 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2007 			continue;
2008 
2009 		field = pevent_find_any_field(evsel->tp_format, field_name);
2010 		if (field == NULL)
2011 			continue;
2012 
2013 		ret = __dynamic_dimension__add(evsel, field, raw_trace);
2014 		if (ret < 0)
2015 			break;
2016 	}
2017 	return ret;
2018 }
2019 
2020 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
2021 {
2022 	char *str, *event_name, *field_name, *opt_name;
2023 	struct perf_evsel *evsel;
2024 	struct format_field *field;
2025 	bool raw_trace = symbol_conf.raw_trace;
2026 	int ret = 0;
2027 
2028 	if (evlist == NULL)
2029 		return -ENOENT;
2030 
2031 	str = strdup(tok);
2032 	if (str == NULL)
2033 		return -ENOMEM;
2034 
2035 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2036 		ret = -EINVAL;
2037 		goto out;
2038 	}
2039 
2040 	if (opt_name) {
2041 		if (strcmp(opt_name, "raw")) {
2042 			pr_debug("unsupported field option %s\n", opt_name);
2043 			ret = -EINVAL;
2044 			goto out;
2045 		}
2046 		raw_trace = true;
2047 	}
2048 
2049 	if (!strcmp(field_name, "trace_fields")) {
2050 		ret = add_all_dynamic_fields(evlist, raw_trace);
2051 		goto out;
2052 	}
2053 
2054 	if (event_name == NULL) {
2055 		ret = add_all_matching_fields(evlist, field_name, raw_trace);
2056 		goto out;
2057 	}
2058 
2059 	evsel = find_evsel(evlist, event_name);
2060 	if (evsel == NULL) {
2061 		pr_debug("Cannot find event: %s\n", event_name);
2062 		ret = -ENOENT;
2063 		goto out;
2064 	}
2065 
2066 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2067 		pr_debug("%s is not a tracepoint event\n", event_name);
2068 		ret = -EINVAL;
2069 		goto out;
2070 	}
2071 
2072 	if (!strcmp(field_name, "*")) {
2073 		ret = add_evsel_fields(evsel, raw_trace);
2074 	} else {
2075 		field = pevent_find_any_field(evsel->tp_format, field_name);
2076 		if (field == NULL) {
2077 			pr_debug("Cannot find event field for %s.%s\n",
2078 				 event_name, field_name);
2079 			ret = -ENOENT;
			goto out;
2080 		}
2081 
2082 		ret = __dynamic_dimension__add(evsel, field, raw_trace);
2083 	}
2084 
2085 out:
2086 	free(str);
2087 	return ret;
2088 }
2089 
2090 static int __sort_dimension__add(struct sort_dimension *sd)
2091 {
2092 	if (sd->taken)
2093 		return 0;
2094 
2095 	if (__sort_dimension__add_hpp_sort(sd) < 0)
2096 		return -1;
2097 
2098 	if (sd->entry->se_collapse)
2099 		sort__need_collapse = 1;
2100 
2101 	sd->taken = 1;
2102 
2103 	return 0;
2104 }
2105 
2106 static int __hpp_dimension__add(struct hpp_dimension *hd)
2107 {
2108 	struct perf_hpp_fmt *fmt;
2109 
2110 	if (hd->taken)
2111 		return 0;
2112 
2113 	fmt = __hpp_dimension__alloc_hpp(hd);
2114 	if (!fmt)
2115 		return -1;
2116 
2117 	hd->taken = 1;
2118 	perf_hpp__register_sort_field(fmt);
2119 	return 0;
2120 }
2121 
2122 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2123 					struct sort_dimension *sd)
2124 {
2125 	if (sd->taken)
2126 		return 0;
2127 
2128 	if (__sort_dimension__add_hpp_output(list, sd) < 0)
2129 		return -1;
2130 
2131 	sd->taken = 1;
2132 	return 0;
2133 }
2134 
2135 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2136 				       struct hpp_dimension *hd)
2137 {
2138 	struct perf_hpp_fmt *fmt;
2139 
2140 	if (hd->taken)
2141 		return 0;
2142 
2143 	fmt = __hpp_dimension__alloc_hpp(hd);
2144 	if (!fmt)
2145 		return -1;
2146 
2147 	hd->taken = 1;
2148 	perf_hpp_list__column_register(list, fmt);
2149 	return 0;
2150 }
2151 
2152 int hpp_dimension__add_output(unsigned col)
2153 {
2154 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
2155 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2156 }
2157 
2158 static int sort_dimension__add(const char *tok,
2159 			       struct perf_evlist *evlist)
2160 {
2161 	unsigned int i;
2162 
2163 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2164 		struct sort_dimension *sd = &common_sort_dimensions[i];
2165 
2166 		if (strncasecmp(tok, sd->name, strlen(tok)))
2167 			continue;
2168 
2169 		if (sd->entry == &sort_parent) {
2170 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2171 			if (ret) {
2172 				char err[BUFSIZ];
2173 
2174 				regerror(ret, &parent_regex, err, sizeof(err));
2175 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2176 				return -EINVAL;
2177 			}
2178 			sort__has_parent = 1;
2179 		} else if (sd->entry == &sort_sym) {
2180 			sort__has_sym = 1;
2181 			/*
2182 			 * perf diff displays the performance difference between
2183 			 * two or more perf.data files. Those files could come
2184 			 * from different binaries, so we should not compare
2185 			 * their ips but their symbol names.
2186 			 */
2187 			if (sort__mode == SORT_MODE__DIFF)
2188 				sd->entry->se_collapse = sort__sym_sort;
2189 
2190 		} else if (sd->entry == &sort_dso) {
2191 			sort__has_dso = 1;
2192 		} else if (sd->entry == &sort_socket) {
2193 			sort__has_socket = 1;
2194 		} else if (sd->entry == &sort_thread) {
2195 			sort__has_thread = 1;
2196 		}
2197 
2198 		return __sort_dimension__add(sd);
2199 	}
2200 
2201 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2202 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2203 
2204 		if (strncasecmp(tok, hd->name, strlen(tok)))
2205 			continue;
2206 
2207 		return __hpp_dimension__add(hd);
2208 	}
2209 
2210 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2211 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2212 
2213 		if (strncasecmp(tok, sd->name, strlen(tok)))
2214 			continue;
2215 
2216 		if (sort__mode != SORT_MODE__BRANCH)
2217 			return -EINVAL;
2218 
2219 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2220 			sort__has_sym = 1;
2221 
2222 		__sort_dimension__add(sd);
2223 		return 0;
2224 	}
2225 
2226 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2227 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2228 
2229 		if (strncasecmp(tok, sd->name, strlen(tok)))
2230 			continue;
2231 
2232 		if (sort__mode != SORT_MODE__MEMORY)
2233 			return -EINVAL;
2234 
2235 		if (sd->entry == &sort_mem_daddr_sym)
2236 			sort__has_sym = 1;
2237 
2238 		__sort_dimension__add(sd);
2239 		return 0;
2240 	}
2241 
2242 	if (!add_dynamic_entry(evlist, tok))
2243 		return 0;
2244 
2245 	return -ESRCH;
2246 }
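
/*
 * Editor's note, not part of the original file: the dimension tables
 * above are matched with strncasecmp(tok, name, strlen(tok)), so a key
 * only needs to be a case-insensitive prefix of a dimension name.  A
 * minimal sketch with a hypothetical helper:
 */
static void __maybe_unused sort_key_prefix_example(struct perf_evlist *evlist)
{
	/* "sym" is a prefix of "symbol", so it selects the symbol key. */
	if (sort_dimension__add("sym", evlist) == 0)
		pr_debug("'sym' resolved to a known sort key\n");
}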
2247 
2248 static int setup_sort_list(char *str, struct perf_evlist *evlist)
2249 {
2250 	char *tmp, *tok;
2251 	int ret = 0;
2252 
2253 	for (tok = strtok_r(str, ", ", &tmp);
2254 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
2255 		ret = sort_dimension__add(tok, evlist);
2256 		if (ret == -EINVAL) {
2257 			error("Invalid --sort key: `%s'", tok);
2258 			break;
2259 		} else if (ret == -ESRCH) {
2260 			error("Unknown --sort key: `%s'", tok);
2261 			break;
2262 		}
2263 	}
2264 
2265 	return ret;
2266 }
2267 
2268 static const char *get_default_sort_order(struct perf_evlist *evlist)
2269 {
2270 	const char *default_sort_orders[] = {
2271 		default_sort_order,
2272 		default_branch_sort_order,
2273 		default_mem_sort_order,
2274 		default_top_sort_order,
2275 		default_diff_sort_order,
2276 		default_tracepoint_sort_order,
2277 	};
2278 	bool use_trace = true;
2279 	struct perf_evsel *evsel;
2280 
2281 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2282 
2283 	if (evlist == NULL)
2284 		goto out_no_evlist;
2285 
2286 	evlist__for_each(evlist, evsel) {
2287 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2288 			use_trace = false;
2289 			break;
2290 		}
2291 	}
2292 
2293 	if (use_trace) {
2294 		sort__mode = SORT_MODE__TRACEPOINT;
2295 		if (symbol_conf.raw_trace)
2296 			return "trace_fields";
2297 	}
2298 out_no_evlist:
2299 	return default_sort_orders[sort__mode];
2300 }
2301 
2302 static int setup_sort_order(struct perf_evlist *evlist)
2303 {
2304 	char *new_sort_order;
2305 
2306 	/*
2307 	 * Append '+'-prefixed sort order to the default sort
2308 	 * order string.
2309 	 */
2310 	if (!sort_order || is_strict_order(sort_order))
2311 		return 0;
2312 
2313 	if (sort_order[1] == '\0') {
2314 		error("Invalid --sort key: `+'");
2315 		return -EINVAL;
2316 	}
2317 
2318 	/*
2319 	 * We allocate a new sort_order string, but we never free it
2320 	 * because it is referenced throughout the rest of the code.
2321 	 */
2322 	if (asprintf(&new_sort_order, "%s,%s",
2323 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
2324 		error("Not enough memory to set up --sort");
2325 		return -ENOMEM;
2326 	}
2327 
2328 	sort_order = new_sort_order;
2329 	return 0;
2330 }
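
/*
 * Editor's illustrative sketch, not part of the original file: with a
 * '+'-prefixed key such as "-s +cpu", setup_sort_order() above rewrites
 * the global sort_order so the requested keys are appended to the
 * current mode's default order.  The helper name is hypothetical.
 */
static int __maybe_unused setup_sort_order_example(struct perf_evlist *evlist)
{
	sort_order = "+cpu";	/* as if the user passed -s +cpu */

	if (setup_sort_order(evlist) < 0)
		return -1;

	/* now the default order for the current mode, followed by ",cpu" */
	pr_debug("sort_order: %s\n", sort_order);
	return 0;
}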
2331 
2332 /*
2333  * Adds the 'pre,' prefix to 'str' if 'pre' is
2334  * not already part of 'str'.
2335  */
2336 static char *prefix_if_not_in(const char *pre, char *str)
2337 {
2338 	char *n;
2339 
2340 	if (!str || strstr(str, pre))
2341 		return str;
2342 
2343 	if (asprintf(&n, "%s,%s", pre, str) < 0)
2344 		n = NULL;
2345 
2346 	free(str);
2347 	return n;
2348 }
2349 
2350 static char *setup_overhead(char *keys)
2351 {
2352 	keys = prefix_if_not_in("overhead", keys);
2353 
2354 	if (symbol_conf.cumulate_callchain)
2355 		keys = prefix_if_not_in("overhead_children", keys);
2356 
2357 	return keys;
2358 }
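
/*
 * Editor's illustrative sketch, not part of the original file:
 * setup_overhead() above makes the overhead column(s) lead the sort
 * keys.  The input must be heap-allocated, since prefix_if_not_in()
 * takes ownership of the string it is given and returns the one to use
 * afterwards.  The helper name and input keys are hypothetical.
 */
static void __maybe_unused setup_overhead_example(void)
{
	char *keys = strdup("comm,dso");

	if (keys == NULL)
		return;

	keys = setup_overhead(keys);
	if (keys)
		pr_debug("sort keys: %s\n", keys);	/* e.g. "overhead,comm,dso" */
	free(keys);
}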
2359 
2360 static int __setup_sorting(struct perf_evlist *evlist)
2361 {
2362 	char *str;
2363 	const char *sort_keys;
2364 	int ret = 0;
2365 
2366 	ret = setup_sort_order(evlist);
2367 	if (ret)
2368 		return ret;
2369 
2370 	sort_keys = sort_order;
2371 	if (sort_keys == NULL) {
2372 		if (is_strict_order(field_order)) {
2373 			/*
2374 			 * If the user specified a field order but no sort order,
2375 			 * we'll honor it and not add the default sort orders.
2376 			 */
2377 			return 0;
2378 		}
2379 
2380 		sort_keys = get_default_sort_order(evlist);
2381 	}
2382 
2383 	str = strdup(sort_keys);
2384 	if (str == NULL) {
2385 		error("Not enough memory to set up sort keys");
2386 		return -ENOMEM;
2387 	}
2388 
2389 	/*
2390 	 * Prepend overhead fields for backward compatibility.
2391 	 */
2392 	if (!is_strict_order(field_order)) {
2393 		str = setup_overhead(str);
2394 		if (str == NULL) {
2395 			error("Not enough memory to set up overhead keys");
2396 			return -ENOMEM;
2397 		}
2398 	}
2399 
2400 	ret = setup_sort_list(str, evlist);
2401 
2402 	free(str);
2403 	return ret;
2404 }
2405 
2406 void perf_hpp__set_elide(int idx, bool elide)
2407 {
2408 	struct perf_hpp_fmt *fmt;
2409 	struct hpp_sort_entry *hse;
2410 
2411 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2412 		if (!perf_hpp__is_sort_entry(fmt))
2413 			continue;
2414 
2415 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2416 		if (hse->se->se_width_idx == idx) {
2417 			fmt->elide = elide;
2418 			break;
2419 		}
2420 	}
2421 }
2422 
2423 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2424 {
2425 	if (list && strlist__nr_entries(list) == 1) {
2426 		if (fp != NULL)
2427 			fprintf(fp, "# %s: %s\n", list_name,
2428 				strlist__entry(list, 0)->s);
2429 		return true;
2430 	}
2431 	return false;
2432 }
2433 
2434 static bool get_elide(int idx, FILE *output)
2435 {
2436 	switch (idx) {
2437 	case HISTC_SYMBOL:
2438 		return __get_elide(symbol_conf.sym_list, "symbol", output);
2439 	case HISTC_DSO:
2440 		return __get_elide(symbol_conf.dso_list, "dso", output);
2441 	case HISTC_COMM:
2442 		return __get_elide(symbol_conf.comm_list, "comm", output);
2443 	default:
2444 		break;
2445 	}
2446 
2447 	if (sort__mode != SORT_MODE__BRANCH)
2448 		return false;
2449 
2450 	switch (idx) {
2451 	case HISTC_SYMBOL_FROM:
2452 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2453 	case HISTC_SYMBOL_TO:
2454 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2455 	case HISTC_DSO_FROM:
2456 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2457 	case HISTC_DSO_TO:
2458 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2459 	default:
2460 		break;
2461 	}
2462 
2463 	return false;
2464 }
2465 
2466 void sort__setup_elide(FILE *output)
2467 {
2468 	struct perf_hpp_fmt *fmt;
2469 	struct hpp_sort_entry *hse;
2470 
2471 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2472 		if (!perf_hpp__is_sort_entry(fmt))
2473 			continue;
2474 
2475 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2476 		fmt->elide = get_elide(hse->se->se_width_idx, output);
2477 	}
2478 
2479 	/*
2480 	 * It makes no sense to elide all of the sort entries.
2481 	 * Just revert them so they show up again.
2482 	 */
2483 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2484 		if (!perf_hpp__is_sort_entry(fmt))
2485 			continue;
2486 
2487 		if (!fmt->elide)
2488 			return;
2489 	}
2490 
2491 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2492 		if (!perf_hpp__is_sort_entry(fmt))
2493 			continue;
2494 
2495 		fmt->elide = false;
2496 	}
2497 }
2498 
2499 static int output_field_add(struct perf_hpp_list *list, char *tok)
2500 {
2501 	unsigned int i;
2502 
2503 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2504 		struct sort_dimension *sd = &common_sort_dimensions[i];
2505 
2506 		if (strncasecmp(tok, sd->name, strlen(tok)))
2507 			continue;
2508 
2509 		return __sort_dimension__add_output(list, sd);
2510 	}
2511 
2512 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2513 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2514 
2515 		if (strncasecmp(tok, hd->name, strlen(tok)))
2516 			continue;
2517 
2518 		return __hpp_dimension__add_output(list, hd);
2519 	}
2520 
2521 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2522 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2523 
2524 		if (strncasecmp(tok, sd->name, strlen(tok)))
2525 			continue;
2526 
2527 		return __sort_dimension__add_output(list, sd);
2528 	}
2529 
2530 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2531 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2532 
2533 		if (strncasecmp(tok, sd->name, strlen(tok)))
2534 			continue;
2535 
2536 		return __sort_dimension__add_output(list, sd);
2537 	}
2538 
2539 	return -ESRCH;
2540 }
2541 
2542 static int setup_output_list(struct perf_hpp_list *list, char *str)
2543 {
2544 	char *tmp, *tok;
2545 	int ret = 0;
2546 
2547 	for (tok = strtok_r(str, ", ", &tmp);
2548 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
2549 		ret = output_field_add(list, tok);
2550 		if (ret == -EINVAL) {
2551 			error("Invalid --fields key: `%s'", tok);
2552 			break;
2553 		} else if (ret == -ESRCH) {
2554 			error("Unknown --fields key: `%s'", tok);
2555 			break;
2556 		}
2557 	}
2558 
2559 	return ret;
2560 }
2561 
2562 static void reset_dimensions(void)
2563 {
2564 	unsigned int i;
2565 
2566 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2567 		common_sort_dimensions[i].taken = 0;
2568 
2569 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2570 		hpp_sort_dimensions[i].taken = 0;
2571 
2572 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2573 		bstack_sort_dimensions[i].taken = 0;
2574 
2575 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2576 		memory_sort_dimensions[i].taken = 0;
2577 }
2578 
2579 bool is_strict_order(const char *order)
2580 {
2581 	return order && (*order != '+');
2582 }
2583 
2584 static int __setup_output_field(void)
2585 {
2586 	char *str, *strp;
2587 	int ret = -EINVAL;
2588 
2589 	if (field_order == NULL)
2590 		return 0;
2591 
2592 	strp = str = strdup(field_order);
2593 	if (str == NULL) {
2594 		error("Not enough memory to set up output fields");
2595 		return -ENOMEM;
2596 	}
2597 
2598 	if (!is_strict_order(field_order))
2599 		strp++;
2600 
2601 	if (!strlen(strp)) {
2602 		error("Invalid --fields key: `+'");
2603 		goto out;
2604 	}
2605 
2606 	ret = setup_output_list(&perf_hpp_list, strp);
2607 
2608 out:
2609 	free(str);
2610 	return ret;
2611 }
2612 
2613 int setup_sorting(struct perf_evlist *evlist)
2614 {
2615 	int err;
2616 
2617 	err = __setup_sorting(evlist);
2618 	if (err < 0)
2619 		return err;
2620 
2621 	if (parent_pattern != default_parent_pattern) {
2622 		err = sort_dimension__add("parent", evlist);
2623 		if (err < 0)
2624 			return err;
2625 	}
2626 
2627 	reset_dimensions();
2628 
2629 	/*
2630 	 * perf diff doesn't use default hpp output fields.
2631 	 */
2632 	if (sort__mode != SORT_MODE__DIFF)
2633 		perf_hpp__init();
2634 
2635 	err = __setup_output_field();
2636 	if (err < 0)
2637 		return err;
2638 
2639 	/* copy sort keys to output fields */
2640 	perf_hpp__setup_output_field(&perf_hpp_list);
2641 	/* and then copy output fields to sort keys */
2642 	perf_hpp__append_sort_keys(&perf_hpp_list);
2643 
2644 	return 0;
2645 }
2646 
2647 void reset_output_field(void)
2648 {
2649 	sort__need_collapse = 0;
2650 	sort__has_parent = 0;
2651 	sort__has_sym = 0;
2652 	sort__has_dso = 0;
2653 
2654 	field_order = NULL;
2655 	sort_order = NULL;
2656 
2657 	reset_dimensions();
2658 	perf_hpp__reset_output_field(&perf_hpp_list);
2659 }
2660