xref: /linux/tools/perf/builtin-report.c (revision a36e9f5cfe9eb3a1dce8769c7058251c42705357)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-report.c
4  *
5  * Builtin report command: Analyze the perf.data input file,
6  * look up and read DSOs and symbol information and display
7  * a histogram of the results, sorted along various keys.
8  */
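/*
 * Illustrative invocations (examples only, not an exhaustive list):
 *
 *   perf record -g -- ./workload      # record samples with callchains
 *   perf report --stdio --sort=comm,dso,symbol
 *   perf report --header-only         # only print the perf.data header
 *   perf report --tasks               # list the tasks recorded in the file
 */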
9 #include "builtin.h"
10 
11 #include "util/config.h"
12 
13 #include "util/annotate.h"
14 #include "util/color.h"
15 #include "util/dso.h"
16 #include <linux/list.h>
17 #include <linux/rbtree.h>
18 #include <linux/err.h>
19 #include <linux/zalloc.h>
20 #include "util/map.h"
21 #include "util/symbol.h"
22 #include "util/map_symbol.h"
23 #include "util/mem-events.h"
24 #include "util/branch.h"
25 #include "util/callchain.h"
26 #include "util/values.h"
27 
28 #include "perf.h"
29 #include "util/debug.h"
30 #include "util/evlist.h"
31 #include "util/evsel.h"
32 #include "util/evswitch.h"
33 #include "util/header.h"
34 #include "util/mem-info.h"
35 #include "util/session.h"
36 #include "util/srcline.h"
37 #include "util/tool.h"
38 
39 #include <subcmd/parse-options.h>
40 #include <subcmd/exec-cmd.h>
41 #include "util/parse-events.h"
42 
43 #include "util/thread.h"
44 #include "util/sort.h"
45 #include "util/hist.h"
46 #include "util/data.h"
47 #include "arch/common.h"
48 #include "util/time-utils.h"
49 #include "util/auxtrace.h"
50 #include "util/units.h"
51 #include "util/util.h" // perf_tip()
52 #include "ui/ui.h"
53 #include "ui/progress.h"
54 #include "util/block-info.h"
55 
56 #include <dlfcn.h>
57 #include <errno.h>
58 #include <inttypes.h>
59 #include <regex.h>
60 #include <linux/ctype.h>
61 #include <signal.h>
62 #include <linux/bitmap.h>
63 #include <linux/list_sort.h>
64 #include <linux/string.h>
65 #include <linux/stringify.h>
66 #include <linux/time64.h>
67 #include <sys/types.h>
68 #include <sys/stat.h>
69 #include <unistd.h>
70 #include <linux/mman.h>
71 
72 #ifdef HAVE_LIBTRACEEVENT
73 #include <traceevent/event-parse.h>
74 #endif
75 
76 struct report {
77 	struct perf_tool	tool;
78 	struct perf_session	*session;
79 	struct evswitch		evswitch;
80 #ifdef HAVE_SLANG_SUPPORT
81 	bool			use_tui;
82 #endif
83 #ifdef HAVE_GTK2_SUPPORT
84 	bool			use_gtk;
85 #endif
86 	bool			use_stdio;
87 	bool			show_full_info;
88 	bool			show_threads;
89 	bool			inverted_callchain;
90 	bool			mem_mode;
91 	bool			stats_mode;
92 	bool			tasks_mode;
93 	bool			mmaps_mode;
94 	bool			header;
95 	bool			header_only;
96 	bool			nonany_branch_mode;
97 	bool			group_set;
98 	bool			stitch_lbr;
99 	bool			disable_order;
100 	bool			skip_empty;
101 	bool			data_type;
102 	int			max_stack;
103 	struct perf_read_values	show_threads_values;
104 	const char		*pretty_printing_style;
105 	const char		*cpu_list;
106 	const char		*symbol_filter_str;
107 	const char		*time_str;
108 	struct perf_time_interval *ptime_range;
109 	int			range_size;
110 	int			range_num;
111 	float			min_percent;
112 	u64			nr_entries;
113 	u64			queue_size;
114 	u64			total_cycles;
115 	int			socket_filter;
116 	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
117 	struct branch_type_stat	brtype_stat;
118 	bool			symbol_ipc;
119 	bool			total_cycles_mode;
120 	struct block_report	*block_reports;
121 	int			nr_block_reports;
122 };
123 
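/*
 * perf_config() callback for the [report] section of perfconfig.  An
 * illustrative ~/.perfconfig snippet handled by the checks below:
 *
 *   [report]
 *           children = false
 *           percent-limit = 0.5
 *           sort_order = comm,dso,symbol
 *
 * Unknown report.* variables are ignored with a debug message.
 */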
124 static int report__config(const char *var, const char *value, void *cb)
125 {
126 	struct report *rep = cb;
127 
128 	if (!strcmp(var, "report.group")) {
129 		symbol_conf.event_group = perf_config_bool(var, value);
130 		return 0;
131 	}
132 	if (!strcmp(var, "report.percent-limit")) {
133 		double pcnt = strtof(value, NULL);
134 
135 		rep->min_percent = pcnt;
136 		callchain_param.min_percent = pcnt;
137 		return 0;
138 	}
139 	if (!strcmp(var, "report.children")) {
140 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
141 		return 0;
142 	}
143 	if (!strcmp(var, "report.queue-size"))
144 		return perf_config_u64(&rep->queue_size, var, value);
145 
146 	if (!strcmp(var, "report.sort_order")) {
147 		default_sort_order = strdup(value);
148 		if (!default_sort_order) {
149 			pr_err("Not enough memory for report.sort_order\n");
150 			return -1;
151 		}
152 		return 0;
153 	}
154 
155 	if (!strcmp(var, "report.skip-empty")) {
156 		rep->skip_empty = perf_config_bool(var, value);
157 		return 0;
158 	}
159 
160 	pr_debug("%s variable unknown, ignoring...\n", var);
161 	return 0;
162 }
163 
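/*
 * Callback invoked for each hist entry created by hist_entry_iter__add():
 * when annotation or IPC output is needed, account the sample to the
 * relevant address/symbol so the annotation histograms are populated.
 */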
164 static int hist_iter__report_callback(struct hist_entry_iter *iter,
165 				      struct addr_location *al, bool single,
166 				      void *arg)
167 {
168 	int err = 0;
169 	struct report *rep = arg;
170 	struct hist_entry *he = iter->he;
171 	struct evsel *evsel = iter->evsel;
172 	struct perf_sample *sample = iter->sample;
173 	struct mem_info *mi;
174 	struct branch_info *bi;
175 
176 	if (!ui__has_annotation() && !rep->symbol_ipc)
177 		return 0;
178 
179 	if (sort__mode == SORT_MODE__BRANCH) {
180 		bi = he->branch_info;
181 		err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
182 		if (err)
183 			goto out;
184 
185 		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
186 
187 	} else if (rep->mem_mode) {
188 		mi = he->mem_info;
189 		err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
190 		if (err)
191 			goto out;
192 
193 		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
194 
195 	} else if (symbol_conf.cumulate_callchain) {
196 		if (single)
197 			err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
198 	} else {
199 		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
200 	}
201 
202 out:
203 	return err;
204 }
205 
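/*
 * Branch-mode variant of the callback above: always accumulate branch
 * type statistics, and account samples to both ends of the branch when
 * annotation or IPC output is needed.
 */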
206 static int hist_iter__branch_callback(struct hist_entry_iter *iter,
207 				      struct addr_location *al __maybe_unused,
208 				      bool single __maybe_unused,
209 				      void *arg)
210 {
211 	struct hist_entry *he = iter->he;
212 	struct report *rep = arg;
213 	struct branch_info *bi = he->branch_info;
214 	struct perf_sample *sample = iter->sample;
215 	struct evsel *evsel = iter->evsel;
216 	int err;
217 
218 	branch_type_count(&rep->brtype_stat, &bi->flags,
219 			  bi->from.addr, bi->to.addr);
220 
221 	if (!ui__has_annotation() && !rep->symbol_ipc)
222 		return 0;
223 
224 	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
225 	if (err)
226 		goto out;
227 
228 	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
229 
230 out:
231 	return err;
232 }
233 
234 static void setup_forced_leader(struct report *report,
235 				struct evlist *evlist)
236 {
237 	if (report->group_set)
238 		evlist__force_leader(evlist);
239 }
240 
241 static int process_feature_event(struct perf_session *session,
242 				 union perf_event *event)
243 {
244 	struct report *rep = container_of(session->tool, struct report, tool);
245 
246 	if (event->feat.feat_id < HEADER_LAST_FEATURE)
247 		return perf_event__process_feature(session, event);
248 
249 	if (event->feat.feat_id != HEADER_LAST_FEATURE) {
250 		pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
251 		       event->feat.feat_id);
252 		return -1;
253 	} else if (rep->header_only) {
254 		session_done = 1;
255 	}
256 
257 	/*
258 	 * (feat_id = HEADER_LAST_FEATURE) is the end marker, which
259 	 * means all features have been received; now we can force the
260 	 * group leader if needed.
261 	 */
262 	setup_forced_leader(rep, session->evlist);
263 	return 0;
264 }
265 
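/*
 * Per-sample handler: apply the --time, event switch and --cpu filters,
 * resolve the sample to a thread/map/symbol, pick the hist_entry_iter
 * ops matching the sort mode (branch, mem, cumulative or normal),
 * optionally account cycles, and add the sample to the histograms.
 */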
266 static int process_sample_event(struct perf_tool *tool,
267 				union perf_event *event,
268 				struct perf_sample *sample,
269 				struct evsel *evsel,
270 				struct machine *machine)
271 {
272 	struct report *rep = container_of(tool, struct report, tool);
273 	struct addr_location al;
274 	struct hist_entry_iter iter = {
275 		.evsel 			= evsel,
276 		.sample 		= sample,
277 		.hide_unresolved 	= symbol_conf.hide_unresolved,
278 		.add_entry_cb 		= hist_iter__report_callback,
279 	};
280 	int ret = 0;
281 
282 	if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
283 					  sample->time)) {
284 		return 0;
285 	}
286 
287 	if (evswitch__discard(&rep->evswitch, evsel))
288 		return 0;
289 
290 	addr_location__init(&al);
291 	if (machine__resolve(machine, &al, sample) < 0) {
292 		pr_debug("problem processing %d event, skipping it.\n",
293 			 event->header.type);
294 		ret = -1;
295 		goto out_put;
296 	}
297 
298 	if (rep->stitch_lbr)
299 		thread__set_lbr_stitch_enable(al.thread, true);
300 
301 	if (symbol_conf.hide_unresolved && al.sym == NULL)
302 		goto out_put;
303 
304 	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
305 		goto out_put;
306 
307 	if (sort__mode == SORT_MODE__BRANCH) {
308 		/*
309 		 * A non-synthesized event might not have a branch stack if
310 		 * branch stacks have been synthesized (using itrace options).
311 		 */
312 		if (!sample->branch_stack)
313 			goto out_put;
314 
315 		iter.add_entry_cb = hist_iter__branch_callback;
316 		iter.ops = &hist_iter_branch;
317 	} else if (rep->mem_mode) {
318 		iter.ops = &hist_iter_mem;
319 	} else if (symbol_conf.cumulate_callchain) {
320 		iter.ops = &hist_iter_cumulative;
321 	} else {
322 		iter.ops = &hist_iter_normal;
323 	}
324 
325 	if (al.map != NULL)
326 		dso__set_hit(map__dso(al.map));
327 
328 	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
329 		hist__account_cycles(sample->branch_stack, &al, sample,
330 				     rep->nonany_branch_mode,
331 				     &rep->total_cycles);
332 	}
333 
334 	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
335 	if (ret < 0)
336 		pr_debug("problem adding hist entry, skipping event\n");
337 out_put:
338 	addr_location__exit(&al);
339 	return ret;
340 }
341 
342 static int process_read_event(struct perf_tool *tool,
343 			      union perf_event *event,
344 			      struct perf_sample *sample __maybe_unused,
345 			      struct evsel *evsel,
346 			      struct machine *machine __maybe_unused)
347 {
348 	struct report *rep = container_of(tool, struct report, tool);
349 
350 	if (rep->show_threads) {
351 		const char *name = evsel__name(evsel);
352 		int err = perf_read_values_add_value(&rep->show_threads_values,
353 					   event->read.pid, event->read.tid,
354 					   evsel->core.idx,
355 					   name,
356 					   event->read.value);
357 
358 		if (err)
359 			return err;
360 	}
361 
362 	return 0;
363 }
364 
365 /* For pipe mode, sample_type is not currently set */
366 static int report__setup_sample_type(struct report *rep)
367 {
368 	struct perf_session *session = rep->session;
369 	u64 sample_type = evlist__combined_sample_type(session->evlist);
370 	bool is_pipe = perf_data__is_pipe(session->data);
371 	struct evsel *evsel;
372 
373 	if (session->itrace_synth_opts->callchain ||
374 	    session->itrace_synth_opts->add_callchain ||
375 	    (!is_pipe &&
376 	     perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
377 	     !session->itrace_synth_opts->set))
378 		sample_type |= PERF_SAMPLE_CALLCHAIN;
379 
380 	if (session->itrace_synth_opts->last_branch ||
381 	    session->itrace_synth_opts->add_last_branch)
382 		sample_type |= PERF_SAMPLE_BRANCH_STACK;
383 
384 	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
385 		if (perf_hpp_list.parent) {
386 			ui__error("Selected --sort parent, but no "
387 				    "callchain data. Did you call "
388 				    "'perf record' without -g?\n");
389 			return -EINVAL;
390 		}
391 		if (symbol_conf.use_callchain &&
392 			!symbol_conf.show_branchflag_count) {
393 			ui__error("Selected -g or --branch-history.\n"
394 				  "But no callchain or branch data.\n"
395 				  "Did you call 'perf record' without -g or -b?\n");
396 			return -1;
397 		}
398 	} else if (!callchain_param.enabled &&
399 		   callchain_param.mode != CHAIN_NONE &&
400 		   !symbol_conf.use_callchain) {
401 			symbol_conf.use_callchain = true;
402 			if (callchain_register_param(&callchain_param) < 0) {
403 				ui__error("Can't register callchain params.\n");
404 				return -EINVAL;
405 			}
406 	}
407 
408 	if (symbol_conf.cumulate_callchain) {
409 		/* Silently ignore if callchain is missing */
410 		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
411 			symbol_conf.cumulate_callchain = false;
412 			perf_hpp__cancel_cumulate();
413 		}
414 	}
415 
416 	if (sort__mode == SORT_MODE__BRANCH) {
417 		if (!is_pipe &&
418 		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
419 			ui__error("Selected -b but no branch data. "
420 				  "Did you call perf record without -b?\n");
421 			return -1;
422 		}
423 	}
424 
425 	if (sort__mode == SORT_MODE__MEMORY) {
426 		/*
427 		 * FIXUP: prior to kernel 5.18, Arm SPE failed to set the
428 		 * PERF_SAMPLE_DATA_SRC bit in the sample type.  For backward
429 		 * compatibility, set the bit if it's an old perf data file.
430 		 */
431 		evlist__for_each_entry(session->evlist, evsel) {
432 			if (strstr(evsel__name(evsel), "arm_spe") &&
433 				!(sample_type & PERF_SAMPLE_DATA_SRC)) {
434 				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
435 				sample_type |= PERF_SAMPLE_DATA_SRC;
436 			}
437 		}
438 
439 		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
440 			ui__error("Selected --mem-mode but no mem data. "
441 				  "Did you call perf record without -d?\n");
442 			return -1;
443 		}
444 	}
445 
446 	callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
447 
448 	if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
449 		ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
450 			    "Please apply --call-graph lbr when recording.\n");
451 		rep->stitch_lbr = false;
452 	}
453 
454 	/* ??? handle more cases than just ANY? */
455 	if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
456 		rep->nonany_branch_mode = true;
457 
458 #if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
459 	if (dwarf_callchain_users) {
460 		ui__warning("Please install libunwind or libdw "
461 			    "development packages during the perf build.\n");
462 	}
463 #endif
464 
465 	return 0;
466 }
467 
468 static void sig_handler(int sig __maybe_unused)
469 {
470 	session_done = 1;
471 }
472 
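/*
 * Print the "# Samples: ..." header above an event's histogram in
 * --stdio mode, including group totals, the --time slices, the total
 * weight in --mem-mode and any processor socket filter.
 */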
473 static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
474 					      const char *evname, FILE *fp)
475 {
476 	size_t ret;
477 	char unit;
478 	unsigned long nr_samples = hists->stats.nr_samples;
479 	u64 nr_events = hists->stats.total_period;
480 	struct evsel *evsel = hists_to_evsel(hists);
481 	char buf[512];
482 	size_t size = sizeof(buf);
483 	int socked_id = hists->socket_filter;
484 
485 	if (quiet)
486 		return 0;
487 
488 	if (symbol_conf.filter_relative) {
489 		nr_samples = hists->stats.nr_non_filtered_samples;
490 		nr_events = hists->stats.total_non_filtered_period;
491 	}
492 
493 	if (evsel__is_group_event(evsel)) {
494 		struct evsel *pos;
495 
496 		evsel__group_desc(evsel, buf, size);
497 		evname = buf;
498 
499 		for_each_group_member(pos, evsel) {
500 			const struct hists *pos_hists = evsel__hists(pos);
501 
502 			if (symbol_conf.filter_relative) {
503 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
504 				nr_events += pos_hists->stats.total_non_filtered_period;
505 			} else {
506 				nr_samples += pos_hists->stats.nr_samples;
507 				nr_events += pos_hists->stats.total_period;
508 			}
509 		}
510 	}
511 
512 	nr_samples = convert_unit(nr_samples, &unit);
513 	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
514 	if (evname != NULL) {
515 		ret += fprintf(fp, " of event%s '%s'",
516 			       evsel->core.nr_members > 1 ? "s" : "", evname);
517 	}
518 
519 	if (rep->time_str)
520 		ret += fprintf(fp, " (time slices: %s)", rep->time_str);
521 
522 	if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
523 		ret += fprintf(fp, ", show reference callgraph");
524 	}
525 
526 	if (rep->mem_mode) {
527 		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
528 		ret += fprintf(fp, "\n# Sort order   : %s", sort_order ? : default_mem_sort_order);
529 	} else
530 		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
531 
532 	if (socked_id > -1)
533 		ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);
534 
535 	return ret + fprintf(fp, "\n#\n");
536 }
537 
538 static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
539 {
540 	struct evsel *pos;
541 	int i = 0, ret;
542 
543 	evlist__for_each_entry(evlist, pos) {
544 		ret = report__browse_block_hists(&rep->block_reports[i++].hist,
545 						 rep->min_percent, pos,
546 						 &rep->session->header.env);
547 		if (ret != 0)
548 			return ret;
549 	}
550 
551 	return 0;
552 }
553 
554 static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
555 {
556 	struct evsel *pos;
557 	int i = 0;
558 
559 	if (!quiet) {
560 		fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
561 			evlist->stats.total_lost_samples);
562 	}
563 
564 	evlist__for_each_entry(evlist, pos) {
565 		struct hists *hists = evsel__hists(pos);
566 		const char *evname = evsel__name(pos);
567 
568 		if (symbol_conf.event_group && !evsel__is_group_leader(pos))
569 			continue;
570 
571 		if (rep->skip_empty && !hists->stats.nr_samples)
572 			continue;
573 
574 		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
575 
576 		if (rep->total_cycles_mode) {
577 			report__browse_block_hists(&rep->block_reports[i++].hist,
578 						   rep->min_percent, pos, NULL);
579 			continue;
580 		}
581 
582 		hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
583 			       !(symbol_conf.use_callchain ||
584 			         symbol_conf.show_branchflag_count));
585 		fprintf(stdout, "\n\n");
586 	}
587 
588 	if (!quiet)
589 		fprintf(stdout, "#\n# (%s)\n#\n", help);
590 
591 	if (rep->show_threads) {
592 		bool style = !strcmp(rep->pretty_printing_style, "raw");
593 		perf_read_values_display(stdout, &rep->show_threads_values,
594 					 style);
595 		perf_read_values_destroy(&rep->show_threads_values);
596 	}
597 
598 	if (sort__mode == SORT_MODE__BRANCH)
599 		branch_type_stat_display(stdout, &rep->brtype_stat);
600 
601 	return 0;
602 }
603 
604 static void report__warn_kptr_restrict(const struct report *rep)
605 {
606 	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
607 	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
608 
609 	if (evlist__exclude_kernel(rep->session->evlist))
610 		return;
611 
612 	if (kernel_map == NULL ||
613 	    (dso__hit(map__dso(kernel_map)) &&
614 	     (kernel_kmap->ref_reloc_sym == NULL ||
615 	      kernel_kmap->ref_reloc_sym->addr == 0))) {
616 		const char *desc =
617 		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
618 		    "can't be resolved.";
619 
620 		if (kernel_map && map__has_symbols(kernel_map)) {
621 			desc = "If some relocation was applied (e.g. "
622 			       "kexec) symbols may be misresolved.";
623 		}
624 
625 		ui__warning(
626 "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
627 "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
628 "Samples in kernel modules can't be resolved as well.\n\n",
629 		desc);
630 	}
631 }
632 
633 static int report__gtk_browse_hists(struct report *rep, const char *help)
634 {
635 	int (*hist_browser)(struct evlist *evlist, const char *help,
636 			    struct hist_browser_timer *timer, float min_pcnt);
637 
638 	hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");
639 
640 	if (hist_browser == NULL) {
641 		ui__error("GTK browser not found!\n");
642 		return -1;
643 	}
644 
645 	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
646 }
647 
648 static int report__browse_hists(struct report *rep)
649 {
650 	int ret;
651 	struct perf_session *session = rep->session;
652 	struct evlist *evlist = session->evlist;
653 	char *help = NULL, *path = NULL;
654 
655 	path = system_path(TIPDIR);
656 	if (perf_tip(&help, path) || help == NULL) {
657 		/* fallback for people who don't install perf ;-) */
658 		free(path);
659 		path = system_path(DOCDIR);
660 		if (perf_tip(&help, path) || help == NULL)
661 			help = strdup("Cannot load tips.txt file, please install perf!");
662 	}
663 	free(path);
664 
665 	switch (use_browser) {
666 	case 1:
667 		if (rep->total_cycles_mode) {
668 			ret = evlist__tui_block_hists_browse(evlist, rep);
669 			break;
670 		}
671 
672 		ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
673 					       &session->header.env, true);
674 		/*
675 		 * Usually "ret" is the last pressed key, and we only
676 		 * care if the key notifies us to switch data file.
677 		 */
678 		if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
679 			ret = 0;
680 		break;
681 	case 2:
682 		ret = report__gtk_browse_hists(rep, help);
683 		break;
684 	default:
685 		ret = evlist__tty_browse_hists(evlist, rep, help);
686 		break;
687 	}
688 	free(help);
689 	return ret;
690 }
691 
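/*
 * Merge (collapse) hist entries that are equal under the sort keys and,
 * with event group view enabled, match and link member hists into their
 * group leader's hists.
 */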
692 static int report__collapse_hists(struct report *rep)
693 {
694 	struct perf_session *session = rep->session;
695 	struct evlist *evlist = session->evlist;
696 	struct ui_progress prog;
697 	struct evsel *pos;
698 	int ret = 0;
699 
700 	/*
701 	 * Pipe data needs to set up the hierarchy hpp formats now, because it
702 	 * cannot know about the evsels before reading the data.  Normal file
703 	 * data saves the event (attribute) info in the header section, but
704 	 * pipe data does not have that luxury.
705 	 */
706 	if (perf_data__is_pipe(session->data)) {
707 		if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
708 			ui__error("Failed to setup hierarchy output formats\n");
709 			return -1;
710 		}
711 	}
712 
713 	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
714 
715 	evlist__for_each_entry(rep->session->evlist, pos) {
716 		struct hists *hists = evsel__hists(pos);
717 
718 		if (pos->core.idx == 0)
719 			hists->symbol_filter_str = rep->symbol_filter_str;
720 
721 		hists->socket_filter = rep->socket_filter;
722 
723 		ret = hists__collapse_resort(hists, &prog);
724 		if (ret < 0)
725 			break;
726 
727 		/* Non-group events are considered as leaders */
728 		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
729 			struct hists *leader_hists = evsel__hists(evsel__leader(pos));
730 
731 			hists__match(leader_hists, hists);
732 			hists__link(leader_hists, hists);
733 		}
734 	}
735 
736 	ui_progress__finish();
737 	return ret;
738 }
739 
740 static int hists__resort_cb(struct hist_entry *he, void *arg)
741 {
742 	struct report *rep = arg;
743 	struct symbol *sym = he->ms.sym;
744 
745 	if (rep->symbol_ipc && sym && !sym->annotate2) {
746 		struct evsel *evsel = hists_to_evsel(he->hists);
747 
748 		symbol__annotate2(&he->ms, evsel, NULL);
749 	}
750 
751 	return 0;
752 }
753 
754 static void report__output_resort(struct report *rep)
755 {
756 	struct ui_progress prog;
757 	struct evsel *pos;
758 
759 	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
760 
761 	evlist__for_each_entry(rep->session->evlist, pos) {
762 		evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
763 	}
764 
765 	ui_progress__finish();
766 }
767 
768 static int count_sample_event(struct perf_tool *tool __maybe_unused,
769 			      union perf_event *event __maybe_unused,
770 			      struct perf_sample *sample __maybe_unused,
771 			      struct evsel *evsel,
772 			      struct machine *machine __maybe_unused)
773 {
774 	struct hists *hists = evsel__hists(evsel);
775 
776 	hists__inc_nr_events(hists);
777 	return 0;
778 }
779 
780 static int count_lost_samples_event(struct perf_tool *tool,
781 				    union perf_event *event,
782 				    struct perf_sample *sample,
783 				    struct machine *machine __maybe_unused)
784 {
785 	struct report *rep = container_of(tool, struct report, tool);
786 	struct evsel *evsel;
787 
788 	evsel = evlist__id2evsel(rep->session->evlist, sample->id);
789 	if (evsel) {
790 		hists__inc_nr_lost_samples(evsel__hists(evsel),
791 					   event->lost_samples.lost);
792 	}
793 	return 0;
794 }
795 
796 static int process_attr(struct perf_tool *tool __maybe_unused,
797 			union perf_event *event,
798 			struct evlist **pevlist);
799 
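/* --stats mode: only count samples and lost samples per event. */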
800 static void stats_setup(struct report *rep)
801 {
802 	memset(&rep->tool, 0, sizeof(rep->tool));
803 	rep->tool.attr = process_attr;
804 	rep->tool.sample = count_sample_event;
805 	rep->tool.lost_samples = count_lost_samples_event;
806 	rep->tool.no_warn = true;
807 }
808 
809 static int stats_print(struct report *rep)
810 {
811 	struct perf_session *session = rep->session;
812 
813 	perf_session__fprintf_nr_events(session, stdout);
814 	evlist__fprintf_nr_events(session->evlist, stdout);
815 	return 0;
816 }
817 
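/*
 * --tasks/--mmaps mode: process just the task lifetime (and, for
 * --mmaps, mmap) events needed to print the recorded process tree.
 */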
818 static void tasks_setup(struct report *rep)
819 {
820 	memset(&rep->tool, 0, sizeof(rep->tool));
821 	rep->tool.ordered_events = true;
822 	if (rep->mmaps_mode) {
823 		rep->tool.mmap = perf_event__process_mmap;
824 		rep->tool.mmap2 = perf_event__process_mmap2;
825 	}
826 	rep->tool.attr = process_attr;
827 	rep->tool.comm = perf_event__process_comm;
828 	rep->tool.exit = perf_event__process_exit;
829 	rep->tool.fork = perf_event__process_fork;
830 	rep->tool.no_warn = true;
831 }
832 
833 struct maps__fprintf_task_args {
834 	int indent;
835 	FILE *fp;
836 	size_t printed;
837 };
838 
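/*
 * Print a single map in a /proc/<pid>/maps-like format: address range,
 * protection bits, shared/private flag, file offset, inode and DSO name.
 */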
839 static int maps__fprintf_task_cb(struct map *map, void *data)
840 {
841 	struct maps__fprintf_task_args *args = data;
842 	const struct dso *dso = map__dso(map);
843 	u32 prot = map__prot(map);
844 	int ret;
845 
846 	ret = fprintf(args->fp,
847 		"%*s  %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
848 		args->indent, "", map__start(map), map__end(map),
849 		prot & PROT_READ ? 'r' : '-',
850 		prot & PROT_WRITE ? 'w' : '-',
851 		prot & PROT_EXEC ? 'x' : '-',
852 		map__flags(map) ? 's' : 'p',
853 		map__pgoff(map),
854 		dso__id_const(dso)->ino, dso__name(dso));
855 
856 	if (ret < 0)
857 		return ret;
858 
859 	args->printed += ret;
860 	return 0;
861 }
862 
863 static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
864 {
865 	struct maps__fprintf_task_args args = {
866 		.indent = indent,
867 		.fp = fp,
868 		.printed = 0,
869 	};
870 
871 	maps__for_each_map(maps, maps__fprintf_task_cb, &args);
872 
873 	return args.printed;
874 }
875 
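/*
 * Depth of @thread in the process tree: 0 for the idle/swapper task,
 * 1 for threads whose parent is pid 0, and so on up the ppid chain.
 */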
876 static int thread_level(struct machine *machine, const struct thread *thread)
877 {
878 	struct thread *parent_thread;
879 	int res;
880 
881 	if (thread__tid(thread) <= 0)
882 		return 0;
883 
884 	if (thread__ppid(thread) <= 0)
885 		return 1;
886 
887 	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
888 	if (!parent_thread) {
889 		pr_err("Missing parent thread of %d\n", thread__tid(thread));
890 		return 0;
891 	}
892 	res = 1 + thread_level(machine, parent_thread);
893 	thread__put(parent_thread);
894 	return res;
895 }
896 
897 static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
898 {
899 	int level = thread_level(machine, thread);
900 	int comm_indent = fprintf(fp, "  %8d %8d %8d |%*s",
901 				  thread__pid(thread), thread__tid(thread),
902 				  thread__ppid(thread), level, "");
903 
904 	fprintf(fp, "%s\n", thread__comm_str(thread));
905 
906 	maps__fprintf_task(thread__maps(thread), comm_indent, fp);
907 }
908 
909 /*
910  * Sort two thread list nodes such that they form a tree. The first node is the
911  * root of the tree; its children are ordered numerically after it. If a child
912  * has children itself then they appear immediately after their parent. For
913  * example, the 4 threads in the order they'd appear in the list:
914  * - init with a TID 1 and a parent of 0
915  * - systemd with a TID 3000 and a parent of init/1
916  * - systemd child thread with TID 4000, the parent is 3000
917  * - NetworkManager is a child of init with a TID of 3500.
918  */
919 static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
920 {
921 	struct machine *machine = priv;
922 	struct thread_list *task_a = list_entry(la, struct thread_list, list);
923 	struct thread_list *task_b = list_entry(lb, struct thread_list, list);
924 	struct thread *a = task_a->thread;
925 	struct thread *b = task_b->thread;
926 	int level_a, level_b, res;
927 
928 	/* Same thread? */
929 	if (thread__tid(a) == thread__tid(b))
930 		return 0;
931 
932 	/* Compare a and b to root. */
933 	if (thread__tid(a) == 0)
934 		return -1;
935 
936 	if (thread__tid(b) == 0)
937 		return 1;
938 
939 	/* If parents match sort by tid. */
940 	if (thread__ppid(a) == thread__ppid(b))
941 		return thread__tid(a) < thread__tid(b) ? -1 : 1;
942 
943 	/*
944 	 * Walk a and b up the tree until either their tids match (one is a
945 	 * descendant of the other) or they have a common parent and distinct
946 	 * tids to sort by. First make the depths of the two threads match.
947 	 */
948 	level_a = thread_level(machine, a);
949 	level_b = thread_level(machine, b);
950 	a = thread__get(a);
951 	b = thread__get(b);
952 	for (int i = level_a; i > level_b; i--) {
953 		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));
954 
955 		thread__put(a);
956 		if (!parent) {
957 			pr_err("Missing parent thread of %d\n", thread__tid(a));
958 			thread__put(b);
959 			return -1;
960 		}
961 		a = parent;
962 	}
963 	for (int i = level_b; i > level_a; i--) {
964 		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));
965 
966 		thread__put(b);
967 		if (!parent) {
968 			pr_err("Missing parent thread of %d\n", thread__tid(b));
969 			thread__put(a);
970 			return 1;
971 		}
972 		b = parent;
973 	}
974 	/* Search up to a common parent. */
975 	while (thread__ppid(a) != thread__ppid(b)) {
976 		struct thread *parent;
977 
978 		parent = machine__find_thread(machine, -1, thread__ppid(a));
979 		thread__put(a);
980 		if (!parent)
981 			pr_err("Missing parent thread of %d\n", thread__tid(a));
982 		a = parent;
983 		parent = machine__find_thread(machine, -1, thread__ppid(b));
984 		thread__put(b);
985 		if (!parent)
986 			pr_err("Missing parent thread of %d\n", thread__tid(b));
987 		b = parent;
988 		if (!a || !b) {
989 			/* Handle missing parent (unexpected) with some sanity. */
990 			thread__put(a);
991 			thread__put(b);
992 			return !a && !b ? 0 : (!a ? -1 : 1);
993 		}
994 	}
995 	if (thread__tid(a) == thread__tid(b)) {
996 		/* a is a child of b or vice-versa, deeper levels appear later. */
997 		res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
998 	} else {
999 		/* Sort by tid now the parent is the same. */
1000 		res = thread__tid(a) < thread__tid(b) ? -1 : 1;
1001 	}
1002 	thread__put(a);
1003 	thread__put(b);
1004 	return res;
1005 }
1006 
1007 static int tasks_print(struct report *rep, FILE *fp)
1008 {
1009 	struct machine *machine = &rep->session->machines.host;
1010 	LIST_HEAD(tasks);
1011 	int ret;
1012 
1013 	ret = machine__thread_list(machine, &tasks);
1014 	if (!ret) {
1015 		struct thread_list *task;
1016 
1017 		list_sort(machine, &tasks, task_list_cmp);
1018 
1019 		fprintf(fp, "# %8s %8s %8s  %s\n", "pid", "tid", "ppid", "comm");
1020 
1021 		list_for_each_entry(task, &tasks, list)
1022 			task__print_level(machine, task->thread, fp);
1023 	}
1024 	thread_list__delete(&tasks);
1025 	return ret;
1026 }
1027 
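/*
 * Main driver: process all events in the data file, then collapse and
 * resort the histograms and hand them to the chosen browser (TUI, GTK
 * or stdio).  --stats and --tasks modes return early with their own
 * output.
 */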
1028 static int __cmd_report(struct report *rep)
1029 {
1030 	int ret;
1031 	struct perf_session *session = rep->session;
1032 	struct evsel *pos;
1033 	struct perf_data *data = session->data;
1034 
1035 	signal(SIGINT, sig_handler);
1036 
1037 	if (rep->cpu_list) {
1038 		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
1039 					       rep->cpu_bitmap);
1040 		if (ret) {
1041 			ui__error("failed to set cpu bitmap\n");
1042 			return ret;
1043 		}
1044 		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
1045 	}
1046 
1047 	if (rep->show_threads) {
1048 		ret = perf_read_values_init(&rep->show_threads_values);
1049 		if (ret)
1050 			return ret;
1051 	}
1052 
1053 	ret = report__setup_sample_type(rep);
1054 	if (ret) {
1055 		/* report__setup_sample_type() already showed error message */
1056 		return ret;
1057 	}
1058 
1059 	if (rep->stats_mode)
1060 		stats_setup(rep);
1061 
1062 	if (rep->tasks_mode)
1063 		tasks_setup(rep);
1064 
1065 	ret = perf_session__process_events(session);
1066 	if (ret) {
1067 		ui__error("failed to process sample\n");
1068 		return ret;
1069 	}
1070 
1071 	evlist__check_mem_load_aux(session->evlist);
1072 
1073 	if (rep->stats_mode)
1074 		return stats_print(rep);
1075 
1076 	if (rep->tasks_mode)
1077 		return tasks_print(rep, stdout);
1078 
1079 	report__warn_kptr_restrict(rep);
1080 
1081 	evlist__for_each_entry(session->evlist, pos)
1082 		rep->nr_entries += evsel__hists(pos)->nr_entries;
1083 
1084 	if (use_browser == 0) {
1085 		if (verbose > 3)
1086 			perf_session__fprintf(session, stdout);
1087 
1088 		if (verbose > 2)
1089 			perf_session__fprintf_dsos(session, stdout);
1090 
1091 		if (dump_trace) {
1092 			stats_print(rep);
1093 			return 0;
1094 		}
1095 	}
1096 
1097 	ret = report__collapse_hists(rep);
1098 	if (ret) {
1099 		ui__error("failed to process hist entry\n");
1100 		return ret;
1101 	}
1102 
1103 	if (session_done())
1104 		return 0;
1105 
1106 	/*
1107 	 * Recalculate the number of entries after collapsing, since it
1108 	 * might have changed during the collapse phase.
1109 	 */
1110 	rep->nr_entries = 0;
1111 	evlist__for_each_entry(session->evlist, pos)
1112 		rep->nr_entries += evsel__hists(pos)->nr_entries;
1113 
1114 	if (rep->nr_entries == 0) {
1115 		ui__error("The %s data has no samples!\n", data->path);
1116 		return 0;
1117 	}
1118 
1119 	report__output_resort(rep);
1120 
1121 	if (rep->total_cycles_mode) {
1122 		int block_hpps[6] = {
1123 			PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
1124 			PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
1125 			PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
1126 			PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
1127 			PERF_HPP_REPORT__BLOCK_RANGE,
1128 			PERF_HPP_REPORT__BLOCK_DSO,
1129 		};
1130 
1131 		rep->block_reports = block_info__create_report(session->evlist,
1132 							       rep->total_cycles,
1133 							       block_hpps, 6,
1134 							       &rep->nr_block_reports);
1135 		if (!rep->block_reports)
1136 			return -1;
1137 	}
1138 
1139 	return report__browse_hists(rep);
1140 }
1141 
1142 static int
1143 report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1144 {
1145 	struct callchain_param *callchain = opt->value;
1146 
1147 	callchain->enabled = !unset;
1148 	/*
1149 	 * --no-call-graph
1150 	 */
1151 	if (unset) {
1152 		symbol_conf.use_callchain = false;
1153 		callchain->mode = CHAIN_NONE;
1154 		return 0;
1155 	}
1156 
1157 	return parse_callchain_report_opt(arg);
1158 }
1159 
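/*
 * Parse the --time-quantum argument, e.g. "100ms", "1s", "500us" or a
 * bare number of nanoseconds; the result is stored in nanoseconds.
 */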
1160 static int
1161 parse_time_quantum(const struct option *opt, const char *arg,
1162 		   int unset __maybe_unused)
1163 {
1164 	unsigned long *time_q = opt->value;
1165 	char *end;
1166 
1167 	*time_q = strtoul(arg, &end, 0);
1168 	if (end == arg)
1169 		goto parse_err;
1170 	if (*time_q == 0) {
1171 		pr_err("time quantum cannot be 0");
1172 		return -1;
1173 	}
1174 	end = skip_spaces(end);
1175 	if (*end == 0)
1176 		return 0;
1177 	if (!strcmp(end, "s")) {
1178 		*time_q *= NSEC_PER_SEC;
1179 		return 0;
1180 	}
1181 	if (!strcmp(end, "ms")) {
1182 		*time_q *= NSEC_PER_MSEC;
1183 		return 0;
1184 	}
1185 	if (!strcmp(end, "us")) {
1186 		*time_q *= NSEC_PER_USEC;
1187 		return 0;
1188 	}
1189 	if (!strcmp(end, "ns"))
1190 		return 0;
1191 parse_err:
1192 	pr_err("Cannot parse time quantum `%s'\n", arg);
1193 	return -1;
1194 }
1195 
1196 int
1197 report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
1198 				const char *arg, int unset __maybe_unused)
1199 {
1200 	if (arg) {
1201 		int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
1202 		if (err) {
1203 			char buf[BUFSIZ];
1204 			regerror(err, &ignore_callees_regex, buf, sizeof(buf));
1205 			pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
1206 			return -1;
1207 		}
1208 		have_ignore_callees = 1;
1209 	}
1210 
1211 	return 0;
1212 }
1213 
1214 static int
1215 parse_branch_mode(const struct option *opt,
1216 		  const char *str __maybe_unused, int unset)
1217 {
1218 	int *branch_mode = opt->value;
1219 
1220 	*branch_mode = !unset;
1221 	return 0;
1222 }
1223 
1224 static int
1225 parse_percent_limit(const struct option *opt, const char *str,
1226 		    int unset __maybe_unused)
1227 {
1228 	struct report *rep = opt->value;
1229 	double pcnt = strtof(str, NULL);
1230 
1231 	rep->min_percent = pcnt;
1232 	callchain_param.min_percent = pcnt;
1233 	return 0;
1234 }
1235 
1236 static int process_attr(struct perf_tool *tool __maybe_unused,
1237 			union perf_event *event,
1238 			struct evlist **pevlist)
1239 {
1240 	u64 sample_type;
1241 	int err;
1242 
1243 	err = perf_event__process_attr(tool, event, pevlist);
1244 	if (err)
1245 		return err;
1246 
1247 	/*
1248 	 * Check if we need to enable callchains based
1249 	 * on events sample_type.
1250 	 */
1251 	sample_type = evlist__combined_sample_type(*pevlist);
1252 	callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
1253 	return 0;
1254 }
1255 
1256 int cmd_report(int argc, const char **argv)
1257 {
1258 	struct perf_session *session;
1259 	struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
1260 	struct stat st;
1261 	bool has_br_stack = false;
1262 	int branch_mode = -1;
1263 	int last_key = 0;
1264 	bool branch_call_mode = false;
1265 #define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
1266 	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
1267 						    CALLCHAIN_REPORT_HELP
1268 						    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
1269 	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
1270 	const char * const report_usage[] = {
1271 		"perf report [<options>]",
1272 		NULL
1273 	};
1274 	struct report report = {
1275 		.tool = {
1276 			.sample		 = process_sample_event,
1277 			.mmap		 = perf_event__process_mmap,
1278 			.mmap2		 = perf_event__process_mmap2,
1279 			.comm		 = perf_event__process_comm,
1280 			.namespaces	 = perf_event__process_namespaces,
1281 			.cgroup		 = perf_event__process_cgroup,
1282 			.exit		 = perf_event__process_exit,
1283 			.fork		 = perf_event__process_fork,
1284 			.lost		 = perf_event__process_lost,
1285 			.read		 = process_read_event,
1286 			.attr		 = process_attr,
1287 #ifdef HAVE_LIBTRACEEVENT
1288 			.tracing_data	 = perf_event__process_tracing_data,
1289 #endif
1290 			.build_id	 = perf_event__process_build_id,
1291 			.id_index	 = perf_event__process_id_index,
1292 			.auxtrace_info	 = perf_event__process_auxtrace_info,
1293 			.auxtrace	 = perf_event__process_auxtrace,
1294 			.event_update	 = perf_event__process_event_update,
1295 			.feature	 = process_feature_event,
1296 			.ordered_events	 = true,
1297 			.ordering_requires_timestamps = true,
1298 		},
1299 		.max_stack		 = PERF_MAX_STACK_DEPTH,
1300 		.pretty_printing_style	 = "normal",
1301 		.socket_filter		 = -1,
1302 		.skip_empty		 = true,
1303 	};
1304 	char *sort_order_help = sort_help("sort by key(s):");
1305 	char *field_order_help = sort_help("output field(s): overhead period sample ");
1306 	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
1307 	const struct option options[] = {
1308 	OPT_STRING('i', "input", &input_name, "file",
1309 		    "input file name"),
1310 	OPT_INCR('v', "verbose", &verbose,
1311 		    "be more verbose (show symbol address, etc)"),
1312 	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
1313 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1314 		    "dump raw trace in ASCII"),
1315 	OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
1316 	OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
1317 	OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
1318 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1319 		   "file", "vmlinux pathname"),
1320 	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1321                     "don't load vmlinux even if found"),
1322 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1323 		   "file", "kallsyms pathname"),
1324 	OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
1325 	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
1326 		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
1327 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1328 		    "Show a column with the number of samples"),
1329 	OPT_BOOLEAN('T', "threads", &report.show_threads,
1330 		    "Show per-thread event counters"),
1331 	OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
1332 		   "pretty printing style key: normal raw"),
1333 #ifdef HAVE_SLANG_SUPPORT
1334 	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
1335 #endif
1336 #ifdef HAVE_GTK2_SUPPORT
1337 	OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
1338 #endif
1339 	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
1340 		    "Use the stdio interface"),
1341 	OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
1342 	OPT_BOOLEAN(0, "header-only", &report.header_only,
1343 		    "Show only data header."),
1344 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1345 		   sort_order_help),
1346 	OPT_STRING('F', "fields", &field_order, "key[,keys...]",
1347 		   field_order_help),
1348 	OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
1349 		    "Show sample percentage for different cpu modes"),
1350 	OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
1351 		    "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
1352 	OPT_STRING('p', "parent", &parent_pattern, "regex",
1353 		   "regex filter to identify parent, see: '--sort parent'"),
1354 	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
1355 		    "Only display entries with parent-match"),
1356 	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
1357 			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
1358 			     report_callchain_help, &report_parse_callchain_opt,
1359 			     callchain_default_opt),
1360 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1361 		    "Accumulate callchains of children and show total overhead as well. "
1362 		    "Enabled by default, use --no-children to disable."),
1363 	OPT_INTEGER(0, "max-stack", &report.max_stack,
1364 		    "Set the maximum stack depth when parsing the callchain, "
1365 		    "anything beyond the specified depth will be ignored. "
1366 		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1367 	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
1368 		    "alias for inverted call graph"),
1369 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1370 		   "ignore callees of these functions in call graphs",
1371 		   report_parse_ignore_callees_opt),
1372 	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1373 		   "only consider symbols in these dsos"),
1374 	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1375 		   "only consider symbols in these comms"),
1376 	OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
1377 		   "only consider symbols in these pids"),
1378 	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
1379 		   "only consider symbols in these tids"),
1380 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1381 		   "only consider these symbols"),
1382 	OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
1383 		   "only show symbols that (partially) match with this filter"),
1384 	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1385 		   "width[,width...]",
1386 		   "don't try to adjust column width, use these fixed values"),
1387 	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
1388 		   "separator for columns, no spaces will be added between "
1389 		   "columns; '.' is reserved."),
1390 	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
1391 		    "Only display entries resolved to a symbol"),
1392 	OPT_CALLBACK(0, "symfs", NULL, "directory",
1393 		     "Look for files with symbols relative to this directory",
1394 		     symbol__config_symfs),
1395 	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
1396 		   "list of cpus to profile"),
1397 	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
1398 		    "Display extended information about perf.data file"),
1399 	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
1400 		    "Interleave source code with assembly code (default)"),
1401 	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
1402 		    "Display raw encoding of assembly instructions (default)"),
1403 	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
1404 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
1405 	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
1406 		    "Add prefix to source file path names in programs (with --prefix-strip)"),
1407 	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
1408 		    "Strip first N entries of source file path name in programs (with --prefix)"),
1409 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1410 		    "Show a column with the sum of periods"),
1411 	OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
1412 		    "Show event group information together"),
1413 	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
1414 		    "Sort the output by the event at the index n in group. "
1415 		    "If n is invalid, sort by the first event. "
1416 		    "WARNING: should be used on grouped events."),
1417 	OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
1418 		    "use branch records for per branch histogram filling",
1419 		    parse_branch_mode),
1420 	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
1421 		    "add last branch records to call history"),
1422 	OPT_STRING(0, "objdump", &objdump_path, "path",
1423 		   "objdump binary to use for disassembly and annotations"),
1424 	OPT_STRING(0, "addr2line", &addr2line_path, "path",
1425 		   "addr2line binary to use for line numbers"),
1426 	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
1427 		    "Disable symbol demangling"),
1428 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1429 		    "Enable kernel symbol demangling"),
1430 	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
1431 	OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
1432 		    "Number of samples to save per histogram entry for individual browsing"),
1433 	OPT_CALLBACK(0, "percent-limit", &report, "percent",
1434 		     "Don't show entries under that percent", parse_percent_limit),
1435 	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1436 		     "how to display percentage of filtered entries", parse_filter_percentage),
1437 	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
1438 			    "Instruction Tracing options\n" ITRACE_HELP,
1439 			    itrace_parse_synth_opts),
1440 	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
1441 			"Show full source file name path for source lines"),
1442 	OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
1443 		    "Show callgraph from reference event"),
1444 	OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
1445 		    "Enable LBR callgraph stitching approach"),
1446 	OPT_INTEGER(0, "socket-filter", &report.socket_filter,
1447 		    "only show processor socket that match with this filter"),
1448 	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1449 		    "Show raw trace event output (do not use print fmt or plugins)"),
1450 	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
1451 		    "Show entries in a hierarchy"),
1452 	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
1453 			     "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
1454 			     stdio__config_color, "always"),
1455 	OPT_STRING(0, "time", &report.time_str, "str",
1456 		   "Time span of interest (start,stop)"),
1457 	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
1458 		    "Show inline function"),
1459 	OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
1460 		     "Set percent type local/global-period/hits",
1461 		     annotate_parse_percent_type),
1462 	OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
1463 	OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
1464 		     "Set time quantum for time sort key (default 100ms)",
1465 		     parse_time_quantum),
1466 	OPTS_EVSWITCH(&report.evswitch),
1467 	OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
1468 		    "Sort all blocks by 'Sampled Cycles%'"),
1469 	OPT_BOOLEAN(0, "disable-order", &report.disable_order,
1470 		    "Disable raw trace ordering"),
1471 	OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
1472 		    "Do not display empty (or dummy) events in the output"),
1473 	OPT_END()
1474 	};
1475 	struct perf_data data = {
1476 		.mode  = PERF_DATA_MODE_READ,
1477 	};
1478 	int ret = hists__init();
1479 	char sort_tmp[128];
1480 
1481 	if (ret < 0)
1482 		goto exit;
1483 
1484 	/*
1485 	 * tasks_mode requires access to exited threads to list those that are in
1486 	 * the data file. Off-cpu events are synthesized after other events and
1487 	 * reference exited threads.
1488 	 */
1489 	symbol_conf.keep_exited_threads = true;
1490 
1491 	annotation_options__init();
1492 
1493 	ret = perf_config(report__config, &report);
1494 	if (ret)
1495 		goto exit;
1496 
1497 	argc = parse_options(argc, argv, options, report_usage, 0);
1498 	if (argc) {
1499 		/*
1500 		 * Special case: if there's an argument left then assume that
1501 		 * it's a symbol filter:
1502 		 */
1503 		if (argc > 1)
1504 			usage_with_options(report_usage, options);
1505 
1506 		report.symbol_filter_str = argv[0];
1507 	}
1508 
1509 	if (disassembler_style) {
1510 		annotate_opts.disassembler_style = strdup(disassembler_style);
1511 		if (!annotate_opts.disassembler_style)
1512 			return -ENOMEM;
1513 	}
1514 	if (objdump_path) {
1515 		annotate_opts.objdump_path = strdup(objdump_path);
1516 		if (!annotate_opts.objdump_path)
1517 			return -ENOMEM;
1518 	}
1519 	if (addr2line_path) {
1520 		symbol_conf.addr2line_path = strdup(addr2line_path);
1521 		if (!symbol_conf.addr2line_path)
1522 			return -ENOMEM;
1523 	}
1524 
1525 	if (annotate_check_args() < 0) {
1526 		ret = -EINVAL;
1527 		goto exit;
1528 	}
1529 
1530 	if (report.mmaps_mode)
1531 		report.tasks_mode = true;
1532 
1533 	if (dump_trace && report.disable_order)
1534 		report.tool.ordered_events = false;
1535 
1536 	if (quiet)
1537 		perf_quiet_option();
1538 
1539 	ret = symbol__validate_sym_arguments();
1540 	if (ret)
1541 		goto exit;
1542 
1543 	if (report.inverted_callchain)
1544 		callchain_param.order = ORDER_CALLER;
1545 	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1546 		callchain_param.order = ORDER_CALLER;
1547 
1548 	if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
1549 	    (int)itrace_synth_opts.callchain_sz > report.max_stack)
1550 		report.max_stack = itrace_synth_opts.callchain_sz;
1551 
1552 	if (!input_name || !strlen(input_name)) {
1553 		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1554 			input_name = "-";
1555 		else
1556 			input_name = "perf.data";
1557 	}
1558 
1559 	data.path  = input_name;
1560 	data.force = symbol_conf.force;
1561 
1562 	symbol_conf.skip_empty = report.skip_empty;
1563 
1564 repeat:
1565 	session = perf_session__new(&data, &report.tool);
1566 	if (IS_ERR(session)) {
1567 		ret = PTR_ERR(session);
1568 		goto exit;
1569 	}
1570 
1571 	ret = evswitch__init(&report.evswitch, session->evlist, stderr);
1572 	if (ret)
1573 		goto exit;
1574 
1575 	if (zstd_init(&(session->zstd_data), 0) < 0)
1576 		pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
1577 
1578 	if (report.queue_size) {
1579 		ordered_events__set_alloc_size(&session->ordered_events,
1580 					       report.queue_size);
1581 	}
1582 
1583 	session->itrace_synth_opts = &itrace_synth_opts;
1584 
1585 	report.session = session;
1586 
1587 	has_br_stack = perf_header__has_feat(&session->header,
1588 					     HEADER_BRANCH_STACK);
1589 	if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
1590 		has_br_stack = false;
1591 
1592 	setup_forced_leader(&report, session->evlist);
1593 
1594 	if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
1595 		parse_options_usage(NULL, options, "group-sort-idx", 0);
1596 		ret = -EINVAL;
1597 		goto error;
1598 	}
1599 
1600 	if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
1601 		has_br_stack = true;
1602 
1603 	if (has_br_stack && branch_call_mode)
1604 		symbol_conf.show_branchflag_count = true;
1605 
1606 	memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));
1607 
1608 	/*
1609 	 * Branch mode is a tristate:
1610 	 * -1 means default, so decide based on the file having branch data.
1611 	 * 0/1 means the user chose a mode.
1612 	 */
1613 	if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
1614 	    !branch_call_mode) {
1615 		sort__mode = SORT_MODE__BRANCH;
1616 		symbol_conf.cumulate_callchain = false;
1617 	}
1618 	if (branch_call_mode) {
1619 		callchain_param.key = CCKEY_ADDRESS;
1620 		callchain_param.branch_callstack = true;
1621 		symbol_conf.use_callchain = true;
1622 		callchain_register_param(&callchain_param);
1623 		if (sort_order == NULL)
1624 			sort_order = "srcline,symbol,dso";
1625 	}
1626 
1627 	if (report.mem_mode) {
1628 		if (sort__mode == SORT_MODE__BRANCH) {
1629 			pr_err("branch and mem mode incompatible\n");
1630 			goto error;
1631 		}
1632 		sort__mode = SORT_MODE__MEMORY;
1633 		symbol_conf.cumulate_callchain = false;
1634 	}
1635 
1636 	if (symbol_conf.report_hierarchy) {
1637 		/* disable incompatible options */
1638 		symbol_conf.cumulate_callchain = false;
1639 
1640 		if (field_order) {
1641 			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1642 			parse_options_usage(report_usage, options, "F", 1);
1643 			parse_options_usage(NULL, options, "hierarchy", 0);
1644 			goto error;
1645 		}
1646 
1647 		perf_hpp_list.need_collapse = true;
1648 	}
1649 
1650 	if (report.use_stdio)
1651 		use_browser = 0;
1652 #ifdef HAVE_SLANG_SUPPORT
1653 	else if (report.use_tui)
1654 		use_browser = 1;
1655 #endif
1656 #ifdef HAVE_GTK2_SUPPORT
1657 	else if (report.use_gtk)
1658 		use_browser = 2;
1659 #endif
1660 
1661 	/* Force tty output for header output and per-thread stat. */
1662 	if (report.header || report.header_only || report.show_threads)
1663 		use_browser = 0;
1664 	if (report.header || report.header_only)
1665 		report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
1666 	if (report.show_full_info)
1667 		report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
1668 	if (report.stats_mode || report.tasks_mode)
1669 		use_browser = 0;
1670 	if (report.stats_mode && report.tasks_mode) {
1671 		pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
1672 		goto error;
1673 	}
1674 
1675 	if (report.total_cycles_mode) {
1676 		if (sort__mode != SORT_MODE__BRANCH)
1677 			report.total_cycles_mode = false;
1678 		else
1679 			sort_order = NULL;
1680 	}
1681 
1682 	if (sort_order && strstr(sort_order, "type")) {
1683 		report.data_type = true;
1684 		annotate_opts.annotate_src = false;
1685 
1686 #ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
1687 		pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
1688 		goto error;
1689 #endif
1690 	}
1691 
1692 	if (strcmp(input_name, "-") != 0)
1693 		setup_browser(true);
1694 	else
1695 		use_browser = 0;
1696 
1697 	if (report.data_type && use_browser == 1) {
1698 		symbol_conf.annotate_data_member = true;
1699 		symbol_conf.annotate_data_sample = true;
1700 	}
1701 
1702 	if (sort_order && strstr(sort_order, "ipc")) {
1703 		parse_options_usage(report_usage, options, "s", 1);
1704 		goto error;
1705 	}
1706 
1707 	if (sort_order && strstr(sort_order, "symbol")) {
1708 		if (sort__mode == SORT_MODE__BRANCH) {
1709 			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1710 				 sort_order, "ipc_lbr");
1711 			report.symbol_ipc = true;
1712 		} else {
1713 			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1714 				 sort_order, "ipc_null");
1715 		}
1716 
1717 		sort_order = sort_tmp;
1718 	}
1719 
1720 	if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
1721 	    (setup_sorting(session->evlist) < 0)) {
1722 		if (sort_order)
1723 			parse_options_usage(report_usage, options, "s", 1);
1724 		if (field_order)
1725 			parse_options_usage(sort_order ? NULL : report_usage,
1726 					    options, "F", 1);
1727 		goto error;
1728 	}
1729 
1730 	if ((report.header || report.header_only) && !quiet) {
1731 		perf_session__fprintf_info(session, stdout,
1732 					   report.show_full_info);
1733 		if (report.header_only) {
1734 			if (data.is_pipe) {
1735 				/*
1736 				 * we need to process the first few records,
1737 				 * which contain PERF_RECORD_HEADER_FEATURE.
1738 				 */
1739 				perf_session__process_events(session);
1740 			}
1741 			ret = 0;
1742 			goto error;
1743 		}
1744 	} else if (use_browser == 0 && !quiet &&
1745 		   !report.stats_mode && !report.tasks_mode) {
1746 		fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
1747 		      stdout);
1748 	}
1749 
1750 	/*
1751 	 * Only the TUI browser does integrated annotation, so don't
1752 	 * allocate extra space that won't be used by the stdio
1753 	 * implementation.
1754 	 */
1755 	if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
1756 	    report.total_cycles_mode) {
1757 		ret = symbol__annotation_init();
1758 		if (ret < 0)
1759 			goto error;
1760 		/*
1761 		 * For searching by name on the "Browse map details"
1762 		 * screen, provide it only in verbose mode so as not to
1763 		 * bloat struct symbol too much.
1764  		 */
1765 		if (verbose > 0) {
1766 			/*
1767 			 * XXX: Need to provide a less kludgy way to ask for
1768 			 * more space per symbol; the u32 is for the index on
1769 			 * the ui browser.
1770 			 * See symbol__browser_index.
1771 			 */
1772 			symbol_conf.priv_size += sizeof(u32);
1773 		}
1774 		annotation_config__init();
1775 	}
1776 
1777 	if (symbol__init(&session->header.env) < 0)
1778 		goto error;
1779 
1780 	if (report.time_str) {
1781 		ret = perf_time__parse_for_ranges(report.time_str, session,
1782 						  &report.ptime_range,
1783 						  &report.range_size,
1784 						  &report.range_num);
1785 		if (ret < 0)
1786 			goto error;
1787 
1788 		itrace_synth_opts__set_time_range(&itrace_synth_opts,
1789 						  report.ptime_range,
1790 						  report.range_num);
1791 	}
1792 
1793 #ifdef HAVE_LIBTRACEEVENT
1794 	if (session->tevent.pevent &&
1795 	    tep_set_function_resolver(session->tevent.pevent,
1796 				      machine__resolve_kernel_addr,
1797 				      &session->machines.host) < 0) {
1798 		pr_err("%s: failed to set libtraceevent function resolver\n",
1799 		       __func__);
1800 		return -1;
1801 	}
1802 #endif
1803 	sort__setup_elide(stdout);
1804 
1805 	ret = __cmd_report(&report);
1806 	if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
1807 		perf_session__delete(session);
1808 		last_key = K_SWITCH_INPUT_DATA;
1809 		goto repeat;
1810 	} else
1811 		ret = 0;
1812 
1813 	if (!use_browser && (verbose > 2 || debug_kmaps))
1814 		perf_session__dump_kmaps(session);
1815 error:
1816 	if (report.ptime_range) {
1817 		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
1818 		zfree(&report.ptime_range);
1819 	}
1820 
1821 	if (report.block_reports) {
1822 		block_info__free_report(report.block_reports,
1823 					report.nr_block_reports);
1824 		report.block_reports = NULL;
1825 	}
1826 
1827 	zstd_fini(&(session->zstd_data));
1828 	perf_session__delete(session);
1829 exit:
1830 	annotation_options__exit();
1831 	free(sort_order_help);
1832 	free(field_order_help);
1833 	return ret;
1834 }
1835