// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/config.h"

#include "util/annotate.h"
#include "util/color.h"
#include "util/dso.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include "util/map.h"
#include "util/symbol.h"
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/branch.h"
#include "util/callchain.h"
#include "util/values.h"

#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evswitch.h"
#include "util/header.h"
#include "util/mem-info.h"
#include "util/session.h"
#include "util/srcline.h"
#include "util/tool.h"

#include <subcmd/parse-options.h>
#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"

#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
#include "util/block-info.h"

#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/ctype.h>
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/mman.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

/* State accumulated over one invocation of 'perf report'. */
struct report {
	struct perf_tool tool;
	struct perf_session *session;
	struct
 evswitch evswitch;
#ifdef HAVE_SLANG_SUPPORT
	bool use_tui;
#endif
#ifdef HAVE_GTK2_SUPPORT
	bool use_gtk;
#endif
	bool use_stdio;
	bool show_full_info;
	bool show_threads;
	bool inverted_callchain;
	bool mem_mode;
	bool stats_mode;
	bool tasks_mode;
	bool mmaps_mode;
	bool header;
	bool header_only;
	bool nonany_branch_mode;
	bool group_set;
	bool stitch_lbr;
	bool disable_order;
	bool skip_empty;
	bool data_type;
	int max_stack;
	struct perf_read_values show_threads_values;
	const char *pretty_printing_style;
	const char *cpu_list;
	const char *symbol_filter_str;
	const char *time_str;
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;
	float min_percent;
	u64 nr_entries;
	u64 queue_size;
	u64 total_cycles;
	u64 total_samples;
	u64 singlethreaded_samples;
	int socket_filter;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	struct branch_type_stat brtype_stat;
	bool symbol_ipc;
	bool total_cycles_mode;
	struct block_report *block_reports;
	int nr_block_reports;
};

/*
 * perfconfig callback: apply 'report.*' config-file entries to the
 * report state / global symbol and callchain configuration.  Unknown
 * variables are logged at debug level and ignored.
 */
static int report__config(const char *var, const char *value, void *cb)
{
	struct report *rep = cb;

	if (!strcmp(var, "report.group")) {
		symbol_conf.event_group = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.percent-limit")) {
		double pcnt = strtof(value, NULL);

		rep->min_percent = pcnt;
		callchain_param.min_percent = pcnt;
		return 0;
	}
	if (!strcmp(var, "report.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.queue-size"))
		return perf_config_u64(&rep->queue_size, var, value);

	if (!strcmp(var, "report.sort_order")) {
		default_sort_order = strdup(value);
		if (!default_sort_order) {
			pr_err("Not enough memory for report.sort_order\n");
			return -1;
		}
		return 0;
	}

	if
 (!strcmp(var, "report.skip-empty")) {
		rep->skip_empty = perf_config_bool(var, value);
		return 0;
	}

	pr_debug("%s variable unknown, ignoring...", var);
	return 0;
}

/*
 * Per-hist-entry callback: feed the new entry's addresses into the
 * annotation sample counters.  Skipped entirely when neither annotation
 * nor per-symbol IPC output is active.
 */
static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
				      void *arg)
{
	int err = 0;
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;
	struct branch_info *bi;

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	if (sort__mode == SORT_MODE__BRANCH) {
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

	} else if (rep->mem_mode) {
		mi = he->mem_info;
		err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
		if (err)
			goto out;

		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);

	} else if (symbol_conf.cumulate_callchain) {
		/* Only count the entry once for cumulated (children) mode. */
		if (single)
			err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	} else {
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	}

out:
	return err;
}

/*
 * Branch-mode variant of the entry callback: also accumulates branch
 * type statistics before the annotation bookkeeping.
 */
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
				      struct addr_location *al __maybe_unused,
				      bool single __maybe_unused,
				      void *arg)
{
	struct hist_entry *he = iter->he;
	struct report *rep = arg;
	struct branch_info *bi = he->branch_info;
	struct perf_sample *sample = iter->sample;
	struct evsel *evsel = iter->evsel;
	int err;

	branch_type_count(&rep->brtype_stat, &bi->flags,
			  bi->from.addr, bi->to.addr);

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
	if (err)
		goto out;
	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

out:
	return err;
}

/* Force a group leader across the evlist when --group was requested. */
static void setup_forced_leader(struct report *report,
				struct evlist *evlist)
{
	if (report->group_set)
		evlist__force_leader(evlist);
}

static int process_feature_event(const struct perf_tool *tool,
				 struct perf_session *session,
				 union perf_event *event)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);

	if (event->feat.feat_id != HEADER_LAST_FEATURE) {
		pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
		       event->feat.feat_id);
		return -1;
	} else if (rep->header_only) {
		/* All header info seen; nothing else needed in --header-only. */
		session_done = 1;
	}

	/*
	 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
	 * means all features are received, now we can force the
	 * group if needed.
	 */
	setup_forced_leader(rep, session->evlist);
	return 0;
}

/*
 * Main per-sample handler: filter the sample (time range, event switch,
 * cpu list), resolve it to a thread/map/symbol and add it to the
 * histograms via the appropriate hist_entry_iter ops.
 */
static int process_sample_event(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.evsel = evsel,
		.sample = sample,
		.hide_unresolved = symbol_conf.hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,
	};
	int ret = 0;

	if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
					  sample->time)) {
		return 0;
	}

	if (evswitch__discard(&rep->evswitch, evsel))
		return 0;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		ret = -1;
		goto out_put;
	}

	if (rep->stitch_lbr)
		thread__set_lbr_stitch_enable(al.thread, true);
	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out_put;

	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
		goto out_put;

	if (sort__mode == SORT_MODE__BRANCH) {
		/*
		 * A non-synthesized event might not have a branch stack if
		 * branch stacks have been synthesized (using itrace options).
		 */
		if (!sample->branch_stack)
			goto out_put;

		iter.add_entry_cb = hist_iter__branch_callback;
		iter.ops = &hist_iter_branch;
	} else if (rep->mem_mode) {
		iter.ops = &hist_iter_mem;
	} else if (symbol_conf.cumulate_callchain) {
		iter.ops = &hist_iter_cumulative;
	} else {
		iter.ops = &hist_iter_normal;
	}

	if (al.map != NULL)
		dso__set_hit(map__dso(al.map));

	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
		hist__account_cycles(sample->branch_stack, &al, sample,
				     rep->nonany_branch_mode,
				     &rep->total_cycles, evsel);
	}

	rep->total_samples++;
	if (al.parallelism == 1)
		rep->singlethreaded_samples++;

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");
out_put:
	addr_location__exit(&al);
	return ret;
}

/* Accumulate PERF_RECORD_READ values when per-thread display is enabled. */
static int process_read_event(const struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->show_threads) {
		int err = perf_read_values_add_value(&rep->show_threads_values,
						     event->read.pid, event->read.tid,
						     evsel,
						     event->read.value);

		if (err)
			return err;
	}

	return 0;
}

/* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
{
	struct perf_session *session = rep->session;
	u64 sample_type =
 evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data__is_pipe(session->data);
	struct evsel *evsel;

	/* Callchains may also come synthesized from AUX trace data. */
	if (session->itrace_synth_opts->callchain ||
	    session->itrace_synth_opts->add_callchain ||
	    (!is_pipe &&
	     perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
	     !session->itrace_synth_opts->set))
		sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (session->itrace_synth_opts->last_branch ||
	    session->itrace_synth_opts->add_last_branch)
		sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (perf_hpp_list.parent) {
			ui__error("Selected --sort parent, but no "
				  "callchain data. Did you call "
				  "'perf record' without -g?\n");
			return -EINVAL;
		}
		if (symbol_conf.use_callchain &&
		    !symbol_conf.show_branchflag_count) {
			ui__error("Selected -g or --branch-history.\n"
				  "But no callchain or branch data.\n"
				  "Did you call 'perf record' without -g or -b?\n");
			return -1;
		}
	} else if (!callchain_param.enabled &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate(session->evlist);
		}
	}

	if (sort__mode == SORT_MODE__BRANCH) {
		if (!is_pipe &&
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
			return -1;
		}
	}

	if (sort__mode == SORT_MODE__MEMORY) {
		/*
		 * FIXUP: prior to kernel 5.18, Arm SPE missed to set
		 * PERF_SAMPLE_DATA_SRC bit in sample type.
 For backward
		 * compatibility, set the bit if it's an old perf data file.
		 */
		evlist__for_each_entry(session->evlist, evsel) {
			if (strstr(evsel__name(evsel), "arm_spe") &&
			    !(sample_type & PERF_SAMPLE_DATA_SRC)) {
				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
				sample_type |= PERF_SAMPLE_DATA_SRC;
			}
		}

		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
			ui__error("Selected --mem-mode but no mem data. "
				  "Did you call perf record without -d?\n");
			return -1;
		}
	}

	callchain_param_setup(sample_type, perf_session__e_machine(session, /*e_flags=*/NULL));

	if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
		ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
			    "Please apply --call-graph lbr when recording.\n");
		rep->stitch_lbr = false;
	}

	/* ??? handle more cases than just ANY? */
	if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
		rep->nonany_branch_mode = true;

#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_LIBDW_SUPPORT)
	if (dwarf_callchain_users) {
		ui__warning("Please install libunwind or libdw "
			    "development packages during the perf build.\n");
	}
#endif

	return 0;
}

/* SIGINT handler: ask the session processing loop to finish up. */
static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

/*
 * Print the "# Samples: ..." summary header for one hists instance;
 * returns the number of characters written (0 when --quiet).
 */
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
{
	size_t ret;
	char unit;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t size = sizeof(buf);
	int socked_id = hists->socket_filter;

	if (quiet)
		return 0;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}
	/* For event groups, sum samples/periods over all members. */
	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, size);
		evname = buf;

		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL) {
		ret += fprintf(fp, " of event%s '%s'",
			       evsel->core.nr_members > 1 ? "s" : "", evname);
	}

	if (rep->time_str)
		ret += fprintf(fp, " (time slices: %s)", rep->time_str);

	if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
		ret += fprintf(fp, ", show reference callgraph");
	}

	if (rep->mem_mode) {
		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
		if (sort_order || !field_order) {
			ret += fprintf(fp, "\n# Sort order : %s",
				       sort_order ?
 : default_mem_sort_order);
		}
	} else
		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	if (socked_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);

	return ret + fprintf(fp, "\n#\n");
}

/* TUI browsing of the per-event block reports (total-cycles mode). */
static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
{
	struct evsel *pos;
	int i = 0, ret;

	evlist__for_each_entry(evlist, pos) {
		ret = report__browse_block_hists(&rep->block_reports[i++].hist,
						 rep->min_percent, pos,
						 perf_session__env(rep->session));
		if (ret != 0)
			return ret;
	}

	return 0;
}

/* Plain stdout output of every event's histogram. */
static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
{
	struct evsel *pos;
	int i = 0;

	if (!quiet) {
		fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
			evlist->stats.total_lost_samples);
	}

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = evsel__name(pos);

		/* Count every evsel so block_reports indexing stays aligned. */
		i++;
		if (symbol_conf.event_group && !evsel__is_group_leader(pos))
			continue;

		if (rep->skip_empty && !hists->stats.nr_samples)
			continue;

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);

		if (rep->total_cycles_mode) {
			char *buf;

			if (!annotation_br_cntr_abbr_list(&buf, pos, true)) {
				fprintf(stdout, "%s", buf);
				fprintf(stdout, "#\n");
				free(buf);
			}
			report__browse_block_hists(&rep->block_reports[i - 1].hist,
						   rep->min_percent, pos, NULL);
			continue;
		}

		hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
			       !(symbol_conf.use_callchain ||
				 symbol_conf.show_branchflag_count));
		fprintf(stdout, "\n\n");
	}

	if (!quiet)
		fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout,
 &rep->show_threads_values,
					 style);
		perf_read_values_destroy(&rep->show_threads_values);
	}

	if (sort__mode == SORT_MODE__BRANCH)
		branch_type_stat_display(stdout, &rep->brtype_stat);

	return 0;
}

/*
 * Warn when kernel samples can't be resolved because of
 * /proc/sys/kernel/kptr_restrict at record time.
 */
static void report__warn_kptr_restrict(const struct report *rep)
{
	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (evlist__exclude_kernel(rep->session->evlist))
		return;

	if (kernel_map == NULL ||
	    (dso__hit(map__dso(kernel_map)) &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
		const char *desc =
		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
		    "can't be resolved.";

		if (kernel_map && map__has_symbols(kernel_map)) {
			desc = "If some relocation was applied (e.g. "
			       "kexec) symbols may be misresolved.";
		}

		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
"Samples in kernel modules can't be resolved as well.\n\n",
		desc);
	}
}

/* Look up the GTK hist browser from the dlopen'ed GTK support object. */
static int report__gtk_browse_hists(struct report *rep, const char *help)
{
	int (*hist_browser)(struct evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");
		return -1;
	}

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}

/* Dispatch to TUI, GTK or stdout output based on use_browser. */
static int report__browse_hists(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	char *help = NULL, *path = NULL;

	path = system_path(TIPDIR);
	if (perf_tip(&help, path) || help == NULL) {
		/* fallback for people who don't install perf ;-) */
		free(path);
		path = system_path(DOCDIR);
		if (perf_tip(&help, path) || help == NULL)
			help = strdup("Cannot load tips.txt file, please install perf!");
	}
	free(path);

	switch (use_browser) {
	case 1:
		if (rep->total_cycles_mode) {
			ret = evlist__tui_block_hists_browse(evlist, rep);
			break;
		}

		ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
					       perf_session__env(session), true);
		/*
		 * Usually "ret" is the last pressed key, and we only
		 * care if the key notifies us to switch data file.
		 */
		if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
			ret = 0;
		break;
	case 2:
		ret = report__gtk_browse_hists(rep, help);
		break;
	default:
		ret = evlist__tty_browse_hists(evlist, rep, help);
		break;
	}
	free(help);
	return ret;
}

/*
 * Collapse (merge) related hist entries per evsel, linking group
 * members into their leader's hists when --group is in effect.
 */
static int report__collapse_hists(struct report *rep)
{
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	struct ui_progress prog;
	struct evsel *pos;
	int ret = 0;

	/*
	 * The pipe data needs to setup hierarchy hpp formats now, because it
	 * cannot know about evsels in the data before reading the data. The
	 * normal file data saves the event (attribute) info in the header
	 * section, but pipe does not have the luxury.
	 */
	if (perf_data__is_pipe(session->data)) {
		if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
			ui__error("Failed to setup hierarchy output formats\n");
			return -1;
		}
	}

	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (pos->core.idx == 0)
			hists->symbol_filter_str = rep->symbol_filter_str;

		hists->socket_filter = rep->socket_filter;

		ret = hists__collapse_resort(hists, &prog);
		if (ret < 0)
			break;

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	ui_progress__finish();
	return ret;
}

/* Resort callback: lazily annotate symbols needed for per-symbol IPC. */
static int hists__resort_cb(struct hist_entry *he, void *arg)
{
	struct report *rep = arg;
	struct symbol *sym = he->ms.sym;

	if (rep->symbol_ipc && sym && !sym->annotate2) {
		struct evsel *evsel = hists_to_evsel(he->hists);

		symbol__annotate2(&he->ms, evsel, NULL);
	}

	return 0;
}

/* Re-sort every evsel's hists into output order. */
static void report__output_resort(struct report *rep)
{
	struct ui_progress prog;
	struct evsel *pos;

	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
	}

	ui_progress__finish();
}

/* Stats-mode sample handler: just bump the per-evsel event counter. */
static int count_sample_event(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct hists *hists = evsel__hists(evsel);

	hists__inc_nr_events(hists);
	return 0;
}

/* Attribute lost/dropped sample counts to the owning evsel's hists. */
static int count_lost_samples_event(const struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct evsel *evsel;

	evsel = evlist__id2evsel(rep->session->evlist, sample->id);
	if (evsel) {
		struct hists *hists = evsel__hists(evsel);
		u32 count = event->lost_samples.lost;

		if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
			hists__inc_nr_dropped_samples(hists, count);
		else
			hists__inc_nr_lost_samples(hists, count);
	}
	return 0;
}

static int process_attr(const struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist);

/* Minimal tool setup for stats mode: only count events. */
static void stats_setup(struct report *rep)
{
	perf_tool__init(&rep->tool, /*ordered_events=*/false);
	rep->tool.attr = process_attr;
	rep->tool.sample = count_sample_event;
	rep->tool.lost_samples = count_lost_samples_event;
	rep->tool.event_update = perf_event__process_event_update;
	rep->tool.no_warn = true;
}

static int stats_print(struct report *rep)
{
	struct perf_session *session = rep->session;

	perf_session__fprintf_nr_events(session, stdout);
	evlist__fprintf_nr_events(session->evlist, stdout);
	return 0;
}

/* Tool setup for tasks/mmaps mode: track task lifetimes (and maps). */
static void tasks_setup(struct report *rep)
{
	perf_tool__init(&rep->tool, /*ordered_events=*/true);
	if (rep->mmaps_mode) {
		rep->tool.mmap = perf_event__process_mmap;
		rep->tool.mmap2 = perf_event__process_mmap2;
	}
	rep->tool.attr = process_attr;
	rep->tool.comm = perf_event__process_comm;
	rep->tool.exit = perf_event__process_exit;
	rep->tool.fork = perf_event__process_fork;
	rep->tool.no_warn = true;
}

/* Accumulator passed through maps__for_each_map() below. */
struct maps__fprintf_task_args {
	int indent;
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_task_cb(struct map *map, void
 *data)
{
	struct maps__fprintf_task_args *args = data;
	const struct dso *dso = map__dso(map);
	u32 prot = map__prot(map);
	const struct dso_id *dso_id = dso__id_const(dso);
	int ret;
	char buf[SBUILD_ID_SIZE];

	/* Prefer the mmap2 inode; fall back to the build-id string. */
	if (dso_id->mmap2_valid)
		snprintf(buf, sizeof(buf), "%" PRIu64, dso_id->ino);
	else
		build_id__snprintf(&dso_id->build_id, buf, sizeof(buf));

	ret = fprintf(args->fp,
		      "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %s %s\n",
		      args->indent, "", map__start(map), map__end(map),
		      prot & PROT_READ ? 'r' : '-',
		      prot & PROT_WRITE ? 'w' : '-',
		      prot & PROT_EXEC ? 'x' : '-',
		      map__flags(map) ? 's' : 'p',
		      map__pgoff(map),
		      buf, dso__name(dso));

	if (ret < 0)
		return ret;

	args->printed += ret;
	return 0;
}

/* Print all maps of a task, /proc/<pid>/maps style; returns chars written. */
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
	struct maps__fprintf_task_args args = {
		.indent = indent,
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_task_cb, &args);

	return args.printed;
}

/*
 * Depth of @thread in its parent chain: 0 for tid <= 0, 1 when there is
 * no known parent, otherwise 1 + parent's level (0 if the parent thread
 * cannot be found).
 */
static int thread_level(struct machine *machine, const struct thread *thread)
{
	struct thread *parent_thread;
	int res;

	if (thread__tid(thread) <= 0)
		return 0;

	if (thread__ppid(thread) <= 0)
		return 1;

	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
	if (!parent_thread) {
		pr_err("Missing parent thread of %d\n", thread__tid(thread));
		return 0;
	}
	res = 1 + thread_level(machine, parent_thread);
	thread__put(parent_thread);
	return res;
}

/* Print one task line, indented by its depth, followed by its maps. */
static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
{
	int level = thread_level(machine, thread);
	int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
				  thread__pid(thread), thread__tid(thread),
				  thread__ppid(thread), level, "");

	fprintf(fp, "%s\n", thread__comm_str(thread));
	maps__fprintf_task(thread__maps(thread), comm_indent, fp);
}

/*
 * Sort two thread list nodes such that they form a tree. The first node is the
 * root of the tree, its children are ordered numerically after it. If a child
 * has children itself then they appear immediately after their parent. For
 * example, the 4 threads in the order they'd appear in the list:
 * - init with a TID 1 and a parent of 0
 * - systemd with a TID 3000 and a parent of init/1
 * - systemd child thread with TID 4000, the parent is 3000
 * - NetworkManager is a child of init with a TID of 3500.
 */
static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
{
	struct machine *machine = priv;
	struct thread_list *task_a = list_entry(la, struct thread_list, list);
	struct thread_list *task_b = list_entry(lb, struct thread_list, list);
	struct thread *a = task_a->thread;
	struct thread *b = task_b->thread;
	int level_a, level_b, res;

	/* Same thread? */
	if (thread__tid(a) == thread__tid(b))
		return 0;

	/* Compare a and b to root. */
	if (thread__tid(a) == 0)
		return -1;

	if (thread__tid(b) == 0)
		return 1;

	/* If parents match sort by tid. */
	if (thread__ppid(a) == thread__ppid(b))
		return thread__tid(a) < thread__tid(b) ? -1 : 1;

	/*
	 * Find a and b such that if they are a child of each other a and b's
	 * tid's match, otherwise a and b have a common parent and distinct
	 * tid's to sort by. First make the depths of the threads match.
	 */
	level_a = thread_level(machine, a);
	level_b = thread_level(machine, b);
	a = thread__get(a);
	b = thread__get(b);
	/* Walk the deeper thread up until both sit at the same depth. */
	for (int i = level_a; i > level_b; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));

		thread__put(a);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(a));
			thread__put(b);
			return -1;
		}
		a = parent;
	}
	for (int i = level_b; i > level_a; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));

		thread__put(b);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(b));
			thread__put(a);
			return 1;
		}
		b = parent;
	}
	/* Search up to a common parent. */
	while (thread__ppid(a) != thread__ppid(b)) {
		struct thread *parent;

		parent = machine__find_thread(machine, -1, thread__ppid(a));
		thread__put(a);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(a));
		a = parent;
		parent = machine__find_thread(machine, -1, thread__ppid(b));
		thread__put(b);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(b));
		b = parent;
		if (!a || !b) {
			/* Handle missing parent (unexpected) with some sanity. */
			thread__put(a);
			thread__put(b);
			return !a && !b ? 0 : (!a ? -1 : 1);
		}
	}
	if (thread__tid(a) == thread__tid(b)) {
		/* a is a child of b or vice-versa, deeper levels appear later. */
		res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
	} else {
		/* Sort by tid now the parent is the same. */
		res = thread__tid(a) < thread__tid(b) ?
 -1 : 1;
	}
	thread__put(a);
	thread__put(b);
	return res;
}

/* List all tasks (sorted as a tree) with their memory maps. */
static int tasks_print(struct report *rep, FILE *fp)
{
	struct machine *machine = &rep->session->machines.host;
	LIST_HEAD(tasks);
	int ret;

	ret = machine__thread_list(machine, &tasks);
	if (!ret) {
		struct thread_list *task;

		list_sort(machine, &tasks, task_list_cmp);

		fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");

		list_for_each_entry(task, &tasks, list)
			task__print_level(machine, task->thread, fp);
	}
	thread_list__delete(&tasks);
	return ret;
}

/* Main driver: process the perf.data events and render the report. */
static int __cmd_report(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evsel *pos;
	struct perf_data *data = session->data;

	signal(SIGINT, sig_handler);

	if (rep->cpu_list) {
		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
					       rep->cpu_bitmap);
		if (ret) {
			ui__error("failed to set cpu bitmap\n");
			return ret;
		}
		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
	}

	if (rep->show_threads) {
		ret = perf_read_values_init(&rep->show_threads_values);
		if (ret)
			return ret;
	}

	ret = report__setup_sample_type(rep);
	if (ret) {
		/* report__setup_sample_type() already showed error message */
		return ret;
	}

	if (rep->stats_mode)
		stats_setup(rep);

	if (rep->tasks_mode)
		tasks_setup(rep);

	ret = perf_session__process_events(session);
	if (ret) {
		ui__error("failed to process sample\n");
		return ret;
	}

	/* Don't show Latency column for non-parallel profiles by default.
	 */
	if (!symbol_conf.prefer_latency && rep->total_samples &&
	    rep->singlethreaded_samples * 100 / rep->total_samples >= 99)
		perf_hpp__cancel_latency(session->evlist);

	evlist__check_mem_load_aux(session->evlist);

	if (rep->stats_mode)
		return stats_print(rep);

	if (rep->tasks_mode)
		return tasks_print(rep, stdout);

	report__warn_kptr_restrict(rep);

	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
		if (verbose > 3)
			perf_session__fprintf(session, stdout);

		if (verbose > 2)
			perf_session__fprintf_dsos(session, stdout);

		if (dump_trace) {
			stats_print(rep);
			return 0;
		}
	}

	ret = report__collapse_hists(rep);
	if (ret) {
		ui__error("failed to process hist entry\n");
		return ret;
	}

	/* Stop here if SIGINT (or HEADER_LAST_FEATURE) ended the session. */
	if (session_done())
		return 0;

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
1142 */ 1143 rep->nr_entries = 0; 1144 evlist__for_each_entry(session->evlist, pos) 1145 rep->nr_entries += evsel__hists(pos)->nr_entries; 1146 1147 if (rep->nr_entries == 0) { 1148 ui__error("The %s data has no samples!\n", data->path); 1149 return 0; 1150 } 1151 1152 report__output_resort(rep); 1153 1154 if (rep->total_cycles_mode) { 1155 int nr_hpps = 4; 1156 int block_hpps[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = { 1157 PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT, 1158 PERF_HPP_REPORT__BLOCK_LBR_CYCLES, 1159 PERF_HPP_REPORT__BLOCK_CYCLES_PCT, 1160 PERF_HPP_REPORT__BLOCK_AVG_CYCLES, 1161 }; 1162 1163 if (session->evlist->nr_br_cntr > 0) 1164 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER; 1165 1166 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_RANGE; 1167 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_DSO; 1168 1169 rep->block_reports = block_info__create_report(session->evlist, 1170 rep->total_cycles, 1171 block_hpps, nr_hpps, 1172 &rep->nr_block_reports); 1173 if (!rep->block_reports) 1174 return -1; 1175 } 1176 1177 return report__browse_hists(rep); 1178 } 1179 1180 static int 1181 report_parse_callchain_opt(const struct option *opt, const char *arg, int unset) 1182 { 1183 struct callchain_param *callchain = opt->value; 1184 1185 callchain->enabled = !unset; 1186 /* 1187 * --no-call-graph 1188 */ 1189 if (unset) { 1190 symbol_conf.use_callchain = false; 1191 callchain->mode = CHAIN_NONE; 1192 return 0; 1193 } 1194 1195 return parse_callchain_report_opt(arg); 1196 } 1197 1198 static int 1199 parse_time_quantum(const struct option *opt, const char *arg, 1200 int unset __maybe_unused) 1201 { 1202 unsigned long *time_q = opt->value; 1203 char *end; 1204 1205 *time_q = strtoul(arg, &end, 0); 1206 if (end == arg) 1207 goto parse_err; 1208 if (*time_q == 0) { 1209 pr_err("time quantum cannot be 0"); 1210 return -1; 1211 } 1212 end = skip_spaces(end); 1213 if (*end == 0) 1214 return 0; 1215 if (!strcmp(end, "s")) { 1216 *time_q *= NSEC_PER_SEC; 1217 return 0; 
1218 } 1219 if (!strcmp(end, "ms")) { 1220 *time_q *= NSEC_PER_MSEC; 1221 return 0; 1222 } 1223 if (!strcmp(end, "us")) { 1224 *time_q *= NSEC_PER_USEC; 1225 return 0; 1226 } 1227 if (!strcmp(end, "ns")) 1228 return 0; 1229 parse_err: 1230 pr_err("Cannot parse time quantum `%s'\n", arg); 1231 return -1; 1232 } 1233 1234 int 1235 report_parse_ignore_callees_opt(const struct option *opt __maybe_unused, 1236 const char *arg, int unset __maybe_unused) 1237 { 1238 if (arg) { 1239 int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED); 1240 if (err) { 1241 char buf[BUFSIZ]; 1242 regerror(err, &ignore_callees_regex, buf, sizeof(buf)); 1243 pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf); 1244 return -1; 1245 } 1246 have_ignore_callees = 1; 1247 } 1248 1249 return 0; 1250 } 1251 1252 static int 1253 parse_branch_mode(const struct option *opt, 1254 const char *str __maybe_unused, int unset) 1255 { 1256 int *branch_mode = opt->value; 1257 1258 *branch_mode = !unset; 1259 return 0; 1260 } 1261 1262 static int 1263 parse_percent_limit(const struct option *opt, const char *str, 1264 int unset __maybe_unused) 1265 { 1266 struct report *rep = opt->value; 1267 double pcnt = strtof(str, NULL); 1268 1269 rep->min_percent = pcnt; 1270 callchain_param.min_percent = pcnt; 1271 return 0; 1272 } 1273 1274 static int 1275 report_parse_addr2line_config(const struct option *opt __maybe_unused, 1276 const char *arg, int unset __maybe_unused) 1277 { 1278 return addr2line_configure("addr2line.style", arg, NULL); 1279 } 1280 1281 static int process_attr(const struct perf_tool *tool __maybe_unused, 1282 union perf_event *event, 1283 struct evlist **pevlist) 1284 { 1285 struct perf_session *session; 1286 u64 sample_type; 1287 int err; 1288 1289 err = perf_event__process_attr(tool, event, pevlist); 1290 if (err) 1291 return err; 1292 1293 /* 1294 * Check if we need to enable callchains based 1295 * on events sample_type. 
1296 */ 1297 sample_type = evlist__combined_sample_type(*pevlist); 1298 session = (*pevlist)->session; 1299 callchain_param_setup(sample_type, perf_session__e_machine(session, /*e_flags=*/NULL)); 1300 return 0; 1301 } 1302 1303 #define CALLCHAIN_BRANCH_SORT_ORDER \ 1304 "srcline,symbol,dso,callchain_branch_predicted," \ 1305 "callchain_branch_abort,callchain_branch_cycles" 1306 1307 int cmd_report(int argc, const char **argv) 1308 { 1309 struct perf_session *session; 1310 struct itrace_synth_opts itrace_synth_opts = { .set = 0, }; 1311 struct stat st; 1312 bool has_br_stack = false; 1313 int branch_mode = -1; 1314 int last_key = 0; 1315 bool branch_call_mode = false; 1316 #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent" 1317 static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n" 1318 CALLCHAIN_REPORT_HELP 1319 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT; 1320 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT; 1321 const char * const report_usage[] = { 1322 "perf report [<options>]", 1323 NULL 1324 }; 1325 struct report report = { 1326 .max_stack = PERF_MAX_STACK_DEPTH, 1327 .pretty_printing_style = "normal", 1328 .socket_filter = -1, 1329 .skip_empty = true, 1330 }; 1331 char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__NORMAL); 1332 char *field_order_help = sort_help("output field(s):", SORT_MODE__NORMAL); 1333 const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL; 1334 const struct option options[] = { 1335 OPT_STRING('i', "input", &input_name, "file", 1336 "input file name"), 1337 OPT_INCR('v', "verbose", &verbose, 1338 "be more verbose (show symbol address, etc)"), 1339 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"), 1340 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 1341 "dump raw trace in ASCII"), 1342 OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"), 1343 OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display 
recorded tasks"), 1344 OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"), 1345 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 1346 "file", "vmlinux pathname"), 1347 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux, 1348 "don't load vmlinux even if found"), 1349 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 1350 "file", "kallsyms pathname"), 1351 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 1352 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 1353 "load module symbols - WARNING: use only with -k and LIVE kernel"), 1354 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 1355 "Show a column with the number of samples"), 1356 OPT_BOOLEAN('T', "threads", &report.show_threads, 1357 "Show per-thread event counters"), 1358 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", 1359 "pretty printing style key: normal raw"), 1360 #ifdef HAVE_SLANG_SUPPORT 1361 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), 1362 #endif 1363 #ifdef HAVE_GTK2_SUPPORT 1364 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"), 1365 #endif 1366 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 1367 "Use the stdio interface"), 1368 OPT_BOOLEAN(0, "header", &report.header, "Show data header."), 1369 OPT_BOOLEAN(0, "header-only", &report.header_only, 1370 "Show only data header."), 1371 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 1372 sort_order_help), 1373 OPT_STRING('F', "fields", &field_order, "key[,keys...]", 1374 field_order_help), 1375 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization, 1376 "Show sample percentage for different cpu modes"), 1377 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 1378 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN), 1379 OPT_STRING('p', "parent", &parent_pattern, "regex", 1380 "regex filter to identify parent, see: '--sort parent'"), 1381 
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, 1382 "Only display entries with parent-match"), 1383 OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param, 1384 "print_type,threshold[,print_limit],order,sort_key[,branch],value", 1385 report_callchain_help, &report_parse_callchain_opt, 1386 callchain_default_opt), 1387 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, 1388 "Accumulate callchains of children and show total overhead as well. " 1389 "Enabled by default, use --no-children to disable."), 1390 OPT_INTEGER(0, "max-stack", &report.max_stack, 1391 "Set the maximum stack depth when parsing the callchain, " 1392 "anything beyond the specified depth will be ignored. " 1393 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 1394 OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, 1395 "alias for inverted call graph"), 1396 OPT_CALLBACK(0, "ignore-callees", NULL, "regex", 1397 "ignore callees of these functions in call graphs", 1398 report_parse_ignore_callees_opt), 1399 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 1400 "only consider symbols in these dsos"), 1401 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 1402 "only consider symbols in these comms"), 1403 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 1404 "only consider symbols in these pids"), 1405 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 1406 "only consider symbols in these tids"), 1407 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 1408 "only consider these symbols"), 1409 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter", 1410 "only show symbols that (partially) match with this filter"), 1411 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 1412 "width[,width...]", 1413 "don't try to adjust column width, use these fixed values"), 1414 OPT_STRING_NOEMPTY('t', "field-separator", 
&symbol_conf.field_sep, "separator", 1415 "separator for columns, no spaces will be added between " 1416 "columns '.' is reserved."), 1417 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved, 1418 "Only display entries resolved to a symbol"), 1419 OPT_CALLBACK(0, "symfs", NULL, "directory", 1420 "Look for files with symbols relative to this directory", 1421 symbol__config_symfs), 1422 OPT_STRING('C', "cpu", &report.cpu_list, "cpu", 1423 "list of cpus to profile"), 1424 OPT_STRING(0, "parallelism", &symbol_conf.parallelism_list_str, "parallelism", 1425 "only consider these parallelism levels (cpu set format)"), 1426 OPT_BOOLEAN('I', "show-info", &report.show_full_info, 1427 "Display extended information about perf.data file"), 1428 OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src, 1429 "Interleave source code with assembly code (default)"), 1430 OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw, 1431 "Display raw encoding of assembly instructions (default)"), 1432 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1433 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1434 OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix", 1435 "Add prefix to source file path names in programs (with --prefix-strip)"), 1436 OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N", 1437 "Strip first N entries of source file path name in programs (with --prefix)"), 1438 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 1439 "Show a column with the sum of periods"), 1440 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set, 1441 "Show event group information together"), 1442 OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx, 1443 "Sort the output by the event at the index n in group. " 1444 "If n is invalid, sort by the first event. 
" 1445 "WARNING: should be used on grouped events."), 1446 OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "", 1447 "use branch records for per branch histogram filling", 1448 parse_branch_mode), 1449 OPT_BOOLEAN(0, "branch-history", &branch_call_mode, 1450 "add last branch records to call history"), 1451 OPT_STRING(0, "objdump", &objdump_path, "path", 1452 "objdump binary to use for disassembly and annotations"), 1453 OPT_STRING(0, "addr2line", &addr2line_path, "path", 1454 "addr2line binary to use for line numbers"), 1455 OPT_CALLBACK(0, "addr2line-style", NULL, "addr2line style", 1456 "addr2line styles (libdw,llvm,libbfd,addr2line)", 1457 report_parse_addr2line_config), 1458 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 1459 "Symbol demangling. Enabled by default, use --no-demangle to disable."), 1460 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 1461 "Enable kernel symbol demangling"), 1462 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"), 1463 OPT_INTEGER(0, "samples", &symbol_conf.res_sample, 1464 "Number of samples to save per histogram entry for individual browsing"), 1465 OPT_CALLBACK(0, "percent-limit", &report, "percent", 1466 "Don't show entries under that percent", parse_percent_limit), 1467 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", 1468 "how to display percentage of filtered entries", parse_filter_percentage), 1469 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 1470 "Instruction Tracing options\n" ITRACE_HELP, 1471 itrace_parse_synth_opts), 1472 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename, 1473 "Show full source file name path for source lines"), 1474 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph, 1475 "Show callgraph from reference event"), 1476 OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr, 1477 "Enable LBR callgraph stitching approach"), 1478 OPT_INTEGER(0, "socket-filter", &report.socket_filter, 1479 "only show processor socket 
that match with this filter"), 1480 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace, 1481 "Show raw trace event output (do not use print fmt or plugins)"), 1482 OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy, 1483 "Show entries in a hierarchy"), 1484 OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode", 1485 "'always' (default), 'never' or 'auto' only applicable to --stdio mode", 1486 stdio__config_color, "always"), 1487 OPT_STRING(0, "time", &report.time_str, "str", 1488 "Time span of interest (start,stop)"), 1489 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, 1490 "Show inline function"), 1491 OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period", 1492 "Set percent type local/global-period/hits", 1493 annotate_parse_percent_type), 1494 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"), 1495 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)", 1496 "Set time quantum for time sort key (default 100ms)", 1497 parse_time_quantum), 1498 OPTS_EVSWITCH(&report.evswitch), 1499 OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode, 1500 "Sort all blocks by 'Sampled Cycles%'"), 1501 OPT_BOOLEAN(0, "disable-order", &report.disable_order, 1502 "Disable raw trace ordering"), 1503 OPT_BOOLEAN(0, "skip-empty", &report.skip_empty, 1504 "Do not display empty (or dummy) events in the output"), 1505 OPT_BOOLEAN(0, "latency", &symbol_conf.prefer_latency, 1506 "Show latency-centric profile rather than the default\n" 1507 "\t\t\t CPU-consumption-centric profile\n" 1508 "\t\t\t (requires perf record --latency flag)."), 1509 OPT_END() 1510 }; 1511 struct perf_data data = { 1512 .mode = PERF_DATA_MODE_READ, 1513 }; 1514 int ret = hists__init(); 1515 char sort_tmp[128]; 1516 bool ordered_events = true; 1517 1518 if (ret < 0) 1519 goto exit; 1520 1521 /* 1522 * tasks_mode require access to exited threads to list those that are in 1523 * the data file. 
Off-cpu events are synthesized after other events and 1524 * reference exited threads. 1525 */ 1526 symbol_conf.keep_exited_threads = true; 1527 1528 annotation_options__init(); 1529 1530 ret = perf_config(report__config, &report); 1531 if (ret) 1532 goto exit; 1533 1534 argc = parse_options(argc, argv, options, report_usage, 0); 1535 if (argc) { 1536 /* 1537 * Special case: if there's an argument left then assume that 1538 * it's a symbol filter: 1539 */ 1540 if (argc > 1) 1541 usage_with_options(report_usage, options); 1542 1543 report.symbol_filter_str = argv[0]; 1544 } 1545 1546 if (disassembler_style) { 1547 annotate_opts.disassembler_style = strdup(disassembler_style); 1548 if (!annotate_opts.disassembler_style) 1549 return -ENOMEM; 1550 } 1551 if (objdump_path) { 1552 annotate_opts.objdump_path = strdup(objdump_path); 1553 if (!annotate_opts.objdump_path) 1554 return -ENOMEM; 1555 } 1556 if (addr2line_path) { 1557 symbol_conf.addr2line_path = strdup(addr2line_path); 1558 if (!symbol_conf.addr2line_path) 1559 return -ENOMEM; 1560 } 1561 1562 if (annotate_check_args() < 0) { 1563 ret = -EINVAL; 1564 goto exit; 1565 } 1566 1567 if (report.mmaps_mode) 1568 report.tasks_mode = true; 1569 1570 if (dump_trace && report.disable_order) 1571 ordered_events = false; 1572 1573 if (quiet) 1574 perf_quiet_option(); 1575 1576 ret = symbol__validate_sym_arguments(); 1577 if (ret) 1578 goto exit; 1579 1580 if (report.inverted_callchain) 1581 callchain_param.order = ORDER_CALLER; 1582 if (symbol_conf.cumulate_callchain && !callchain_param.order_set) 1583 callchain_param.order = ORDER_CALLER; 1584 1585 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) && 1586 (int)itrace_synth_opts.callchain_sz > report.max_stack) 1587 report.max_stack = itrace_synth_opts.callchain_sz; 1588 1589 if (!input_name || !strlen(input_name)) { 1590 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 1591 input_name = "-"; 1592 else 1593 input_name = "perf.data"; 1594 } 1595 1596 
repeat: 1597 data.path = input_name; 1598 data.force = symbol_conf.force; 1599 1600 symbol_conf.skip_empty = report.skip_empty; 1601 1602 perf_tool__init(&report.tool, ordered_events); 1603 report.tool.sample = process_sample_event; 1604 report.tool.mmap = perf_event__process_mmap; 1605 report.tool.mmap2 = perf_event__process_mmap2; 1606 report.tool.comm = perf_event__process_comm; 1607 report.tool.namespaces = perf_event__process_namespaces; 1608 report.tool.cgroup = perf_event__process_cgroup; 1609 report.tool.exit = perf_event__process_exit; 1610 report.tool.fork = perf_event__process_fork; 1611 report.tool.context_switch = perf_event__process_switch; 1612 report.tool.lost = perf_event__process_lost; 1613 report.tool.read = process_read_event; 1614 report.tool.attr = process_attr; 1615 #ifdef HAVE_LIBTRACEEVENT 1616 report.tool.tracing_data = perf_event__process_tracing_data; 1617 #endif 1618 report.tool.build_id = perf_event__process_build_id; 1619 report.tool.id_index = perf_event__process_id_index; 1620 report.tool.auxtrace_info = perf_event__process_auxtrace_info; 1621 report.tool.auxtrace = perf_event__process_auxtrace; 1622 report.tool.event_update = perf_event__process_event_update; 1623 report.tool.feature = process_feature_event; 1624 report.tool.ordering_requires_timestamps = true; 1625 report.tool.merge_deferred_callchains = !dump_trace; 1626 1627 session = perf_session__new(&data, &report.tool); 1628 if (IS_ERR(session)) { 1629 ret = PTR_ERR(session); 1630 goto exit; 1631 } 1632 1633 ret = evswitch__init(&report.evswitch, session->evlist, stderr); 1634 if (ret) 1635 goto exit; 1636 1637 if (zstd_init(&(session->zstd_data), 0) < 0) 1638 pr_warning("Decompression initialization failed. 
Reported data may be incomplete.\n"); 1639 1640 if (report.queue_size) { 1641 ordered_events__set_alloc_size(&session->ordered_events, 1642 report.queue_size); 1643 } 1644 1645 session->itrace_synth_opts = &itrace_synth_opts; 1646 1647 report.session = session; 1648 1649 has_br_stack = perf_header__has_feat(&session->header, 1650 HEADER_BRANCH_STACK); 1651 if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER) 1652 has_br_stack = false; 1653 1654 setup_forced_leader(&report, session->evlist); 1655 1656 if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) { 1657 parse_options_usage(NULL, options, "group-sort-idx", 0); 1658 ret = -EINVAL; 1659 goto error; 1660 } 1661 1662 if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch) 1663 has_br_stack = true; 1664 1665 if (has_br_stack && branch_call_mode) 1666 symbol_conf.show_branchflag_count = true; 1667 1668 memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat)); 1669 1670 /* 1671 * Branch mode is a tristate: 1672 * -1 means default, so decide based on the file having branch data. 1673 * 0/1 means the user chose a mode. 
1674 */ 1675 if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) && 1676 !branch_call_mode) { 1677 sort__mode = SORT_MODE__BRANCH; 1678 symbol_conf.cumulate_callchain = false; 1679 } 1680 if (branch_call_mode) { 1681 callchain_param.key = CCKEY_ADDRESS; 1682 callchain_param.branch_callstack = true; 1683 symbol_conf.use_callchain = true; 1684 callchain_register_param(&callchain_param); 1685 if (sort_order == NULL) 1686 sort_order = CALLCHAIN_BRANCH_SORT_ORDER; 1687 } 1688 1689 if (report.mem_mode) { 1690 if (sort__mode == SORT_MODE__BRANCH) { 1691 pr_err("branch and mem mode incompatible\n"); 1692 goto error; 1693 } 1694 sort__mode = SORT_MODE__MEMORY; 1695 symbol_conf.cumulate_callchain = false; 1696 } 1697 1698 if (symbol_conf.report_hierarchy) { 1699 /* 1700 * The hist entries in hierarchy are added during the collpase 1701 * phase. Let's enable it even if no sort keys require it. 1702 */ 1703 perf_hpp_list.need_collapse = true; 1704 } 1705 1706 if (report.use_stdio) 1707 use_browser = 0; 1708 #ifdef HAVE_SLANG_SUPPORT 1709 else if (report.use_tui) 1710 use_browser = 1; 1711 #endif 1712 #ifdef HAVE_GTK2_SUPPORT 1713 else if (report.use_gtk) 1714 use_browser = 2; 1715 #endif 1716 1717 /* Force tty output for header output and per-thread stat. 
*/ 1718 if (report.header || report.header_only || report.show_threads) 1719 use_browser = 0; 1720 if (report.header || report.header_only) 1721 report.tool.show_feat_hdr = SHOW_FEAT_HEADER; 1722 if (report.show_full_info) 1723 report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO; 1724 if (report.stats_mode || report.tasks_mode) 1725 use_browser = 0; 1726 if (report.stats_mode && report.tasks_mode) { 1727 pr_err("Error: --tasks and --mmaps can't be used together with --stats\n"); 1728 goto error; 1729 } 1730 1731 if (report.total_cycles_mode) { 1732 if (sort__mode != SORT_MODE__BRANCH) 1733 report.total_cycles_mode = false; 1734 else 1735 sort_order = NULL; 1736 } 1737 1738 if ((sort_order && strstr(sort_order, "type")) || 1739 (field_order && strstr(field_order, "type"))) { 1740 report.data_type = true; 1741 annotate_opts.annotate_src = false; 1742 1743 /* disable incompatible options */ 1744 symbol_conf.cumulate_callchain = false; 1745 1746 #ifndef HAVE_LIBDW_SUPPORT 1747 pr_err("Error: Data type profiling is disabled due to missing DWARF support\n"); 1748 goto error; 1749 #endif 1750 } 1751 1752 if (strcmp(input_name, "-") != 0) 1753 setup_browser(true); 1754 else 1755 use_browser = 0; 1756 1757 if (report.data_type && use_browser == 1) { 1758 symbol_conf.annotate_data_member = true; 1759 symbol_conf.annotate_data_sample = true; 1760 } 1761 1762 symbol_conf.enable_latency = true; 1763 if (report.disable_order || !perf_session__has_switch_events(session)) { 1764 if (symbol_conf.parallelism_list_str || 1765 symbol_conf.prefer_latency || 1766 (sort_order && (strstr(sort_order, "latency") || 1767 strstr(sort_order, "parallelism"))) || 1768 (field_order && (strstr(field_order, "latency") || 1769 strstr(field_order, "parallelism")))) { 1770 if (report.disable_order) 1771 ui__error("Use of latency profile or parallelism is incompatible with --disable-order.\n"); 1772 else 1773 ui__error("Use of latency profile or parallelism requires --latency flag during 
record.\n"); 1774 return -1; 1775 } 1776 /* 1777 * If user did not ask for anything related to 1778 * latency/parallelism explicitly, just don't show it. 1779 */ 1780 symbol_conf.enable_latency = false; 1781 } 1782 1783 if (last_key != K_SWITCH_INPUT_DATA) { 1784 if (sort_order && strstr(sort_order, "ipc")) { 1785 parse_options_usage(report_usage, options, "s", 1); 1786 goto error; 1787 } 1788 1789 if (sort_order && strstr(sort_order, "symbol")) { 1790 if (sort__mode == SORT_MODE__BRANCH) { 1791 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", 1792 sort_order, "ipc_lbr"); 1793 report.symbol_ipc = true; 1794 } else { 1795 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", 1796 sort_order, "ipc_null"); 1797 } 1798 1799 sort_order = sort_tmp; 1800 } 1801 } 1802 1803 if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) && 1804 (setup_sorting(session->evlist, perf_session__env(session)) < 0)) { 1805 if (sort_order) 1806 parse_options_usage(report_usage, options, "s", 1); 1807 if (field_order) 1808 parse_options_usage(sort_order ? NULL : report_usage, 1809 options, "F", 1); 1810 goto error; 1811 } 1812 1813 if ((report.header || report.header_only) && !quiet) { 1814 perf_session__fprintf_info(session, stdout, 1815 report.show_full_info); 1816 if (report.header_only) { 1817 if (data.is_pipe) { 1818 /* 1819 * we need to process first few records 1820 * which contains PERF_RECORD_HEADER_FEATURE. 1821 */ 1822 perf_session__process_events(session); 1823 } 1824 ret = 0; 1825 goto error; 1826 } 1827 } else if (use_browser == 0 && !quiet && 1828 !report.stats_mode && !report.tasks_mode) { 1829 fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n", 1830 stdout); 1831 } 1832 1833 /* 1834 * Only in the TUI browser we are doing integrated annotation, 1835 * so don't allocate extra space that won't be used in the stdio 1836 * implementation. 
1837 */ 1838 if (ui__has_annotation() || report.symbol_ipc || report.data_type || 1839 report.total_cycles_mode) { 1840 ret = symbol__annotation_init(); 1841 if (ret < 0) 1842 goto error; 1843 /* 1844 * For searching by name on the "Browse map details". 1845 * providing it only in verbose mode not to bloat too 1846 * much struct symbol. 1847 */ 1848 if (verbose > 0) { 1849 /* 1850 * XXX: Need to provide a less kludgy way to ask for 1851 * more space per symbol, the u32 is for the index on 1852 * the ui browser. 1853 * See symbol__browser_index. 1854 */ 1855 symbol_conf.priv_size += sizeof(u32); 1856 } 1857 annotation_config__init(); 1858 } 1859 1860 if (symbol__init(perf_session__env(session)) < 0) 1861 goto error; 1862 1863 if (report.time_str) { 1864 ret = perf_time__parse_for_ranges(report.time_str, session, 1865 &report.ptime_range, 1866 &report.range_size, 1867 &report.range_num); 1868 if (ret < 0) 1869 goto error; 1870 1871 itrace_synth_opts__set_time_range(&itrace_synth_opts, 1872 report.ptime_range, 1873 report.range_num); 1874 } 1875 1876 #ifdef HAVE_LIBTRACEEVENT 1877 if (session->tevent.pevent && 1878 tep_set_function_resolver(session->tevent.pevent, 1879 machine__resolve_kernel_addr, 1880 &session->machines.host) < 0) { 1881 pr_err("%s: failed to set libtraceevent function resolver\n", 1882 __func__); 1883 return -1; 1884 } 1885 #endif 1886 sort__setup_elide(stdout); 1887 1888 ret = __cmd_report(&report); 1889 if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) { 1890 perf_session__delete(session); 1891 last_key = K_SWITCH_INPUT_DATA; 1892 /* 1893 * To support switching between data with and without callchains. 1894 * report__setup_sample_type() will update it properly. 
1895 */ 1896 symbol_conf.use_callchain = false; 1897 goto repeat; 1898 } else 1899 ret = 0; 1900 1901 if (!use_browser && (verbose > 2 || debug_kmaps)) 1902 perf_session__dump_kmaps(session); 1903 error: 1904 if (report.ptime_range) { 1905 itrace_synth_opts__clear_time_range(&itrace_synth_opts); 1906 zfree(&report.ptime_range); 1907 } 1908 1909 if (report.block_reports) { 1910 block_info__free_report(report.block_reports, 1911 report.nr_block_reports); 1912 report.block_reports = NULL; 1913 } 1914 1915 zstd_fini(&(session->zstd_data)); 1916 perf_session__delete(session); 1917 exit: 1918 annotation_options__exit(); 1919 free(sort_order_help); 1920 free(field_order_help); 1921 return ret; 1922 } 1923