// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/config.h"

#include "util/annotate.h"
#include "util/color.h"
#include "util/dso.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include "util/map.h"
#include "util/symbol.h"
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/branch.h"
#include "util/callchain.h"
#include "util/values.h"

#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evswitch.h"
#include "util/header.h"
#include "util/session.h"
#include "util/srcline.h"
#include "util/tool.h"

#include <subcmd/parse-options.h>
#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"

#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
#include "util/block-info.h"

#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/ctype.h>
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/mman.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

struct report {
	struct perf_tool tool;
	struct perf_session *session;
	struct evswitch evswitch;
#ifdef HAVE_SLANG_SUPPORT
	bool use_tui;
#endif
#ifdef HAVE_GTK2_SUPPORT
	bool use_gtk;
#endif
	bool use_stdio;
	bool show_full_info;
	bool show_threads;
	bool inverted_callchain;
	bool mem_mode;
	bool stats_mode;
	bool tasks_mode;
	bool mmaps_mode;
	bool header;
	bool header_only;
	bool nonany_branch_mode;
	bool group_set;
	bool stitch_lbr;
	bool disable_order;
	bool skip_empty;
	bool data_type;
	int max_stack;
	struct perf_read_values show_threads_values;
	const char *pretty_printing_style;
	const char *cpu_list;
	const char *symbol_filter_str;
	const char *time_str;
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;
	float min_percent;
	u64 nr_entries;
	u64 queue_size;
	u64 total_cycles;
	int socket_filter;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	struct branch_type_stat brtype_stat;
	bool symbol_ipc;
	bool total_cycles_mode;
	struct block_report *block_reports;
	int nr_block_reports;
};

static int report__config(const char *var, const char *value, void *cb)
{
	struct report *rep = cb;

	if (!strcmp(var, "report.group")) {
		symbol_conf.event_group = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.percent-limit")) {
		double pcnt = strtof(value, NULL);

		rep->min_percent = pcnt;
		callchain_param.min_percent = pcnt;
		return 0;
	}
	if (!strcmp(var, "report.children")) {
		symbol_conf.cumulate_callchain =
			perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.queue-size"))
		return perf_config_u64(&rep->queue_size, var, value);

	if (!strcmp(var, "report.sort_order")) {
		default_sort_order = strdup(value);
		if (!default_sort_order) {
			pr_err("Not enough memory for report.sort_order\n");
			return -1;
		}
		return 0;
	}

	if (!strcmp(var, "report.skip-empty")) {
		rep->skip_empty = perf_config_bool(var, value);
		return 0;
	}

	pr_debug("%s variable unknown, ignoring...", var);
	return 0;
}

static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
				      void *arg)
{
	int err = 0;
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;
	struct branch_info *bi;

	if (!ui__has_annotation() && !rep->symbol_ipc && !rep->data_type)
		return 0;

	if (sort__mode == SORT_MODE__BRANCH) {
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

	} else if (rep->mem_mode) {
		mi = he->mem_info;
		err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
		if (err)
			goto out;

		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);

	} else if (symbol_conf.cumulate_callchain) {
		if (single)
			err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	} else {
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	}

out:
	return err;
}

static int hist_iter__branch_callback(struct hist_entry_iter *iter,
				      struct addr_location *al __maybe_unused,
				      bool single __maybe_unused,
				      void *arg)
{
	struct hist_entry *he = iter->he;
	struct report *rep = arg;
	struct branch_info *bi = he->branch_info;
	struct perf_sample *sample = iter->sample;
	struct evsel *evsel = iter->evsel;
	int err;

	branch_type_count(&rep->brtype_stat, &bi->flags,
			  bi->from.addr, bi->to.addr);

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
	if (err)
		goto out;

	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

out:
	return err;
}

static void setup_forced_leader(struct report *report,
				struct evlist *evlist)
{
	if (report->group_set)
		evlist__force_leader(evlist);
}

static int process_feature_event(struct perf_session *session,
				 union perf_event *event)
{
	struct report *rep = container_of(session->tool, struct report, tool);

	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);

	if (event->feat.feat_id != HEADER_LAST_FEATURE) {
		pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
		       event->feat.feat_id);
		return -1;
	} else if (rep->header_only) {
		session_done = 1;
	}

	/*
	 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
	 * means all features are received, now we can force the
	 * group if needed.
	 */
	setup_forced_leader(rep, session->evlist);
	return 0;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.evsel = evsel,
		.sample = sample,
		.hide_unresolved = symbol_conf.hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,
	};
	int ret = 0;

	if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
					  sample->time)) {
		return 0;
	}

	if (evswitch__discard(&rep->evswitch, evsel))
		return 0;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		ret = -1;
		goto out_put;
	}

	if (rep->stitch_lbr)
		thread__set_lbr_stitch_enable(al.thread, true);

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out_put;

	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
		goto out_put;

	if (sort__mode == SORT_MODE__BRANCH) {
		/*
		 * A non-synthesized event might not have a branch stack if
		 * branch stacks have been synthesized (using itrace options).
		 */
		if (!sample->branch_stack)
			goto out_put;

		iter.add_entry_cb = hist_iter__branch_callback;
		iter.ops = &hist_iter_branch;
	} else if (rep->mem_mode) {
		iter.ops = &hist_iter_mem;
	} else if (symbol_conf.cumulate_callchain) {
		iter.ops = &hist_iter_cumulative;
	} else {
		iter.ops = &hist_iter_normal;
	}

	if (al.map != NULL)
		map__dso(al.map)->hit = 1;

	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
		hist__account_cycles(sample->branch_stack, &al, sample,
				     rep->nonany_branch_mode,
				     &rep->total_cycles);
	}

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");
out_put:
	addr_location__exit(&al);
	return ret;
}

static int process_read_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->show_threads) {
		const char *name = evsel__name(evsel);
		int err = perf_read_values_add_value(&rep->show_threads_values,
						     event->read.pid, event->read.tid,
						     evsel->core.idx,
						     name,
						     event->read.value);

		if (err)
			return err;
	}

	return 0;
}

/* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
{
	struct perf_session *session = rep->session;
	u64 sample_type = evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data__is_pipe(session->data);
	struct evsel *evsel;

	if (session->itrace_synth_opts->callchain ||
	    session->itrace_synth_opts->add_callchain ||
	    (!is_pipe &&
	     perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
	     !session->itrace_synth_opts->set))
		sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (session->itrace_synth_opts->last_branch ||
	    session->itrace_synth_opts->add_last_branch)
		sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (!is_pipe &&
	    !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (perf_hpp_list.parent) {
			ui__error("Selected --sort parent, but no "
				  "callchain data. Did you call "
				  "'perf record' without -g?\n");
			return -EINVAL;
		}
		if (symbol_conf.use_callchain &&
		    !symbol_conf.show_branchflag_count) {
			ui__error("Selected -g or --branch-history.\n"
				  "But no callchain or branch data.\n"
				  "Did you call 'perf record' without -g or -b?\n");
			return -1;
		}
	} else if (!callchain_param.enabled &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate();
		}
	}

	if (sort__mode == SORT_MODE__BRANCH) {
		if (!is_pipe &&
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
			return -1;
		}
	}

	if (sort__mode == SORT_MODE__MEMORY) {
		/*
		 * FIXUP: prior to kernel 5.18, Arm SPE missed to set
		 * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
		 * compatibility, set the bit if it's an old perf data file.
		 */
		evlist__for_each_entry(session->evlist, evsel) {
			if (strstr(evsel->name, "arm_spe") &&
			    !(sample_type & PERF_SAMPLE_DATA_SRC)) {
				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
				sample_type |= PERF_SAMPLE_DATA_SRC;
			}
		}

		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
			ui__error("Selected --mem-mode but no mem data. "
				  "Did you call perf record without -d?\n");
			return -1;
		}
	}

	callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));

	if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
		ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
			    "Please apply --call-graph lbr when recording.\n");
		rep->stitch_lbr = false;
	}

	/* ??? handle more cases than just ANY?
	 */
	if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
		rep->nonany_branch_mode = true;

#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
	if (dwarf_callchain_users) {
		ui__warning("Please install libunwind or libdw "
			    "development packages during the perf build.\n");
	}
#endif

	return 0;
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
{
	size_t ret;
	char unit;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t size = sizeof(buf);
	int socked_id = hists->socket_filter;

	if (quiet)
		return 0;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, size);
		evname = buf;

		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL) {
		ret += fprintf(fp, " of event%s '%s'",
			       evsel->core.nr_members > 1 ? "s" : "", evname);
	}

	if (rep->time_str)
		ret += fprintf(fp, " (time slices: %s)", rep->time_str);

	if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
		ret += fprintf(fp, ", show reference callgraph");
	}

	if (rep->mem_mode) {
		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
		ret += fprintf(fp, "\n# Sort order : %s", sort_order ?
								     : default_mem_sort_order);
	} else
		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	if (socked_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);

	return ret + fprintf(fp, "\n#\n");
}

static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
{
	struct evsel *pos;
	int i = 0, ret;

	evlist__for_each_entry(evlist, pos) {
		ret = report__browse_block_hists(&rep->block_reports[i++].hist,
						 rep->min_percent, pos,
						 &rep->session->header.env);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
{
	struct evsel *pos;
	int i = 0;

	if (!quiet) {
		fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
			evlist->stats.total_lost_samples);
	}

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = evsel__name(pos);

		if (symbol_conf.event_group && !evsel__is_group_leader(pos))
			continue;

		if (rep->skip_empty && !hists->stats.nr_samples)
			continue;

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);

		if (rep->total_cycles_mode) {
			report__browse_block_hists(&rep->block_reports[i++].hist,
						   rep->min_percent, pos, NULL);
			continue;
		}

		hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
			       !(symbol_conf.use_callchain ||
				 symbol_conf.show_branchflag_count));
		fprintf(stdout, "\n\n");
	}

	if (!quiet)
		fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout, &rep->show_threads_values,
					 style);
		perf_read_values_destroy(&rep->show_threads_values);
	}

	if (sort__mode == SORT_MODE__BRANCH)
		branch_type_stat_display(stdout, &rep->brtype_stat);

	return 0;
}

static void report__warn_kptr_restrict(const struct report *rep)
{
	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (evlist__exclude_kernel(rep->session->evlist))
		return;

	if (kernel_map == NULL ||
	    (map__dso(kernel_map)->hit &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
		const char *desc =
		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
		    "can't be resolved.";

		if (kernel_map && map__has_symbols(kernel_map)) {
			desc = "If some relocation was applied (e.g. "
			       "kexec) symbols may be misresolved.";
		}

		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
"Samples in kernel modules can't be resolved as well.\n\n",
		desc);
	}
}

static int report__gtk_browse_hists(struct report *rep, const char *help)
{
	int (*hist_browser)(struct evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");
		return -1;
	}

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}

static int report__browse_hists(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	char *help = NULL, *path = NULL;

	path = system_path(TIPDIR);
	if (perf_tip(&help, path) || help == NULL) {
		/* fallback for people who don't install perf ;-) */
		free(path);
		path = system_path(DOCDIR);
		if (perf_tip(&help, path) || help == NULL)
			help = strdup("Cannot load tips.txt file, please install perf!");
	}
	free(path);

	switch (use_browser) {
	case 1:
		if (rep->total_cycles_mode) {
			ret = evlist__tui_block_hists_browse(evlist, rep);
			break;
		}

		ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
					       &session->header.env, true);
		/*
		 * Usually "ret" is the last pressed key, and we only
		 * care if the key notifies us to switch data file.
		 */
		if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
			ret = 0;
		break;
	case 2:
		ret = report__gtk_browse_hists(rep, help);
		break;
	default:
		ret = evlist__tty_browse_hists(evlist, rep, help);
		break;
	}
	free(help);
	return ret;
}

static int report__collapse_hists(struct report *rep)
{
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	struct ui_progress prog;
	struct evsel *pos;
	int ret = 0;

	/*
	 * The pipe data needs to setup hierarchy hpp formats now, because it
	 * cannot know about evsels in the data before reading the data. The
	 * normal file data saves the event (attribute) info in the header
	 * section, but pipe does not have the luxury.
	 */
	if (perf_data__is_pipe(session->data)) {
		if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
			ui__error("Failed to setup hierarchy output formats\n");
			return -1;
		}
	}

	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (pos->core.idx == 0)
			hists->symbol_filter_str = rep->symbol_filter_str;

		hists->socket_filter = rep->socket_filter;

		ret = hists__collapse_resort(hists, &prog);
		if (ret < 0)
			break;

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	ui_progress__finish();
	return ret;
}

static int hists__resort_cb(struct hist_entry *he, void *arg)
{
	struct report *rep = arg;
	struct symbol *sym = he->ms.sym;

	if (rep->symbol_ipc && sym && !sym->annotate2) {
		struct evsel *evsel = hists_to_evsel(he->hists);

		symbol__annotate2(&he->ms, evsel, NULL);
	}

	return 0;
}

static void report__output_resort(struct report *rep)
{
	struct ui_progress prog;
	struct evsel *pos;

	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
	}

	ui_progress__finish();
}

static int count_sample_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct hists *hists = evsel__hists(evsel);

	hists__inc_nr_events(hists);
	return 0;
}

static int count_lost_samples_event(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct evsel *evsel;

	evsel = evlist__id2evsel(rep->session->evlist, sample->id);
	if (evsel) {
		hists__inc_nr_lost_samples(evsel__hists(evsel),
					   event->lost_samples.lost);
	}
	return 0;
}

static int process_attr(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist);

static void stats_setup(struct report *rep)
{
	memset(&rep->tool, 0, sizeof(rep->tool));
	rep->tool.attr = process_attr;
	rep->tool.sample = count_sample_event;
	rep->tool.lost_samples = count_lost_samples_event;
	rep->tool.no_warn = true;
}

static int stats_print(struct report *rep)
{
	struct perf_session *session = rep->session;

	perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
	evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);
	return 0;
}

static void tasks_setup(struct report *rep)
{
	memset(&rep->tool, 0, sizeof(rep->tool));
	rep->tool.ordered_events = true;
	if (rep->mmaps_mode) {
		rep->tool.mmap = perf_event__process_mmap;
		rep->tool.mmap2 = perf_event__process_mmap2;
	}
	rep->tool.attr = process_attr;
	rep->tool.comm = perf_event__process_comm;
	rep->tool.exit = perf_event__process_exit;
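	/*
	 * Note: tasks mode only reconstructs the task tree and, with --mmaps,
	 * the memory maps; it does not aggregate samples, so only task
	 * lifecycle events (comm/exit/fork and optionally mmap/mmap2) need
	 * handlers here.
	 */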
	rep->tool.fork = perf_event__process_fork;
	rep->tool.no_warn = true;
}

struct maps__fprintf_task_args {
	int indent;
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_task_cb(struct map *map, void *data)
{
	struct maps__fprintf_task_args *args = data;
	const struct dso *dso = map__dso(map);
	u32 prot = map__prot(map);
	int ret;

	ret = fprintf(args->fp,
		      "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
		      args->indent, "", map__start(map), map__end(map),
		      prot & PROT_READ ? 'r' : '-',
		      prot & PROT_WRITE ? 'w' : '-',
		      prot & PROT_EXEC ? 'x' : '-',
		      map__flags(map) ? 's' : 'p',
		      map__pgoff(map),
		      dso->id.ino, dso->name);

	if (ret < 0)
		return ret;

	args->printed += ret;
	return 0;
}

static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
	struct maps__fprintf_task_args args = {
		.indent = indent,
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_task_cb, &args);

	return args.printed;
}

static int thread_level(struct machine *machine, const struct thread *thread)
{
	struct thread *parent_thread;
	int res;

	if (thread__tid(thread) <= 0)
		return 0;

	if (thread__ppid(thread) <= 0)
		return 1;

	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
	if (!parent_thread) {
		pr_err("Missing parent thread of %d\n", thread__tid(thread));
		return 0;
	}
	res = 1 + thread_level(machine, parent_thread);
	thread__put(parent_thread);
	return res;
}

static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
{
	int level = thread_level(machine, thread);
	int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
				  thread__pid(thread), thread__tid(thread),
				  thread__ppid(thread), level, "");

	fprintf(fp, "%s\n", thread__comm_str(thread));

	maps__fprintf_task(thread__maps(thread), comm_indent, fp);
}

/*
 * Sort two thread list nodes such that they form a tree. The first node is the
 * root of the tree, its children are ordered numerically after it. If a child
 * has children itself then they appear immediately after their parent. For
 * example, the 4 threads in the order they'd appear in the list:
 * - init with a TID 1 and a parent of 0
 * - systemd with a TID 3000 and a parent of init/1
 * - systemd child thread with TID 4000, the parent is 3000
 * - NetworkManager is a child of init with a TID of 3500.
 */
static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
{
	struct machine *machine = priv;
	struct thread_list *task_a = list_entry(la, struct thread_list, list);
	struct thread_list *task_b = list_entry(lb, struct thread_list, list);
	struct thread *a = task_a->thread;
	struct thread *b = task_b->thread;
	int level_a, level_b, res;

	/* Same thread? */
	if (thread__tid(a) == thread__tid(b))
		return 0;

	/* Compare a and b to root. */
	if (thread__tid(a) == 0)
		return -1;

	if (thread__tid(b) == 0)
		return 1;

	/* If parents match sort by tid. */
	if (thread__ppid(a) == thread__ppid(b))
		return thread__tid(a) < thread__tid(b) ?
			-1 : 1;

	/*
	 * Find a and b such that if they are a child of each other a and b's
	 * tid's match, otherwise a and b have a common parent and distinct
	 * tid's to sort by. First make the depths of the threads match.
	 */
	level_a = thread_level(machine, a);
	level_b = thread_level(machine, b);
	a = thread__get(a);
	b = thread__get(b);
	for (int i = level_a; i > level_b; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));

		thread__put(a);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(a));
			thread__put(b);
			return -1;
		}
		a = parent;
	}
	for (int i = level_b; i > level_a; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));

		thread__put(b);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(b));
			thread__put(a);
			return 1;
		}
		b = parent;
	}
	/* Search up to a common parent. */
	while (thread__ppid(a) != thread__ppid(b)) {
		struct thread *parent;

		parent = machine__find_thread(machine, -1, thread__ppid(a));
		thread__put(a);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(a));
		a = parent;
		parent = machine__find_thread(machine, -1, thread__ppid(b));
		thread__put(b);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(b));
		b = parent;
		if (!a || !b) {
			/* Handle missing parent (unexpected) with some sanity. */
			thread__put(a);
			thread__put(b);
			return !a && !b ? 0 : (!a ? -1 : 1);
		}
	}
	if (thread__tid(a) == thread__tid(b)) {
		/* a is a child of b or vice-versa, deeper levels appear later. */
		res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
	} else {
		/* Sort by tid now the parent is the same. */
		res = thread__tid(a) < thread__tid(b) ?
			-1 : 1;
	}
	thread__put(a);
	thread__put(b);
	return res;
}

static int tasks_print(struct report *rep, FILE *fp)
{
	struct machine *machine = &rep->session->machines.host;
	LIST_HEAD(tasks);
	int ret;

	ret = machine__thread_list(machine, &tasks);
	if (!ret) {
		struct thread_list *task;

		list_sort(machine, &tasks, task_list_cmp);

		fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");

		list_for_each_entry(task, &tasks, list)
			task__print_level(machine, task->thread, fp);
	}
	thread_list__delete(&tasks);
	return ret;
}

static int __cmd_report(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evsel *pos;
	struct perf_data *data = session->data;

	signal(SIGINT, sig_handler);

	if (rep->cpu_list) {
		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
					       rep->cpu_bitmap);
		if (ret) {
			ui__error("failed to set cpu bitmap\n");
			return ret;
		}
		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
	}

	if (rep->show_threads) {
		ret = perf_read_values_init(&rep->show_threads_values);
		if (ret)
			return ret;
	}

	ret = report__setup_sample_type(rep);
	if (ret) {
		/* report__setup_sample_type() already showed error message */
		return ret;
	}

	if (rep->stats_mode)
		stats_setup(rep);

	if (rep->tasks_mode)
		tasks_setup(rep);

	ret = perf_session__process_events(session);
	if (ret) {
		ui__error("failed to process sample\n");
		return ret;
	}

	evlist__check_mem_load_aux(session->evlist);

	if (rep->stats_mode)
		return stats_print(rep);

	if (rep->tasks_mode)
		return tasks_print(rep, stdout);

	report__warn_kptr_restrict(rep);

	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
		if (verbose > 3)
			perf_session__fprintf(session, stdout);

		if (verbose > 2)
			perf_session__fprintf_dsos(session, stdout);

		if (dump_trace) {
			perf_session__fprintf_nr_events(session, stdout,
							rep->skip_empty);
			evlist__fprintf_nr_events(session->evlist, stdout,
						  rep->skip_empty);
			return 0;
		}
	}

	ret = report__collapse_hists(rep);
	if (ret) {
		ui__error("failed to process hist entry\n");
		return ret;
	}

	if (session_done())
		return 0;

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
	 */
	rep->nr_entries = 0;
	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (rep->nr_entries == 0) {
		ui__error("The %s data has no samples!\n", data->path);
		return 0;
	}

	report__output_resort(rep);

	if (rep->total_cycles_mode) {
		int block_hpps[6] = {
			PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
			PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
			PERF_HPP_REPORT__BLOCK_RANGE,
			PERF_HPP_REPORT__BLOCK_DSO,
		};

		rep->block_reports = block_info__create_report(session->evlist,
							       rep->total_cycles,
							       block_hpps, 6,
							       &rep->nr_block_reports);
		if (!rep->block_reports)
			return -1;
	}

	return report__browse_hists(rep);
}

static int
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->mode = CHAIN_NONE;
		return 0;
	}

	return parse_callchain_report_opt(arg);
}

static int
parse_time_quantum(const struct option *opt, const char *arg,
		   int unset __maybe_unused)
{
	unsigned long *time_q = opt->value;
	char *end;

	*time_q = strtoul(arg, &end, 0);
	if (end == arg)
		goto parse_err;
	if (*time_q == 0) {
		pr_err("time quantum cannot be 0");
		return -1;
	}
	end = skip_spaces(end);
	if (*end == 0)
		return 0;
	if (!strcmp(end, "s")) {
		*time_q *= NSEC_PER_SEC;
		return 0;
	}
	if (!strcmp(end, "ms")) {
		*time_q *= NSEC_PER_MSEC;
		return 0;
	}
	if (!strcmp(end, "us")) {
		*time_q *= NSEC_PER_USEC;
		return 0;
	}
	if (!strcmp(end, "ns"))
		return 0;
parse_err:
	pr_err("Cannot parse time quantum `%s'\n", arg);
	return -1;
}

int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
				const char *arg, int unset __maybe_unused)
{
	if (arg) {
		int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
		if (err) {
			char buf[BUFSIZ];
			regerror(err, &ignore_callees_regex, buf, sizeof(buf));
			pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
			return -1;
		}
		have_ignore_callees = 1;
	}

	return 0;
}

static int
parse_branch_mode(const struct option *opt,
		  const char *str __maybe_unused, int unset)
{
	int *branch_mode = opt->value;

	*branch_mode = !unset;
	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *str,
		    int unset __maybe_unused)
{
	struct report *rep = opt->value;
	double pcnt = strtof(str, NULL);

	rep->min_percent = pcnt;
	callchain_param.min_percent = pcnt;
	return 0;
}

static int process_attr(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist)
{
	u64 sample_type;
	int err;

	err = perf_event__process_attr(tool, event, pevlist);
	if (err)
		return err;

	/*
	 * Check if we need to enable callchains based
	 * on events sample_type.
	 */
	sample_type = evlist__combined_sample_type(*pevlist);
	callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
	return 0;
}

int cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
	struct stat st;
	bool has_br_stack = false;
	int branch_mode = -1;
	int last_key = 0;
	bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
						    CALLCHAIN_REPORT_HELP
						    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
	const char * const report_usage[] = {
		"perf report [<options>]",
		NULL
	};
	struct report report = {
		.tool = {
			.sample = process_sample_event,
			.mmap = perf_event__process_mmap,
			.mmap2 = perf_event__process_mmap2,
			.comm = perf_event__process_comm,
			.namespaces = perf_event__process_namespaces,
			.cgroup = perf_event__process_cgroup,
			.exit = perf_event__process_exit,
			.fork = perf_event__process_fork,
			.lost = perf_event__process_lost,
			.read = process_read_event,
			.attr = process_attr,
#ifdef HAVE_LIBTRACEEVENT
			.tracing_data = perf_event__process_tracing_data,
#endif
			.build_id = perf_event__process_build_id,
			.id_index = perf_event__process_id_index,
			.auxtrace_info = perf_event__process_auxtrace_info,
			.auxtrace = perf_event__process_auxtrace,
			.event_update = perf_event__process_event_update,
			.feature = process_feature_event,
			.ordered_events = true,
			.ordering_requires_timestamps = true,
		},
		.max_stack = PERF_MAX_STACK_DEPTH,
		.pretty_printing_style = "normal",
		.socket_filter = -1,
		.skip_empty = true,
	};
	char *sort_order_help = sort_help("sort by key(s):");
	char *field_order_help = sort_help("output field(s): overhead period sample ");
	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
	OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
	OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_BOOLEAN('T', "threads", &report.show_threads,
		    "Show per-thread event counters"),
	OPT_STRING(0, "pretty",
		   &report.pretty_printing_style, "key",
		   "pretty printing style key: normal raw"),
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
#endif
#ifdef HAVE_GTK2_SUPPORT
	OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
#endif
	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
		    "Use the stdio interface"),
	OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
	OPT_BOOLEAN(0, "header-only", &report.header_only,
		    "Show only data header."),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   sort_order_help),
	OPT_STRING('F', "fields", &field_order, "key[,keys...]",
		   field_order_help),
	OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes"),
	OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
			     report_callchain_help, &report_parse_callchain_opt,
			     callchain_default_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well. "
		    "Enabled by default, use --no-children to disable."),
	OPT_INTEGER(0, "max-stack", &report.max_stack,
		    "Set the maximum stack depth when parsing the callchain, "
		    "anything beyond the specified depth will be ignored. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
		    "alias for inverted call graph"),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		     "ignore callees of these functions in call graphs",
		     report_parse_ignore_callees_opt),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "only consider symbols in these pids"),
	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "only consider symbols in these tids"),
	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
		   "only show symbols that (partially) match with this filter"),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns '.' "
		   "is reserved."),
	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
		    "Only display entries resolved to a symbol"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
		    "Display extended information about perf.data file"),
	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
		   "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
		   "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
		    "Show event group information together"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
		    "use branch records for per branch histogram filling",
		    parse_branch_mode),
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		    "add last branch records to call history"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
		    "Disable symbol demangling"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
	OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
		    "Number of samples to save per histogram entry for individual browsing"),
	OPT_CALLBACK(0, "percent-limit", &report, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "how to display percentage of filtered entries", parse_filter_percentage),
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options\n" ITRACE_HELP,
			    itrace_parse_synth_opts),
	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
		    "Show full source file name path for source lines"),
	OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
		    "Show callgraph from reference event"),
	OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
	OPT_INTEGER(0, "socket-filter", &report.socket_filter,
		    "only show processor socket that match with this filter"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or "
		    "plugins)"),
	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
			     "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
			     stdio__config_color, "always"),
	OPT_STRING(0, "time", &report.time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
		    "Show inline function"),
	OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
		     "Set percent type local/global-period/hits",
		     annotate_parse_percent_type),
	OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
	OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
		     "Set time quantum for time sort key (default 100ms)",
		     parse_time_quantum),
	OPTS_EVSWITCH(&report.evswitch),
	OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
		    "Sort all blocks by 'Sampled Cycles%'"),
	OPT_BOOLEAN(0, "disable-order", &report.disable_order,
		    "Disable raw trace ordering"),
	OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
		    "Do not display empty (or dummy) events in the output"),
	OPT_END()
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	int ret = hists__init();
	char sort_tmp[128];

	if (ret < 0)
		goto exit;

	/*
	 * tasks_mode require access to exited threads to list those that are in
	 * the data file. Off-cpu events are synthesized after other events and
	 * reference exited threads.
	 */
	symbol_conf.keep_exited_threads = true;

	annotation_options__init();

	ret = perf_config(report__config, &report);
	if (ret)
		goto exit;

	argc = parse_options(argc, argv, options, report_usage, 0);
	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(report_usage, options);

		report.symbol_filter_str = argv[0];
	}

	if (disassembler_style) {
		annotate_opts.disassembler_style = strdup(disassembler_style);
		if (!annotate_opts.disassembler_style)
			return -ENOMEM;
	}
	if (objdump_path) {
		annotate_opts.objdump_path = strdup(objdump_path);
		if (!annotate_opts.objdump_path)
			return -ENOMEM;
	}
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
			return -ENOMEM;
	}

	if (annotate_check_args() < 0) {
		ret = -EINVAL;
		goto exit;
	}

	if (report.mmaps_mode)
		report.tasks_mode = true;

	if (dump_trace && report.disable_order)
		report.tool.ordered_events = false;

	if (quiet)
		perf_quiet_option();

	ret = symbol__validate_sym_arguments();
	if (ret)
		goto exit;

	if (report.inverted_callchain)
		callchain_param.order = ORDER_CALLER;
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
	    (int)itrace_synth_opts.callchain_sz > report.max_stack)
		report.max_stack = itrace_synth_opts.callchain_sz;

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	data.path =
		input_name;
	data.force = symbol_conf.force;

repeat:
	session = perf_session__new(&data, &report.tool);
	if (IS_ERR(session)) {
		ret = PTR_ERR(session);
		goto exit;
	}

	ret = evswitch__init(&report.evswitch, session->evlist, stderr);
	if (ret)
		goto exit;

	if (zstd_init(&(session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");

	if (report.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       report.queue_size);
	}

	session->itrace_synth_opts = &itrace_synth_opts;

	report.session = session;

	has_br_stack = perf_header__has_feat(&session->header,
					     HEADER_BRANCH_STACK);
	if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
		has_br_stack = false;

	setup_forced_leader(&report, session->evlist);

	if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
		parse_options_usage(NULL, options, "group-sort-idx", 0);
		ret = -EINVAL;
		goto error;
	}

	if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
		has_br_stack = true;

	if (has_br_stack && branch_call_mode)
		symbol_conf.show_branchflag_count = true;

	memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));

	/*
	 * Branch mode is a tristate:
	 * -1 means default, so decide based on the file having branch data.
	 * 0/1 means the user chose a mode.
	 */
	if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
	    !branch_call_mode) {
		sort__mode = SORT_MODE__BRANCH;
		symbol_conf.cumulate_callchain = false;
	}
	if (branch_call_mode) {
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		symbol_conf.use_callchain = true;
		callchain_register_param(&callchain_param);
		if (sort_order == NULL)
			sort_order = "srcline,symbol,dso";
	}

	if (report.mem_mode) {
		if (sort__mode == SORT_MODE__BRANCH) {
			pr_err("branch and mem mode incompatible\n");
			goto error;
		}
		sort__mode = SORT_MODE__MEMORY;
		symbol_conf.cumulate_callchain = false;
	}

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(report_usage, options, "F", 1);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto error;
		}

		perf_hpp_list.need_collapse = true;
	}

	if (report.use_stdio)
		use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
	else if (report.use_tui)
		use_browser = 1;
#endif
#ifdef HAVE_GTK2_SUPPORT
	else if (report.use_gtk)
		use_browser = 2;
#endif

	/* Force tty output for header output and per-thread stat.
	 */
	if (report.header || report.header_only || report.show_threads)
		use_browser = 0;
	if (report.header || report.header_only)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
	if (report.show_full_info)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
	if (report.stats_mode || report.tasks_mode)
		use_browser = 0;
	if (report.stats_mode && report.tasks_mode) {
		pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
		goto error;
	}

	if (report.total_cycles_mode) {
		if (sort__mode != SORT_MODE__BRANCH)
			report.total_cycles_mode = false;
		else
			sort_order = NULL;
	}

	if (sort_order && strstr(sort_order, "type")) {
		report.data_type = true;
		annotate_opts.annotate_src = false;

#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
		pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
		goto error;
#endif
	}

	if (strcmp(input_name, "-") != 0)
		setup_browser(true);
	else
		use_browser = 0;

	if (sort_order && strstr(sort_order, "ipc")) {
		parse_options_usage(report_usage, options, "s", 1);
		goto error;
	}

	if (sort_order && strstr(sort_order, "symbol")) {
		if (sort__mode == SORT_MODE__BRANCH) {
			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
				 sort_order, "ipc_lbr");
			report.symbol_ipc = true;
		} else {
			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
				 sort_order, "ipc_null");
		}

		sort_order = sort_tmp;
	}

	if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
	    (setup_sorting(session->evlist) < 0)) {
		if (sort_order)
			parse_options_usage(report_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : report_usage,
					    options, "F", 1);
		goto error;
	}

	if ((report.header || report.header_only) && !quiet) {
		perf_session__fprintf_info(session, stdout,
					   report.show_full_info);
		if (report.header_only) {
			if (data.is_pipe) {
				/*
				 * we need to process first few records
				 * which contains PERF_RECORD_HEADER_FEATURE.
				 */
				perf_session__process_events(session);
			}
			ret = 0;
			goto error;
		}
	} else if (use_browser == 0 && !quiet &&
		   !report.stats_mode && !report.tasks_mode) {
		fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
		      stdout);
	}

	/*
	 * Only in the TUI browser we are doing integrated annotation,
	 * so don't allocate extra space that won't be used in the stdio
	 * implementation.
	 */
	if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
	    report.total_cycles_mode) {
		ret = symbol__annotation_init();
		if (ret < 0)
			goto error;
		/*
		 * For searching by name on the "Browse map details".
		 * providing it only in verbose mode not to bloat too
		 * much struct symbol.
		 */
		if (verbose > 0) {
			/*
			 * XXX: Need to provide a less kludgy way to ask for
			 * more space per symbol, the u32 is for the index on
			 * the ui browser.
			 * See symbol__browser_index.
			 */
			symbol_conf.priv_size += sizeof(u32);
		}
		annotation_config__init();
	}

	if (symbol__init(&session->header.env) < 0)
		goto error;

	if (report.time_str) {
		ret = perf_time__parse_for_ranges(report.time_str, session,
						  &report.ptime_range,
						  &report.range_size,
						  &report.range_num);
		if (ret < 0)
			goto error;

		itrace_synth_opts__set_time_range(&itrace_synth_opts,
						  report.ptime_range,
						  report.range_num);
	}

#ifdef HAVE_LIBTRACEEVENT
	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("%s: failed to set libtraceevent function resolver\n",
		       __func__);
		return -1;
	}
#endif
	sort__setup_elide(stdout);

	ret = __cmd_report(&report);
	if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
		perf_session__delete(session);
		last_key = K_SWITCH_INPUT_DATA;
		goto repeat;
	} else
		ret = 0;

	if (!use_browser && (verbose > 2 || debug_kmaps))
		perf_session__dump_kmaps(session);
error:
	if (report.ptime_range) {
		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
		zfree(&report.ptime_range);
	}

	if (report.block_reports) {
		block_info__free_report(report.block_reports,
					report.nr_block_reports);
		report.block_reports = NULL;
	}

	zstd_fini(&(session->zstd_data));
	perf_session__delete(session);
exit:
	annotation_options__exit();
	free(sort_order_help);
	free(field_order_help);
	return ret;
}
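
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * perfconfig keys handled by report__config() above, plus command lines
 * that exercise the main modes implemented in this command. The values
 * shown are examples, not defaults taken from this file.
 *
 *   # ~/.perfconfig
 *   [report]
 *	group = true		# report.group -> symbol_conf.event_group
 *	percent-limit = 0.5	# report.percent-limit -> min_percent
 *	children = false	# report.children -> cumulate_callchain
 *	queue-size = 104857600	# report.queue-size -> ordered_events alloc size
 *	sort_order = comm,dso,symbol
 *	skip-empty = true
 *
 *   perf report --stdio --sort comm,dso,symbol
 *   perf report -b		# branch mode (SORT_MODE__BRANCH)
 *   perf report --mem-mode	# memory mode (SORT_MODE__MEMORY)
 *   perf report --total-cycles	# block reports via block_info__create_report()
 *   perf report --tasks --mmaps	# tasks_print() / maps__fprintf_task()
 */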