1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evlist-hybrid.h" 52 #include "util/evsel.h" 53 #include "util/debug.h" 54 #include "util/color.h" 55 #include "util/stat.h" 56 #include "util/header.h" 57 #include "util/cpumap.h" 58 #include "util/thread_map.h" 59 #include "util/counts.h" 60 #include "util/topdown.h" 61 #include "util/session.h" 62 #include "util/tool.h" 63 #include "util/string2.h" 64 #include "util/metricgroup.h" 65 #include "util/synthetic-events.h" 66 #include "util/target.h" 67 #include "util/time-utils.h" 68 #include "util/top.h" 69 #include "util/affinity.h" 70 #include "util/pfm.h" 71 #include "util/bpf_counter.h" 72 #include "util/iostat.h" 73 #include "util/pmu-hybrid.h" 74 #include "asm/bug.h" 75 76 #include <linux/time64.h> 77 #include <linux/zalloc.h> 78 #include <api/fs/fs.h> 79 #include <errno.h> 80 #include <signal.h> 81 #include <stdlib.h> 82 #include <sys/prctl.h> 83 #include <inttypes.h> 84 #include <locale.h> 85 #include <math.h> 86 #include <sys/types.h> 87 #include <sys/stat.h> 88 #include <sys/wait.h> 89 #include <unistd.h> 90 #include <sys/time.h> 91 #include <sys/resource.h> 92 #include <linux/err.h> 93 94 #include <linux/ctype.h> 95 #include <perf/evlist.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 /* Default events used for perf stat -T */ 103 static const char *transaction_attrs = { 104 "task-clock," 105 "{" 106 "instructions," 107 "cycles," 108 "cpu/cycles-t/," 109 "cpu/tx-start/," 110 "cpu/el-start/," 111 "cpu/cycles-ct/" 112 "}" 113 }; 114 115 /* More limited version when the CPU does not have all events. 
*/ 116 static const char * transaction_limited_attrs = { 117 "task-clock," 118 "{" 119 "instructions," 120 "cycles," 121 "cpu/cycles-t/," 122 "cpu/tx-start/" 123 "}" 124 }; 125 126 static const char * topdown_attrs[] = { 127 "topdown-total-slots", 128 "topdown-slots-retired", 129 "topdown-recovery-bubbles", 130 "topdown-fetch-bubbles", 131 "topdown-slots-issued", 132 NULL, 133 }; 134 135 static const char *topdown_metric_attrs[] = { 136 "slots", 137 "topdown-retiring", 138 "topdown-bad-spec", 139 "topdown-fe-bound", 140 "topdown-be-bound", 141 NULL, 142 }; 143 144 static const char *topdown_metric_L2_attrs[] = { 145 "slots", 146 "topdown-retiring", 147 "topdown-bad-spec", 148 "topdown-fe-bound", 149 "topdown-be-bound", 150 "topdown-heavy-ops", 151 "topdown-br-mispredict", 152 "topdown-fetch-lat", 153 "topdown-mem-bound", 154 NULL, 155 }; 156 157 #define TOPDOWN_MAX_LEVEL 2 158 159 static const char *smi_cost_attrs = { 160 "{" 161 "msr/aperf/," 162 "msr/smi/," 163 "cycles" 164 "}" 165 }; 166 167 static struct evlist *evsel_list; 168 static bool all_counters_use_bpf = true; 169 170 static struct target target = { 171 .uid = UINT_MAX, 172 }; 173 174 #define METRIC_ONLY_LEN 20 175 176 static volatile pid_t child_pid = -1; 177 static int detailed_run = 0; 178 static bool transaction_run; 179 static bool topdown_run = false; 180 static bool smi_cost = false; 181 static bool smi_reset = false; 182 static int big_num_opt = -1; 183 static bool group = false; 184 static const char *pre_cmd = NULL; 185 static const char *post_cmd = NULL; 186 static bool sync_run = false; 187 static bool forever = false; 188 static bool force_metric_only = false; 189 static struct timespec ref_time; 190 static bool append_file; 191 static bool interval_count; 192 static const char *output_name; 193 static int output_fd; 194 195 struct perf_stat { 196 bool record; 197 struct perf_data data; 198 struct perf_session *session; 199 u64 bytes_written; 200 struct perf_tool tool; 201 bool maps_allocated; 202 struct perf_cpu_map *cpus; 203 struct perf_thread_map *threads; 204 enum aggr_mode aggr_mode; 205 }; 206 207 static struct perf_stat perf_stat; 208 #define STAT_RECORD perf_stat.record 209 210 static volatile int done = 0; 211 212 static struct perf_stat_config stat_config = { 213 .aggr_mode = AGGR_GLOBAL, 214 .scale = true, 215 .unit_width = 4, /* strlen("unit") */ 216 .run_count = 1, 217 .metric_only_len = METRIC_ONLY_LEN, 218 .walltime_nsecs_stats = &walltime_nsecs_stats, 219 .ru_stats = &ru_stats, 220 .big_num = true, 221 .ctl_fd = -1, 222 .ctl_fd_ack = -1, 223 .iostat_run = false, 224 }; 225 226 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 227 { 228 if (!a->core.cpus && !b->core.cpus) 229 return true; 230 231 if (!a->core.cpus || !b->core.cpus) 232 return false; 233 234 if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus)) 235 return false; 236 237 for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) { 238 if (perf_cpu_map__cpu(a->core.cpus, i).cpu != 239 perf_cpu_map__cpu(b->core.cpus, i).cpu) 240 return false; 241 } 242 243 return true; 244 } 245 246 static void evlist__check_cpu_maps(struct evlist *evlist) 247 { 248 struct evsel *evsel, *pos, *leader; 249 char buf[1024]; 250 251 if (evlist__has_hybrid(evlist)) 252 evlist__warn_hybrid_group(evlist); 253 254 evlist__for_each_entry(evlist, evsel) { 255 leader = evsel__leader(evsel); 256 257 /* Check that leader matches cpus with each member. 
*/ 258 if (leader == evsel) 259 continue; 260 if (cpus_map_matched(leader, evsel)) 261 continue; 262 263 /* If there's mismatch disable the group and warn user. */ 264 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 265 evsel__group_desc(leader, buf, sizeof(buf)); 266 pr_warning(" %s\n", buf); 267 268 if (verbose) { 269 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 270 pr_warning(" %s: %s\n", leader->name, buf); 271 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 272 pr_warning(" %s: %s\n", evsel->name, buf); 273 } 274 275 for_each_group_evsel(pos, leader) 276 evsel__remove_from_group(pos, leader); 277 } 278 } 279 280 static inline void diff_timespec(struct timespec *r, struct timespec *a, 281 struct timespec *b) 282 { 283 r->tv_sec = a->tv_sec - b->tv_sec; 284 if (a->tv_nsec < b->tv_nsec) { 285 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 286 r->tv_sec--; 287 } else { 288 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 289 } 290 } 291 292 static void perf_stat__reset_stats(void) 293 { 294 int i; 295 296 evlist__reset_stats(evsel_list); 297 perf_stat__reset_shadow_stats(); 298 299 for (i = 0; i < stat_config.stats_num; i++) 300 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 301 } 302 303 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 304 union perf_event *event, 305 struct perf_sample *sample __maybe_unused, 306 struct machine *machine __maybe_unused) 307 { 308 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 309 pr_err("failed to write perf data, error: %m\n"); 310 return -1; 311 } 312 313 perf_stat.bytes_written += event->header.size; 314 return 0; 315 } 316 317 static int write_stat_round_event(u64 tm, u64 type) 318 { 319 return perf_event__synthesize_stat_round(NULL, tm, type, 320 process_synthesized_event, 321 NULL); 322 } 323 324 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 325 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 326 327 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 328 329 static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, 330 struct perf_counts_values *count) 331 { 332 struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); 333 struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); 334 335 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 336 process_synthesized_event, NULL); 337 } 338 339 static int read_single_counter(struct evsel *counter, int cpu_map_idx, 340 int thread, struct timespec *rs) 341 { 342 switch(counter->tool_event) { 343 case PERF_TOOL_DURATION_TIME: { 344 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 345 struct perf_counts_values *count = 346 perf_counts(counter->counts, cpu_map_idx, thread); 347 count->ena = count->run = val; 348 count->val = val; 349 return 0; 350 } 351 case PERF_TOOL_USER_TIME: 352 case PERF_TOOL_SYSTEM_TIME: { 353 u64 val; 354 struct perf_counts_values *count = 355 perf_counts(counter->counts, cpu_map_idx, thread); 356 if (counter->tool_event == PERF_TOOL_USER_TIME) 357 val = ru_stats.ru_utime_usec_stat.mean; 358 else 359 val = ru_stats.ru_stime_usec_stat.mean; 360 count->ena = count->run = val; 361 count->val = val; 362 return 0; 363 } 364 default: 365 case PERF_TOOL_NONE: 366 return evsel__read_counter(counter, cpu_map_idx, thread); 367 case PERF_TOOL_MAX: 368 /* This should never be reached */ 369 return 0; 370 } 371 } 372 373 /* 374 * Read out the results of a single counter: 375 * do not aggregate 
counts across CPUs in system-wide mode 376 */ 377 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) 378 { 379 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 380 int thread; 381 382 if (!counter->supported) 383 return -ENOENT; 384 385 for (thread = 0; thread < nthreads; thread++) { 386 struct perf_counts_values *count; 387 388 count = perf_counts(counter->counts, cpu_map_idx, thread); 389 390 /* 391 * The leader's group read loads data into its group members 392 * (via evsel__read_counter()) and sets their count->loaded. 393 */ 394 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && 395 read_single_counter(counter, cpu_map_idx, thread, rs)) { 396 counter->counts->scaled = -1; 397 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; 398 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; 399 return -1; 400 } 401 402 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); 403 404 if (STAT_RECORD) { 405 if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { 406 pr_err("failed to write stat event\n"); 407 return -1; 408 } 409 } 410 411 if (verbose > 1) { 412 fprintf(stat_config.output, 413 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 414 evsel__name(counter), 415 perf_cpu_map__cpu(evsel__cpus(counter), 416 cpu_map_idx).cpu, 417 count->val, count->ena, count->run); 418 } 419 } 420 421 return 0; 422 } 423 424 static int read_affinity_counters(struct timespec *rs) 425 { 426 struct evlist_cpu_iterator evlist_cpu_itr; 427 struct affinity saved_affinity, *affinity; 428 429 if (all_counters_use_bpf) 430 return 0; 431 432 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 433 affinity = NULL; 434 else if (affinity__setup(&saved_affinity) < 0) 435 return -1; 436 else 437 affinity = &saved_affinity; 438 439 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 440 struct evsel *counter = evlist_cpu_itr.evsel; 441 442 if (evsel__is_bpf(counter)) 443 continue; 444 445 if (!counter->err) { 446 counter->err = read_counter_cpu(counter, rs, 447 evlist_cpu_itr.cpu_map_idx); 448 } 449 } 450 if (affinity) 451 affinity__cleanup(&saved_affinity); 452 453 return 0; 454 } 455 456 static int read_bpf_map_counters(void) 457 { 458 struct evsel *counter; 459 int err; 460 461 evlist__for_each_entry(evsel_list, counter) { 462 if (!evsel__is_bpf(counter)) 463 continue; 464 465 err = bpf_counter__read(counter); 466 if (err) 467 return err; 468 } 469 return 0; 470 } 471 472 static void read_counters(struct timespec *rs) 473 { 474 struct evsel *counter; 475 476 if (!stat_config.stop_read_counter) { 477 if (read_bpf_map_counters() || 478 read_affinity_counters(rs)) 479 return; 480 } 481 482 evlist__for_each_entry(evsel_list, counter) { 483 if (counter->err) 484 pr_debug("failed to read counter %s\n", counter->name); 485 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 486 pr_warning("failed to process counter %s\n", counter->name); 487 counter->err = 0; 488 } 489 } 490 491 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 492 { 493 int i; 494 495 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 496 if (!config->stats) 497 return -1; 498 499 config->stats_num = nthreads; 500 501 for (i = 0; i < nthreads; i++) 502 runtime_stat__init(&config->stats[i]); 503 504 return 0; 505 } 506 507 static void runtime_stat_delete(struct perf_stat_config *config) 508 { 509 int i; 510 511 if (!config->stats) 512 return; 513 514 for (i = 0; i < 
config->stats_num; i++) 515 runtime_stat__exit(&config->stats[i]); 516 517 zfree(&config->stats); 518 } 519 520 static void runtime_stat_reset(struct perf_stat_config *config) 521 { 522 int i; 523 524 if (!config->stats) 525 return; 526 527 for (i = 0; i < config->stats_num; i++) 528 perf_stat__reset_shadow_per_stat(&config->stats[i]); 529 } 530 531 static void process_interval(void) 532 { 533 struct timespec ts, rs; 534 535 clock_gettime(CLOCK_MONOTONIC, &ts); 536 diff_timespec(&rs, &ts, &ref_time); 537 538 perf_stat__reset_shadow_per_stat(&rt_stat); 539 runtime_stat_reset(&stat_config); 540 read_counters(&rs); 541 542 if (STAT_RECORD) { 543 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 544 pr_err("failed to write stat round event\n"); 545 } 546 547 init_stats(&walltime_nsecs_stats); 548 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 549 print_counters(&rs, 0, NULL); 550 } 551 552 static bool handle_interval(unsigned int interval, int *times) 553 { 554 if (interval) { 555 process_interval(); 556 if (interval_count && !(--(*times))) 557 return true; 558 } 559 return false; 560 } 561 562 static int enable_counters(void) 563 { 564 struct evsel *evsel; 565 int err; 566 567 evlist__for_each_entry(evsel_list, evsel) { 568 if (!evsel__is_bpf(evsel)) 569 continue; 570 571 err = bpf_counter__enable(evsel); 572 if (err) 573 return err; 574 } 575 576 if (stat_config.initial_delay < 0) { 577 pr_info(EVLIST_DISABLED_MSG); 578 return 0; 579 } 580 581 if (stat_config.initial_delay > 0) { 582 pr_info(EVLIST_DISABLED_MSG); 583 usleep(stat_config.initial_delay * USEC_PER_MSEC); 584 } 585 586 /* 587 * We need to enable counters only if: 588 * - we don't have tracee (attaching to task or cpu) 589 * - we have initial delay configured 590 */ 591 if (!target__none(&target) || stat_config.initial_delay) { 592 if (!all_counters_use_bpf) 593 evlist__enable(evsel_list); 594 if (stat_config.initial_delay > 0) 595 pr_info(EVLIST_ENABLED_MSG); 596 } 597 return 0; 598 } 599 600 static void disable_counters(void) 601 { 602 struct evsel *counter; 603 604 /* 605 * If we don't have tracee (attaching to task or cpu), counters may 606 * still be running. To get accurate group ratios, we must stop groups 607 * from counting before reading their constituent counters. 608 */ 609 if (!target__none(&target)) { 610 evlist__for_each_entry(evsel_list, counter) 611 bpf_counter__disable(counter); 612 if (!all_counters_use_bpf) 613 evlist__disable(evsel_list); 614 } 615 } 616 617 static volatile int workload_exec_errno; 618 619 /* 620 * evlist__prepare_workload will send a SIGUSR1 621 * if the fork fails, since we asked by setting its 622 * want_signal to true. 
623 */ 624 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 625 void *ucontext __maybe_unused) 626 { 627 workload_exec_errno = info->si_value.sival_int; 628 } 629 630 static bool evsel__should_store_id(struct evsel *counter) 631 { 632 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 633 } 634 635 static bool is_target_alive(struct target *_target, 636 struct perf_thread_map *threads) 637 { 638 struct stat st; 639 int i; 640 641 if (!target__has_task(_target)) 642 return true; 643 644 for (i = 0; i < threads->nr; i++) { 645 char path[PATH_MAX]; 646 647 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 648 threads->map[i].pid); 649 650 if (!stat(path, &st)) 651 return true; 652 } 653 654 return false; 655 } 656 657 static void process_evlist(struct evlist *evlist, unsigned int interval) 658 { 659 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 660 661 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 662 switch (cmd) { 663 case EVLIST_CTL_CMD_ENABLE: 664 if (interval) 665 process_interval(); 666 break; 667 case EVLIST_CTL_CMD_DISABLE: 668 if (interval) 669 process_interval(); 670 break; 671 case EVLIST_CTL_CMD_SNAPSHOT: 672 case EVLIST_CTL_CMD_ACK: 673 case EVLIST_CTL_CMD_UNSUPPORTED: 674 case EVLIST_CTL_CMD_EVLIST: 675 case EVLIST_CTL_CMD_STOP: 676 case EVLIST_CTL_CMD_PING: 677 default: 678 break; 679 } 680 } 681 } 682 683 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 684 int *time_to_sleep) 685 { 686 int tts = *time_to_sleep; 687 struct timespec time_diff; 688 689 diff_timespec(&time_diff, time_stop, time_start); 690 691 tts -= time_diff.tv_sec * MSEC_PER_SEC + 692 time_diff.tv_nsec / NSEC_PER_MSEC; 693 694 if (tts < 0) 695 tts = 0; 696 697 *time_to_sleep = tts; 698 } 699 700 static int dispatch_events(bool forks, int timeout, int interval, int *times) 701 { 702 int child_exited = 0, status = 0; 703 int time_to_sleep, sleep_time; 704 struct timespec time_start, time_stop; 705 706 if (interval) 707 sleep_time = interval; 708 else if (timeout) 709 sleep_time = timeout; 710 else 711 sleep_time = 1000; 712 713 time_to_sleep = sleep_time; 714 715 while (!done) { 716 if (forks) 717 child_exited = waitpid(child_pid, &status, WNOHANG); 718 else 719 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 720 721 if (child_exited) 722 break; 723 724 clock_gettime(CLOCK_MONOTONIC, &time_start); 725 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 726 if (timeout || handle_interval(interval, times)) 727 break; 728 time_to_sleep = sleep_time; 729 } else { /* fd revent */ 730 process_evlist(evsel_list, interval); 731 clock_gettime(CLOCK_MONOTONIC, &time_stop); 732 compute_tts(&time_start, &time_stop, &time_to_sleep); 733 } 734 } 735 736 return status; 737 } 738 739 enum counter_recovery { 740 COUNTER_SKIP, 741 COUNTER_RETRY, 742 COUNTER_FATAL, 743 }; 744 745 static enum counter_recovery stat_handle_error(struct evsel *counter) 746 { 747 char msg[BUFSIZ]; 748 /* 749 * PPC returns ENXIO for HW counters until 2.6.37 750 * (behavior changed with commit b0a873e). 751 */ 752 if (errno == EINVAL || errno == ENOSYS || 753 errno == ENOENT || errno == EOPNOTSUPP || 754 errno == ENXIO) { 755 if (verbose > 0) 756 ui__warning("%s event is not supported by the kernel.\n", 757 evsel__name(counter)); 758 counter->supported = false; 759 /* 760 * errored is a sticky flag that means one of the counter's 761 * cpu event had a problem and needs to be reexamined. 
762 */ 763 counter->errored = true; 764 765 if ((evsel__leader(counter) != counter) || 766 !(counter->core.leader->nr_members > 1)) 767 return COUNTER_SKIP; 768 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 769 if (verbose > 0) 770 ui__warning("%s\n", msg); 771 return COUNTER_RETRY; 772 } else if (target__has_per_thread(&target) && 773 evsel_list->core.threads && 774 evsel_list->core.threads->err_thread != -1) { 775 /* 776 * For global --per-thread case, skip current 777 * error thread. 778 */ 779 if (!thread_map__remove(evsel_list->core.threads, 780 evsel_list->core.threads->err_thread)) { 781 evsel_list->core.threads->err_thread = -1; 782 return COUNTER_RETRY; 783 } 784 } 785 786 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 787 ui__error("%s\n", msg); 788 789 if (child_pid != -1) 790 kill(child_pid, SIGTERM); 791 return COUNTER_FATAL; 792 } 793 794 static int __run_perf_stat(int argc, const char **argv, int run_idx) 795 { 796 int interval = stat_config.interval; 797 int times = stat_config.times; 798 int timeout = stat_config.timeout; 799 char msg[BUFSIZ]; 800 unsigned long long t0, t1; 801 struct evsel *counter; 802 size_t l; 803 int status = 0; 804 const bool forks = (argc > 0); 805 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 806 struct evlist_cpu_iterator evlist_cpu_itr; 807 struct affinity saved_affinity, *affinity = NULL; 808 int err; 809 bool second_pass = false; 810 811 if (forks) { 812 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 813 perror("failed to prepare workload"); 814 return -1; 815 } 816 child_pid = evsel_list->workload.pid; 817 } 818 819 if (group) 820 evlist__set_leader(evsel_list); 821 822 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { 823 if (affinity__setup(&saved_affinity) < 0) 824 return -1; 825 affinity = &saved_affinity; 826 } 827 828 evlist__for_each_entry(evsel_list, counter) { 829 if (bpf_counter__load(counter, &target)) 830 return -1; 831 if (!evsel__is_bpf(counter)) 832 all_counters_use_bpf = false; 833 } 834 835 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 836 counter = evlist_cpu_itr.evsel; 837 838 /* 839 * bperf calls evsel__open_per_cpu() in bperf__load(), so 840 * no need to call it again here. 841 */ 842 if (target.use_bpf) 843 break; 844 845 if (counter->reset_group || counter->errored) 846 continue; 847 if (evsel__is_bpf(counter)) 848 continue; 849 try_again: 850 if (create_perf_stat_counter(counter, &stat_config, &target, 851 evlist_cpu_itr.cpu_map_idx) < 0) { 852 853 /* 854 * Weak group failed. We cannot just undo this here 855 * because earlier CPUs might be in group mode, and the kernel 856 * doesn't support mixing group and non group reads. Defer 857 * it to later. 858 * Don't close here because we're in the wrong affinity. 859 */ 860 if ((errno == EINVAL || errno == EBADF) && 861 evsel__leader(counter) != counter && 862 counter->weak_group) { 863 evlist__reset_weak_group(evsel_list, counter, false); 864 assert(counter->reset_group); 865 second_pass = true; 866 continue; 867 } 868 869 switch (stat_handle_error(counter)) { 870 case COUNTER_FATAL: 871 return -1; 872 case COUNTER_RETRY: 873 goto try_again; 874 case COUNTER_SKIP: 875 continue; 876 default: 877 break; 878 } 879 880 } 881 counter->supported = true; 882 } 883 884 if (second_pass) { 885 /* 886 * Now redo all the weak group after closing them, 887 * and also close errored counters. 
888 */ 889 890 /* First close errored or weak retry */ 891 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 892 counter = evlist_cpu_itr.evsel; 893 894 if (!counter->reset_group && !counter->errored) 895 continue; 896 897 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 898 } 899 /* Now reopen weak */ 900 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 901 counter = evlist_cpu_itr.evsel; 902 903 if (!counter->reset_group && !counter->errored) 904 continue; 905 if (!counter->reset_group) 906 continue; 907 try_again_reset: 908 pr_debug2("reopening weak %s\n", evsel__name(counter)); 909 if (create_perf_stat_counter(counter, &stat_config, &target, 910 evlist_cpu_itr.cpu_map_idx) < 0) { 911 912 switch (stat_handle_error(counter)) { 913 case COUNTER_FATAL: 914 return -1; 915 case COUNTER_RETRY: 916 goto try_again_reset; 917 case COUNTER_SKIP: 918 continue; 919 default: 920 break; 921 } 922 } 923 counter->supported = true; 924 } 925 } 926 affinity__cleanup(affinity); 927 928 evlist__for_each_entry(evsel_list, counter) { 929 if (!counter->supported) { 930 perf_evsel__free_fd(&counter->core); 931 continue; 932 } 933 934 l = strlen(counter->unit); 935 if (l > stat_config.unit_width) 936 stat_config.unit_width = l; 937 938 if (evsel__should_store_id(counter) && 939 evsel__store_ids(counter, evsel_list)) 940 return -1; 941 } 942 943 if (evlist__apply_filters(evsel_list, &counter)) { 944 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 945 counter->filter, evsel__name(counter), errno, 946 str_error_r(errno, msg, sizeof(msg))); 947 return -1; 948 } 949 950 if (STAT_RECORD) { 951 int fd = perf_data__fd(&perf_stat.data); 952 953 if (is_pipe) { 954 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 955 } else { 956 err = perf_session__write_header(perf_stat.session, evsel_list, 957 fd, false); 958 } 959 960 if (err < 0) 961 return err; 962 963 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 964 process_synthesized_event, is_pipe); 965 if (err < 0) 966 return err; 967 } 968 969 err = enable_counters(); 970 if (err) 971 return -1; 972 973 /* Exec the command, if any */ 974 if (forks) 975 evlist__start_workload(evsel_list); 976 977 t0 = rdclock(); 978 clock_gettime(CLOCK_MONOTONIC, &ref_time); 979 980 if (forks) { 981 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 982 status = dispatch_events(forks, timeout, interval, ×); 983 if (child_pid != -1) { 984 if (timeout) 985 kill(child_pid, SIGTERM); 986 wait4(child_pid, &status, 0, &stat_config.ru_data); 987 } 988 989 if (workload_exec_errno) { 990 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 991 pr_err("Workload failed: %s\n", emsg); 992 return -1; 993 } 994 995 if (WIFSIGNALED(status)) 996 psignal(WTERMSIG(status), argv[0]); 997 } else { 998 status = dispatch_events(forks, timeout, interval, ×); 999 } 1000 1001 disable_counters(); 1002 1003 t1 = rdclock(); 1004 1005 if (stat_config.walltime_run_table) 1006 stat_config.walltime_run[run_idx] = t1 - t0; 1007 1008 if (interval && stat_config.summary) { 1009 stat_config.interval = 0; 1010 stat_config.stop_read_counter = true; 1011 init_stats(&walltime_nsecs_stats); 1012 update_stats(&walltime_nsecs_stats, t1 - t0); 1013 1014 if (stat_config.aggr_mode == AGGR_GLOBAL) 1015 evlist__save_aggr_prev_raw_counts(evsel_list); 1016 1017 evlist__copy_prev_raw_counts(evsel_list); 1018 evlist__reset_prev_raw_counts(evsel_list); 1019 runtime_stat_reset(&stat_config); 1020 
perf_stat__reset_shadow_per_stat(&rt_stat); 1021 } else { 1022 update_stats(&walltime_nsecs_stats, t1 - t0); 1023 update_rusage_stats(&ru_stats, &stat_config.ru_data); 1024 } 1025 1026 /* 1027 * Closing a group leader splits the group, and as we only disable 1028 * group leaders, results in remaining events becoming enabled. To 1029 * avoid arbitrary skew, we must read all counters before closing any 1030 * group leaders. 1031 */ 1032 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 1033 1034 /* 1035 * We need to keep evsel_list alive, because it's processed 1036 * later the evsel_list will be closed after. 1037 */ 1038 if (!STAT_RECORD) 1039 evlist__close(evsel_list); 1040 1041 return WEXITSTATUS(status); 1042 } 1043 1044 static int run_perf_stat(int argc, const char **argv, int run_idx) 1045 { 1046 int ret; 1047 1048 if (pre_cmd) { 1049 ret = system(pre_cmd); 1050 if (ret) 1051 return ret; 1052 } 1053 1054 if (sync_run) 1055 sync(); 1056 1057 ret = __run_perf_stat(argc, argv, run_idx); 1058 if (ret) 1059 return ret; 1060 1061 if (post_cmd) { 1062 ret = system(post_cmd); 1063 if (ret) 1064 return ret; 1065 } 1066 1067 return ret; 1068 } 1069 1070 static void print_counters(struct timespec *ts, int argc, const char **argv) 1071 { 1072 /* Do not print anything if we record to the pipe. */ 1073 if (STAT_RECORD && perf_stat.data.is_pipe) 1074 return; 1075 if (stat_config.quiet) 1076 return; 1077 1078 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1079 } 1080 1081 static volatile int signr = -1; 1082 1083 static void skip_signal(int signo) 1084 { 1085 if ((child_pid == -1) || stat_config.interval) 1086 done = 1; 1087 1088 signr = signo; 1089 /* 1090 * render child_pid harmless 1091 * won't send SIGTERM to a random 1092 * process in case of race condition 1093 * and fast PID recycling 1094 */ 1095 child_pid = -1; 1096 } 1097 1098 static void sig_atexit(void) 1099 { 1100 sigset_t set, oset; 1101 1102 /* 1103 * avoid race condition with SIGCHLD handler 1104 * in skip_signal() which is modifying child_pid 1105 * goal is to avoid send SIGTERM to a random 1106 * process 1107 */ 1108 sigemptyset(&set); 1109 sigaddset(&set, SIGCHLD); 1110 sigprocmask(SIG_BLOCK, &set, &oset); 1111 1112 if (child_pid != -1) 1113 kill(child_pid, SIGTERM); 1114 1115 sigprocmask(SIG_SETMASK, &oset, NULL); 1116 1117 if (signr == -1) 1118 return; 1119 1120 signal(signr, SIG_DFL); 1121 kill(getpid(), signr); 1122 } 1123 1124 void perf_stat__set_big_num(int set) 1125 { 1126 stat_config.big_num = (set != 0); 1127 } 1128 1129 void perf_stat__set_no_csv_summary(int set) 1130 { 1131 stat_config.no_csv_summary = (set != 0); 1132 } 1133 1134 static int stat__set_big_num(const struct option *opt __maybe_unused, 1135 const char *s __maybe_unused, int unset) 1136 { 1137 big_num_opt = unset ? 
0 : 1; 1138 perf_stat__set_big_num(!unset); 1139 return 0; 1140 } 1141 1142 static int enable_metric_only(const struct option *opt __maybe_unused, 1143 const char *s __maybe_unused, int unset) 1144 { 1145 force_metric_only = true; 1146 stat_config.metric_only = !unset; 1147 return 0; 1148 } 1149 1150 static int parse_metric_groups(const struct option *opt, 1151 const char *str, 1152 int unset __maybe_unused) 1153 { 1154 return metricgroup__parse_groups(opt, str, 1155 stat_config.metric_no_group, 1156 stat_config.metric_no_merge, 1157 &stat_config.metric_events); 1158 } 1159 1160 static int parse_control_option(const struct option *opt, 1161 const char *str, 1162 int unset __maybe_unused) 1163 { 1164 struct perf_stat_config *config = opt->value; 1165 1166 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1167 } 1168 1169 static int parse_stat_cgroups(const struct option *opt, 1170 const char *str, int unset) 1171 { 1172 if (stat_config.cgroup_list) { 1173 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1174 return -1; 1175 } 1176 1177 return parse_cgroups(opt, str, unset); 1178 } 1179 1180 static int parse_hybrid_type(const struct option *opt, 1181 const char *str, 1182 int unset __maybe_unused) 1183 { 1184 struct evlist *evlist = *(struct evlist **)opt->value; 1185 1186 if (!list_empty(&evlist->core.entries)) { 1187 fprintf(stderr, "Must define cputype before events/metrics\n"); 1188 return -1; 1189 } 1190 1191 evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); 1192 if (!evlist->hybrid_pmu_name) { 1193 fprintf(stderr, "--cputype %s is not supported!\n", str); 1194 return -1; 1195 } 1196 1197 return 0; 1198 } 1199 1200 static struct option stat_options[] = { 1201 OPT_BOOLEAN('T', "transaction", &transaction_run, 1202 "hardware transaction statistics"), 1203 OPT_CALLBACK('e', "event", &evsel_list, "event", 1204 "event selector. 
use 'perf list' to list available events", 1205 parse_events_option), 1206 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1207 "event filter", parse_filter), 1208 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1209 "child tasks do not inherit counters"), 1210 OPT_STRING('p', "pid", &target.pid, "pid", 1211 "stat events on existing process id"), 1212 OPT_STRING('t', "tid", &target.tid, "tid", 1213 "stat events on existing thread id"), 1214 #ifdef HAVE_BPF_SKEL 1215 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1216 "stat events on existing bpf program id"), 1217 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1218 "use bpf program to count events"), 1219 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1220 "path to perf_event_attr map"), 1221 #endif 1222 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1223 "system-wide collection from all CPUs"), 1224 OPT_BOOLEAN('g', "group", &group, 1225 "put the counters into a counter group"), 1226 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1227 "Use --no-scale to disable counter scaling for multiplexing"), 1228 OPT_INCR('v', "verbose", &verbose, 1229 "be more verbose (show counter open errors, etc)"), 1230 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1231 "repeat command and print average + stddev (max: 100, forever: 0)"), 1232 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1233 "display details about each run (only with -r option)"), 1234 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1235 "null run - dont start any counters"), 1236 OPT_INCR('d', "detailed", &detailed_run, 1237 "detailed run - start a lot of events"), 1238 OPT_BOOLEAN('S', "sync", &sync_run, 1239 "call sync() before starting a run"), 1240 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1241 "print large numbers with thousands\' separators", 1242 stat__set_big_num), 1243 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1244 "list of cpus to monitor in system-wide"), 1245 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1246 "disable CPU count aggregation", AGGR_NONE), 1247 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1248 OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge, 1249 "Merge identical named hybrid events"), 1250 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1251 "print counts with custom separator"), 1252 OPT_BOOLEAN('j', "json-output", &stat_config.json_output, 1253 "print counts in JSON format"), 1254 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1255 "monitor event in cgroup name only", parse_stat_cgroups), 1256 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1257 "expand events for each cgroup"), 1258 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1259 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1260 OPT_INTEGER(0, "log-fd", &output_fd, 1261 "log output to fd, instead of stderr"), 1262 OPT_STRING(0, "pre", &pre_cmd, "command", 1263 "command to run prior to the measured command"), 1264 OPT_STRING(0, "post", &post_cmd, "command", 1265 "command to run after to the measured command"), 1266 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1267 "print counts at regular interval in ms " 1268 "(overhead is possible for values <= 100ms)"), 1269 OPT_INTEGER(0, "interval-count", &stat_config.times, 1270 "print counts for fixed number of times"), 1271 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1272 "clear screen in between new interval"), 1273 
OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1274 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1275 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1276 "aggregate counts per processor socket", AGGR_SOCKET), 1277 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1278 "aggregate counts per processor die", AGGR_DIE), 1279 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1280 "aggregate counts per physical processor core", AGGR_CORE), 1281 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1282 "aggregate counts per thread", AGGR_THREAD), 1283 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1284 "aggregate counts per numa node", AGGR_NODE), 1285 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1286 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1287 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1288 "Only print computed metrics. No raw values", enable_metric_only), 1289 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1290 "don't group metric events, impacts multiplexing"), 1291 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1292 "don't try to share events between metrics in a group"), 1293 OPT_BOOLEAN(0, "topdown", &topdown_run, 1294 "measure top-down statistics"), 1295 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1296 "Set the metrics level for the top-down statistics (0: max level)"), 1297 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1298 "measure SMI cost"), 1299 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1300 "monitor specified metrics or metric groups (separated by ,)", 1301 parse_metric_groups), 1302 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1303 "Configure all used events to run in kernel space.", 1304 PARSE_OPT_EXCLUSIVE), 1305 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1306 "Configure all used events to run in user space.", 1307 PARSE_OPT_EXCLUSIVE), 1308 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1309 "Use with 'percore' event qualifier to show the event " 1310 "counts of one hardware thread by sum up total hardware " 1311 "threads of same physical core"), 1312 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1313 "print summary for interval mode"), 1314 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1315 "don't print 'summary' for CSV summary output"), 1316 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1317 "don't print output (useful with record)"), 1318 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", 1319 "Only enable events on applying cpu with this type " 1320 "for hybrid platform (e.g. core or atom)", 1321 parse_hybrid_type), 1322 #ifdef HAVE_LIBPFM 1323 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1324 "libpfm4 event selector. 
use 'perf list' to list available events", 1325 parse_libpfm_events_option), 1326 #endif 1327 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1328 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1329 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1330 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1331 parse_control_option), 1332 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1333 "measure I/O performance metrics provided by arch/platform", 1334 iostat_parse), 1335 OPT_END() 1336 }; 1337 1338 static const char *const aggr_mode__string[] = { 1339 [AGGR_CORE] = "core", 1340 [AGGR_DIE] = "die", 1341 [AGGR_GLOBAL] = "global", 1342 [AGGR_NODE] = "node", 1343 [AGGR_NONE] = "none", 1344 [AGGR_SOCKET] = "socket", 1345 [AGGR_THREAD] = "thread", 1346 [AGGR_UNSET] = "unset", 1347 }; 1348 1349 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1350 struct perf_cpu cpu) 1351 { 1352 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1353 } 1354 1355 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1356 struct perf_cpu cpu) 1357 { 1358 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1359 } 1360 1361 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1362 struct perf_cpu cpu) 1363 { 1364 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1365 } 1366 1367 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1368 struct perf_cpu cpu) 1369 { 1370 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1371 } 1372 1373 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1374 aggr_get_id_t get_id, struct perf_cpu cpu) 1375 { 1376 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1377 1378 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1379 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1380 1381 id = config->cpus_aggr_map->map[cpu.cpu]; 1382 return id; 1383 } 1384 1385 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1386 struct perf_cpu cpu) 1387 { 1388 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1389 } 1390 1391 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1392 struct perf_cpu cpu) 1393 { 1394 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1395 } 1396 1397 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1398 struct perf_cpu cpu) 1399 { 1400 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1401 } 1402 1403 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1404 struct perf_cpu cpu) 1405 { 1406 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1407 } 1408 1409 static bool term_percore_set(void) 1410 { 1411 struct evsel *counter; 1412 1413 evlist__for_each_entry(evsel_list, counter) { 1414 if (counter->percore) 1415 return true; 1416 } 1417 1418 return false; 1419 } 1420 1421 static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) 1422 { 1423 switch (aggr_mode) { 1424 case AGGR_SOCKET: 1425 return aggr_cpu_id__socket; 1426 case AGGR_DIE: 1427 return aggr_cpu_id__die; 1428 case AGGR_CORE: 1429 return aggr_cpu_id__core; 1430 case AGGR_NODE: 1431 return aggr_cpu_id__node; 
1432 case AGGR_NONE: 1433 if (term_percore_set()) 1434 return aggr_cpu_id__core; 1435 1436 return NULL; 1437 case AGGR_GLOBAL: 1438 case AGGR_THREAD: 1439 case AGGR_UNSET: 1440 case AGGR_MAX: 1441 default: 1442 return NULL; 1443 } 1444 } 1445 1446 static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) 1447 { 1448 switch (aggr_mode) { 1449 case AGGR_SOCKET: 1450 return perf_stat__get_socket_cached; 1451 case AGGR_DIE: 1452 return perf_stat__get_die_cached; 1453 case AGGR_CORE: 1454 return perf_stat__get_core_cached; 1455 case AGGR_NODE: 1456 return perf_stat__get_node_cached; 1457 case AGGR_NONE: 1458 if (term_percore_set()) { 1459 return perf_stat__get_core_cached; 1460 } 1461 return NULL; 1462 case AGGR_GLOBAL: 1463 case AGGR_THREAD: 1464 case AGGR_UNSET: 1465 case AGGR_MAX: 1466 default: 1467 return NULL; 1468 } 1469 } 1470 1471 static int perf_stat_init_aggr_mode(void) 1472 { 1473 int nr; 1474 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); 1475 1476 if (get_id) { 1477 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, 1478 get_id, /*data=*/NULL); 1479 if (!stat_config.aggr_map) { 1480 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1481 return -1; 1482 } 1483 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); 1484 } 1485 1486 /* 1487 * The evsel_list->cpus is the base we operate on, 1488 * taking the highest cpu number to be the size of 1489 * the aggregation translate cpumap. 1490 */ 1491 if (evsel_list->core.user_requested_cpus) 1492 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; 1493 else 1494 nr = 0; 1495 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1496 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1497 } 1498 1499 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1500 { 1501 if (map) { 1502 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1503 "cpu_aggr_map refcnt unbalanced\n"); 1504 free(map); 1505 } 1506 } 1507 1508 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1509 { 1510 if (map && refcount_dec_and_test(&map->refcnt)) 1511 cpu_aggr_map__delete(map); 1512 } 1513 1514 static void perf_stat__exit_aggr_mode(void) 1515 { 1516 cpu_aggr_map__put(stat_config.aggr_map); 1517 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1518 stat_config.aggr_map = NULL; 1519 stat_config.cpus_aggr_map = NULL; 1520 } 1521 1522 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) 1523 { 1524 struct perf_env *env = data; 1525 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1526 1527 if (cpu.cpu != -1) 1528 id.socket = env->cpu[cpu.cpu].socket_id; 1529 1530 return id; 1531 } 1532 1533 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) 1534 { 1535 struct perf_env *env = data; 1536 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1537 1538 if (cpu.cpu != -1) { 1539 /* 1540 * die_id is relative to socket, so start 1541 * with the socket ID and then add die to 1542 * make a unique ID. 1543 */ 1544 id.socket = env->cpu[cpu.cpu].socket_id; 1545 id.die = env->cpu[cpu.cpu].die_id; 1546 } 1547 1548 return id; 1549 } 1550 1551 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1552 { 1553 struct perf_env *env = data; 1554 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1555 1556 if (cpu.cpu != -1) { 1557 /* 1558 * core_id is relative to socket and die, 1559 * we need a global id. 
So we set 1560 * socket, die id and core id 1561 */ 1562 id.socket = env->cpu[cpu.cpu].socket_id; 1563 id.die = env->cpu[cpu.cpu].die_id; 1564 id.core = env->cpu[cpu.cpu].core_id; 1565 } 1566 1567 return id; 1568 } 1569 1570 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) 1571 { 1572 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1573 1574 id.node = perf_env__numa_node(data, cpu); 1575 return id; 1576 } 1577 1578 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1579 struct perf_cpu cpu) 1580 { 1581 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1582 } 1583 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1584 struct perf_cpu cpu) 1585 { 1586 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1587 } 1588 1589 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1590 struct perf_cpu cpu) 1591 { 1592 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1593 } 1594 1595 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1596 struct perf_cpu cpu) 1597 { 1598 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1599 } 1600 1601 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) 1602 { 1603 switch (aggr_mode) { 1604 case AGGR_SOCKET: 1605 return perf_env__get_socket_aggr_by_cpu; 1606 case AGGR_DIE: 1607 return perf_env__get_die_aggr_by_cpu; 1608 case AGGR_CORE: 1609 return perf_env__get_core_aggr_by_cpu; 1610 case AGGR_NODE: 1611 return perf_env__get_node_aggr_by_cpu; 1612 case AGGR_NONE: 1613 case AGGR_GLOBAL: 1614 case AGGR_THREAD: 1615 case AGGR_UNSET: 1616 case AGGR_MAX: 1617 default: 1618 return NULL; 1619 } 1620 } 1621 1622 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) 1623 { 1624 switch (aggr_mode) { 1625 case AGGR_SOCKET: 1626 return perf_stat__get_socket_file; 1627 case AGGR_DIE: 1628 return perf_stat__get_die_file; 1629 case AGGR_CORE: 1630 return perf_stat__get_core_file; 1631 case AGGR_NODE: 1632 return perf_stat__get_node_file; 1633 case AGGR_NONE: 1634 case AGGR_GLOBAL: 1635 case AGGR_THREAD: 1636 case AGGR_UNSET: 1637 case AGGR_MAX: 1638 default: 1639 return NULL; 1640 } 1641 } 1642 1643 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1644 { 1645 struct perf_env *env = &st->session->header.env; 1646 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); 1647 1648 if (!get_id) 1649 return 0; 1650 1651 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env); 1652 if (!stat_config.aggr_map) { 1653 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1654 return -1; 1655 } 1656 stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); 1657 return 0; 1658 } 1659 1660 /* 1661 * Add default attributes, if there were no attributes specified or 1662 * if -d/--detailed, -d -d or -d -d -d is used: 1663 */ 1664 static int add_default_attributes(void) 1665 { 1666 int err; 1667 struct perf_event_attr default_attrs0[] = { 1668 1669 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1670 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1671 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1672 { .type = PERF_TYPE_SOFTWARE, .config = 
PERF_COUNT_SW_PAGE_FAULTS }, 1673 1674 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1675 }; 1676 struct perf_event_attr frontend_attrs[] = { 1677 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1678 }; 1679 struct perf_event_attr backend_attrs[] = { 1680 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1681 }; 1682 struct perf_event_attr default_attrs1[] = { 1683 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1684 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1685 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1686 1687 }; 1688 1689 /* 1690 * Detailed stats (-d), covering the L1 and last level data caches: 1691 */ 1692 struct perf_event_attr detailed_attrs[] = { 1693 1694 { .type = PERF_TYPE_HW_CACHE, 1695 .config = 1696 PERF_COUNT_HW_CACHE_L1D << 0 | 1697 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1698 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1699 1700 { .type = PERF_TYPE_HW_CACHE, 1701 .config = 1702 PERF_COUNT_HW_CACHE_L1D << 0 | 1703 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1704 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1705 1706 { .type = PERF_TYPE_HW_CACHE, 1707 .config = 1708 PERF_COUNT_HW_CACHE_LL << 0 | 1709 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1710 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1711 1712 { .type = PERF_TYPE_HW_CACHE, 1713 .config = 1714 PERF_COUNT_HW_CACHE_LL << 0 | 1715 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1716 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1717 }; 1718 1719 /* 1720 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1721 */ 1722 struct perf_event_attr very_detailed_attrs[] = { 1723 1724 { .type = PERF_TYPE_HW_CACHE, 1725 .config = 1726 PERF_COUNT_HW_CACHE_L1I << 0 | 1727 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1728 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1729 1730 { .type = PERF_TYPE_HW_CACHE, 1731 .config = 1732 PERF_COUNT_HW_CACHE_L1I << 0 | 1733 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1734 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1735 1736 { .type = PERF_TYPE_HW_CACHE, 1737 .config = 1738 PERF_COUNT_HW_CACHE_DTLB << 0 | 1739 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1740 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1741 1742 { .type = PERF_TYPE_HW_CACHE, 1743 .config = 1744 PERF_COUNT_HW_CACHE_DTLB << 0 | 1745 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1746 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1747 1748 { .type = PERF_TYPE_HW_CACHE, 1749 .config = 1750 PERF_COUNT_HW_CACHE_ITLB << 0 | 1751 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1752 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1753 1754 { .type = PERF_TYPE_HW_CACHE, 1755 .config = 1756 PERF_COUNT_HW_CACHE_ITLB << 0 | 1757 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1758 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1759 1760 }; 1761 1762 /* 1763 * Very, very detailed stats (-d -d -d), adding prefetch events: 1764 */ 1765 struct perf_event_attr very_very_detailed_attrs[] = { 1766 1767 { .type = PERF_TYPE_HW_CACHE, 1768 .config = 1769 PERF_COUNT_HW_CACHE_L1D << 0 | 1770 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1771 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1772 1773 { .type = PERF_TYPE_HW_CACHE, 1774 .config = 1775 PERF_COUNT_HW_CACHE_L1D << 0 | 1776 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1777 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1778 }; 1779 1780 struct perf_event_attr default_null_attrs[] = {}; 1781 1782 /* Set attrs if no event is selected and !null_run: */ 1783 if (stat_config.null_run) 1784 return 0; 1785 1786 if 
(transaction_run) { 1787 struct parse_events_error errinfo; 1788 /* Handle -T as -M transaction. Once platform specific metrics 1789 * support has been added to the json files, all architectures 1790 * will use this approach. To determine transaction support 1791 * on an architecture test for such a metric name. 1792 */ 1793 if (metricgroup__has_metric("transaction")) { 1794 struct option opt = { .value = &evsel_list }; 1795 1796 return metricgroup__parse_groups(&opt, "transaction", 1797 stat_config.metric_no_group, 1798 stat_config.metric_no_merge, 1799 &stat_config.metric_events); 1800 } 1801 1802 parse_events_error__init(&errinfo); 1803 if (pmu_have_event("cpu", "cycles-ct") && 1804 pmu_have_event("cpu", "el-start")) 1805 err = parse_events(evsel_list, transaction_attrs, 1806 &errinfo); 1807 else 1808 err = parse_events(evsel_list, 1809 transaction_limited_attrs, 1810 &errinfo); 1811 if (err) { 1812 fprintf(stderr, "Cannot set up transaction events\n"); 1813 parse_events_error__print(&errinfo, transaction_attrs); 1814 } 1815 parse_events_error__exit(&errinfo); 1816 return err ? -1 : 0; 1817 } 1818 1819 if (smi_cost) { 1820 struct parse_events_error errinfo; 1821 int smi; 1822 1823 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1824 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1825 return -1; 1826 } 1827 1828 if (!smi) { 1829 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1830 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1831 return -1; 1832 } 1833 smi_reset = true; 1834 } 1835 1836 if (!pmu_have_event("msr", "aperf") || 1837 !pmu_have_event("msr", "smi")) { 1838 fprintf(stderr, "To measure SMI cost, it needs " 1839 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1840 return -1; 1841 } 1842 if (!force_metric_only) 1843 stat_config.metric_only = true; 1844 1845 parse_events_error__init(&errinfo); 1846 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1847 if (err) { 1848 parse_events_error__print(&errinfo, smi_cost_attrs); 1849 fprintf(stderr, "Cannot set up SMI cost events\n"); 1850 } 1851 parse_events_error__exit(&errinfo); 1852 return err ? -1 : 0; 1853 } 1854 1855 if (topdown_run) { 1856 const char **metric_attrs = topdown_metric_attrs; 1857 unsigned int max_level = 1; 1858 char *str = NULL; 1859 bool warn = false; 1860 const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true); 1861 1862 if (!force_metric_only) 1863 stat_config.metric_only = true; 1864 1865 if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) { 1866 metric_attrs = topdown_metric_L2_attrs; 1867 max_level = 2; 1868 } 1869 1870 if (stat_config.topdown_level > max_level) { 1871 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1872 return -1; 1873 } else if (!stat_config.topdown_level) 1874 stat_config.topdown_level = max_level; 1875 1876 if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) { 1877 pr_err("Out of memory\n"); 1878 return -1; 1879 } 1880 1881 if (metric_attrs[0] && str) { 1882 if (!stat_config.interval && !stat_config.metric_only) { 1883 fprintf(stat_config.output, 1884 "Topdown accuracy may decrease when measuring long periods.\n" 1885 "Please print the result regularly, e.g. 
-I1000\n"); 1886 } 1887 goto setup_metrics; 1888 } 1889 1890 zfree(&str); 1891 1892 if (stat_config.aggr_mode != AGGR_GLOBAL && 1893 stat_config.aggr_mode != AGGR_CORE) { 1894 pr_err("top down event configuration requires --per-core mode\n"); 1895 return -1; 1896 } 1897 stat_config.aggr_mode = AGGR_CORE; 1898 if (nr_cgroups || !target__has_cpu(&target)) { 1899 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1900 return -1; 1901 } 1902 1903 if (topdown_filter_events(topdown_attrs, &str, 1904 arch_topdown_check_group(&warn), 1905 pmu_name) < 0) { 1906 pr_err("Out of memory\n"); 1907 return -1; 1908 } 1909 1910 if (topdown_attrs[0] && str) { 1911 struct parse_events_error errinfo; 1912 if (warn) 1913 arch_topdown_group_warn(); 1914 setup_metrics: 1915 parse_events_error__init(&errinfo); 1916 err = parse_events(evsel_list, str, &errinfo); 1917 if (err) { 1918 fprintf(stderr, 1919 "Cannot set up top down events %s: %d\n", 1920 str, err); 1921 parse_events_error__print(&errinfo, str); 1922 parse_events_error__exit(&errinfo); 1923 free(str); 1924 return -1; 1925 } 1926 parse_events_error__exit(&errinfo); 1927 } else { 1928 fprintf(stderr, "System does not support topdown\n"); 1929 return -1; 1930 } 1931 free(str); 1932 } 1933 1934 if (!evsel_list->core.nr_entries) { 1935 if (target__has_cpu(&target)) 1936 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1937 1938 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1939 return -1; 1940 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1941 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1942 return -1; 1943 } 1944 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1945 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1946 return -1; 1947 } 1948 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1949 return -1; 1950 1951 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1952 /* Platform specific attrs */ 1953 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) 1954 return -1; 1955 } 1956 1957 /* Detailed events get appended to the event list: */ 1958 1959 if (detailed_run < 1) 1960 return 0; 1961 1962 /* Append detailed run extra attributes: */ 1963 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1964 return -1; 1965 1966 if (detailed_run < 2) 1967 return 0; 1968 1969 /* Append very detailed run extra attributes: */ 1970 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1971 return -1; 1972 1973 if (detailed_run < 3) 1974 return 0; 1975 1976 /* Append very, very detailed run extra attributes: */ 1977 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1978 } 1979 1980 static const char * const stat_record_usage[] = { 1981 "perf stat record [<options>]", 1982 NULL, 1983 }; 1984 1985 static void init_features(struct perf_session *session) 1986 { 1987 int feat; 1988 1989 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1990 perf_header__set_feat(&session->header, feat); 1991 1992 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1993 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1994 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1995 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1996 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1997 } 1998 1999 static int __cmd_record(int argc, const char **argv) 2000 { 2001 struct perf_session *session; 2002 struct perf_data *data = &perf_stat.data; 2003 2004 argc = 
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};

static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}

static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->path = output_name;

	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, NULL);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
	}

	init_features(session);

	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}

static int process_stat_round_event(struct perf_session *session,
				    union perf_event *event)
{
	struct perf_record_stat_round *stat_round = &event->stat_round;
	struct evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each_entry(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}

static
int process_stat_config_event(struct perf_session *session,
			      union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (perf_cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}

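/*
 * A thread map and a CPU map event must both have been received before the
 * evlist maps can be set up; set_maps() is called from either handler below
 * and only attaches the maps and allocates the counter stats once both are
 * present.
 */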
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);

	if (evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}

static
int process_thread_map_event(struct perf_session *session,
			     union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}

static
int process_cpu_map_event(struct perf_session *session,
			  union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct perf_cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}

static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};

static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode = AGGR_UNSET,
};

static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.path = input_name;
	perf_stat.data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	perf_stat.session  = session;
	stat_config.output = stderr;
	evsel_list         = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}

static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of the following
	 * conditions is met:
	 *
	 * - there's no workload specified
	 * - there is a workload specified but all requested
	 *   events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->core.requires_cpu &&
			    strcmp(counter->name, "duration_time")) {
				return;
			}
		}

		if (evsel_list->core.nr_entries)
			target.system_wide = true;
	}
}

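/*
 * Entry point for the 'stat' builtin: dispatches to the 'record'/'report'
 * subcommands, validates option combinations, opens the output stream, and
 * then runs the actual counting loop (run_perf_stat) once per requested
 * run (-r).
 */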
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

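	/*
	 * Counter output goes to stderr by default; -o/--output redirects it
	 * to a file (honouring --append), while --log-fd reuses an already
	 * open descriptor inherited from the parent process.
	 */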
	if (!output && !stat_config.quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single
	 * run and when there is a specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	/*
	 * add_default_attributes() turns the -T, --smi-cost and --topdown
	 * selections handled above into events, and appends the default
	 * event set when nothing was requested explicitly.
	 */
	if (add_default_attributes())
		goto out;

	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
		pr_err("failed to use cpu list %s\n", target.cpu_list);
		goto out;
	}

	target.hybrid = perf_pmu__has_hybrid();
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
		if (target.system_wide) {
			if (runtime_stat_new(&stat_config,
				perf_thread_map__nr(evsel_list->core.threads))) {
				goto out;
			}
		}
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

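	/*
	 * --control fd:ctl-fd[,ack-fd] (or fifo:...) hands perf a descriptor
	 * on which an external process can send commands such as 'enable'
	 * and 'disable' to start and stop counting while the workload runs.
	 */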
	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Enable ignoring missing threads when -p option is defined. */
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1 && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary))
		print_counters(NULL, argc, argv);

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	if (stat_config.iostat_run)
		iostat_release(evsel_list);

	zfree(&stat_config.walltime_run);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	metricgroup__rblist_exit(&stat_config.metric_events);
	runtime_stat_delete(&stat_config);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}