1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evsel.h" 52 #include "util/debug.h" 53 #include "util/color.h" 54 #include "util/stat.h" 55 #include "util/header.h" 56 #include "util/cpumap.h" 57 #include "util/thread_map.h" 58 #include "util/counts.h" 59 #include "util/topdown.h" 60 #include "util/session.h" 61 #include "util/tool.h" 62 #include "util/string2.h" 63 #include "util/metricgroup.h" 64 #include "util/synthetic-events.h" 65 #include "util/target.h" 66 #include "util/time-utils.h" 67 #include "util/top.h" 68 #include "util/affinity.h" 69 #include "util/pfm.h" 70 #include "util/bpf_counter.h" 71 #include "asm/bug.h" 72 73 #include <linux/time64.h> 74 #include <linux/zalloc.h> 75 #include <api/fs/fs.h> 76 #include <errno.h> 77 #include <signal.h> 78 #include <stdlib.h> 79 #include <sys/prctl.h> 80 #include <inttypes.h> 81 #include <locale.h> 82 #include <math.h> 83 #include <sys/types.h> 84 #include <sys/stat.h> 85 #include <sys/wait.h> 86 #include <unistd.h> 87 #include <sys/time.h> 88 #include <sys/resource.h> 89 #include <linux/err.h> 90 91 #include <linux/ctype.h> 92 #include <perf/evlist.h> 93 94 #define DEFAULT_SEPARATOR " " 95 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 96 97 static void print_counters(struct timespec *ts, int argc, const char **argv); 98 99 /* Default events used for perf stat -T */ 100 static const char *transaction_attrs = { 101 "task-clock," 102 "{" 103 "instructions," 104 "cycles," 105 "cpu/cycles-t/," 106 "cpu/tx-start/," 107 "cpu/el-start/," 108 "cpu/cycles-ct/" 109 "}" 110 }; 111 112 /* More limited version when the CPU does not have all events. */ 113 static const char * transaction_limited_attrs = { 114 "task-clock," 115 "{" 116 "instructions," 117 "cycles," 118 "cpu/cycles-t/," 119 "cpu/tx-start/" 120 "}" 121 }; 122 123 static const char * topdown_attrs[] = { 124 "topdown-total-slots", 125 "topdown-slots-retired", 126 "topdown-recovery-bubbles", 127 "topdown-fetch-bubbles", 128 "topdown-slots-issued", 129 NULL, 130 }; 131 132 static const char *topdown_metric_attrs[] = { 133 "slots", 134 "topdown-retiring", 135 "topdown-bad-spec", 136 "topdown-fe-bound", 137 "topdown-be-bound", 138 NULL, 139 }; 140 141 static const char *topdown_metric_L2_attrs[] = { 142 "slots", 143 "topdown-retiring", 144 "topdown-bad-spec", 145 "topdown-fe-bound", 146 "topdown-be-bound", 147 "topdown-heavy-ops", 148 "topdown-br-mispredict", 149 "topdown-fetch-lat", 150 "topdown-mem-bound", 151 NULL, 152 }; 153 154 static const char *smi_cost_attrs = { 155 "{" 156 "msr/aperf/," 157 "msr/smi/," 158 "cycles" 159 "}" 160 }; 161 162 static struct evlist *evsel_list; 163 164 static struct target target = { 165 .uid = UINT_MAX, 166 }; 167 168 #define METRIC_ONLY_LEN 20 169 170 static volatile pid_t child_pid = -1; 171 static int detailed_run = 0; 172 static bool transaction_run; 173 static bool topdown_run = false; 174 static bool smi_cost = false; 175 static bool smi_reset = false; 176 static int big_num_opt = -1; 177 static bool group = false; 178 static const char *pre_cmd = NULL; 179 static const char *post_cmd = NULL; 180 static bool sync_run = false; 181 static bool forever = false; 182 static bool force_metric_only = false; 183 static struct timespec ref_time; 184 static bool append_file; 185 static bool interval_count; 186 static const char *output_name; 187 static int output_fd; 188 189 struct perf_stat { 190 bool record; 191 struct perf_data data; 192 struct perf_session *session; 193 u64 bytes_written; 194 struct perf_tool tool; 195 bool maps_allocated; 196 struct perf_cpu_map *cpus; 197 struct perf_thread_map *threads; 198 enum aggr_mode aggr_mode; 199 }; 200 201 static struct perf_stat perf_stat; 202 #define STAT_RECORD perf_stat.record 203 204 static volatile int done = 0; 205 206 static struct perf_stat_config stat_config = { 207 .aggr_mode = AGGR_GLOBAL, 208 .scale = true, 209 .unit_width = 4, /* strlen("unit") */ 210 .run_count = 1, 211 .metric_only_len = METRIC_ONLY_LEN, 212 .walltime_nsecs_stats = &walltime_nsecs_stats, 213 .big_num = true, 214 .ctl_fd = -1, 215 .ctl_fd_ack = -1 216 }; 217 218 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 219 { 220 if (!a->core.cpus && !b->core.cpus) 221 return true; 222 223 if (!a->core.cpus || !b->core.cpus) 224 return false; 225 226 if (a->core.cpus->nr != b->core.cpus->nr) 227 return false; 228 229 for (int i = 0; i < a->core.cpus->nr; i++) { 230 if (a->core.cpus->map[i] != b->core.cpus->map[i]) 231 return false; 232 } 233 234 return true; 235 } 236 237 static void evlist__check_cpu_maps(struct evlist *evlist) 238 { 239 struct evsel *evsel, *pos, *leader; 240 char buf[1024]; 241 242 evlist__for_each_entry(evlist, evsel) { 243 leader = evsel->leader; 244 245 /* Check that leader matches cpus with each member. */ 246 if (leader == evsel) 247 continue; 248 if (cpus_map_matched(leader, evsel)) 249 continue; 250 251 /* If there's mismatch disable the group and warn user. */ 252 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 253 evsel__group_desc(leader, buf, sizeof(buf)); 254 pr_warning(" %s\n", buf); 255 256 if (verbose) { 257 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 258 pr_warning(" %s: %s\n", leader->name, buf); 259 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 260 pr_warning(" %s: %s\n", evsel->name, buf); 261 } 262 263 for_each_group_evsel(pos, leader) { 264 pos->leader = pos; 265 pos->core.nr_members = 0; 266 } 267 evsel->leader->core.nr_members = 0; 268 } 269 } 270 271 static inline void diff_timespec(struct timespec *r, struct timespec *a, 272 struct timespec *b) 273 { 274 r->tv_sec = a->tv_sec - b->tv_sec; 275 if (a->tv_nsec < b->tv_nsec) { 276 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 277 r->tv_sec--; 278 } else { 279 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 280 } 281 } 282 283 static void perf_stat__reset_stats(void) 284 { 285 int i; 286 287 evlist__reset_stats(evsel_list); 288 perf_stat__reset_shadow_stats(); 289 290 for (i = 0; i < stat_config.stats_num; i++) 291 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 292 } 293 294 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 295 union perf_event *event, 296 struct perf_sample *sample __maybe_unused, 297 struct machine *machine __maybe_unused) 298 { 299 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 300 pr_err("failed to write perf data, error: %m\n"); 301 return -1; 302 } 303 304 perf_stat.bytes_written += event->header.size; 305 return 0; 306 } 307 308 static int write_stat_round_event(u64 tm, u64 type) 309 { 310 return perf_event__synthesize_stat_round(NULL, tm, type, 311 process_synthesized_event, 312 NULL); 313 } 314 315 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 316 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 317 318 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 319 320 static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, 321 struct perf_counts_values *count) 322 { 323 struct perf_sample_id *sid = SID(counter, cpu, thread); 324 325 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 326 process_synthesized_event, NULL); 327 } 328 329 static int read_single_counter(struct evsel *counter, int cpu, 330 int thread, struct timespec *rs) 331 { 332 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 333 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 334 struct perf_counts_values *count = 335 perf_counts(counter->counts, cpu, thread); 336 count->ena = count->run = val; 337 count->val = val; 338 return 0; 339 } 340 return evsel__read_counter(counter, cpu, thread); 341 } 342 343 /* 344 * Read out the results of a single counter: 345 * do not aggregate counts across CPUs in system-wide mode 346 */ 347 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) 348 { 349 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 350 int thread; 351 352 if (!counter->supported) 353 return -ENOENT; 354 355 if (counter->core.system_wide) 356 nthreads = 1; 357 358 for (thread = 0; thread < nthreads; thread++) { 359 struct perf_counts_values *count; 360 361 count = perf_counts(counter->counts, cpu, thread); 362 363 /* 364 * The leader's group read loads data into its group members 365 * (via evsel__read_counter()) and sets their count->loaded. 366 */ 367 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && 368 read_single_counter(counter, cpu, thread, rs)) { 369 counter->counts->scaled = -1; 370 perf_counts(counter->counts, cpu, thread)->ena = 0; 371 perf_counts(counter->counts, cpu, thread)->run = 0; 372 return -1; 373 } 374 375 perf_counts__set_loaded(counter->counts, cpu, thread, false); 376 377 if (STAT_RECORD) { 378 if (evsel__write_stat_event(counter, cpu, thread, count)) { 379 pr_err("failed to write stat event\n"); 380 return -1; 381 } 382 } 383 384 if (verbose > 1) { 385 fprintf(stat_config.output, 386 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 387 evsel__name(counter), 388 cpu, 389 count->val, count->ena, count->run); 390 } 391 } 392 393 return 0; 394 } 395 396 static int read_affinity_counters(struct timespec *rs) 397 { 398 struct evsel *counter; 399 struct affinity affinity; 400 int i, ncpus, cpu; 401 402 if (affinity__setup(&affinity) < 0) 403 return -1; 404 405 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); 406 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 407 ncpus = 1; 408 evlist__for_each_cpu(evsel_list, i, cpu) { 409 if (i >= ncpus) 410 break; 411 affinity__set(&affinity, cpu); 412 413 evlist__for_each_entry(evsel_list, counter) { 414 if (evsel__cpu_iter_skip(counter, cpu)) 415 continue; 416 if (!counter->err) { 417 counter->err = read_counter_cpu(counter, rs, 418 counter->cpu_iter - 1); 419 } 420 } 421 } 422 affinity__cleanup(&affinity); 423 return 0; 424 } 425 426 static int read_bpf_map_counters(void) 427 { 428 struct evsel *counter; 429 int err; 430 431 evlist__for_each_entry(evsel_list, counter) { 432 err = bpf_counter__read(counter); 433 if (err) 434 return err; 435 } 436 return 0; 437 } 438 439 static void read_counters(struct timespec *rs) 440 { 441 struct evsel *counter; 442 int err; 443 444 if (!stat_config.stop_read_counter) { 445 if (target__has_bpf(&target)) 446 err = read_bpf_map_counters(); 447 else 448 err = read_affinity_counters(rs); 449 if (err < 0) 450 return; 451 } 452 453 evlist__for_each_entry(evsel_list, counter) { 454 if (counter->err) 455 pr_debug("failed to read counter %s\n", counter->name); 456 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 457 pr_warning("failed to process counter %s\n", counter->name); 458 counter->err = 0; 459 } 460 } 461 462 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 463 { 464 int i; 465 466 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 467 if (!config->stats) 468 return -1; 469 470 config->stats_num = nthreads; 471 472 for (i = 0; i < nthreads; i++) 473 runtime_stat__init(&config->stats[i]); 474 475 return 0; 476 } 477 478 static void runtime_stat_delete(struct perf_stat_config *config) 479 { 480 int i; 481 482 if (!config->stats) 483 return; 484 485 for (i = 0; i < config->stats_num; i++) 486 runtime_stat__exit(&config->stats[i]); 487 488 zfree(&config->stats); 489 } 490 491 static void runtime_stat_reset(struct perf_stat_config *config) 492 { 493 int i; 494 495 if (!config->stats) 496 return; 497 498 for (i = 0; i < config->stats_num; i++) 499 perf_stat__reset_shadow_per_stat(&config->stats[i]); 500 } 501 502 static void process_interval(void) 503 { 504 struct timespec ts, rs; 505 506 clock_gettime(CLOCK_MONOTONIC, &ts); 507 diff_timespec(&rs, &ts, &ref_time); 508 509 perf_stat__reset_shadow_per_stat(&rt_stat); 510 runtime_stat_reset(&stat_config); 511 read_counters(&rs); 512 513 if (STAT_RECORD) { 514 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 515 pr_err("failed to write stat round event\n"); 516 } 517 518 init_stats(&walltime_nsecs_stats); 519 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 520 print_counters(&rs, 0, NULL); 521 } 522 523 static bool handle_interval(unsigned int interval, int *times) 524 { 525 if (interval) { 526 process_interval(); 527 if (interval_count && !(--(*times))) 528 return true; 529 } 530 return false; 531 } 532 533 static int enable_counters(void) 534 { 535 struct evsel *evsel; 536 int err; 537 538 if (target__has_bpf(&target)) { 539 evlist__for_each_entry(evsel_list, evsel) { 540 err = bpf_counter__enable(evsel); 541 if (err) 542 return err; 543 } 544 } 545 546 if (stat_config.initial_delay < 0) { 547 pr_info(EVLIST_DISABLED_MSG); 548 return 0; 549 } 550 551 if (stat_config.initial_delay > 0) { 552 pr_info(EVLIST_DISABLED_MSG); 553 usleep(stat_config.initial_delay * USEC_PER_MSEC); 554 } 555 556 /* 557 * We need to enable counters only if: 558 * - we don't have tracee (attaching to task or cpu) 559 * - we have initial delay configured 560 */ 561 if (!target__none(&target) || stat_config.initial_delay) { 562 evlist__enable(evsel_list); 563 if (stat_config.initial_delay > 0) 564 pr_info(EVLIST_ENABLED_MSG); 565 } 566 return 0; 567 } 568 569 static void disable_counters(void) 570 { 571 /* 572 * If we don't have tracee (attaching to task or cpu), counters may 573 * still be running. To get accurate group ratios, we must stop groups 574 * from counting before reading their constituent counters. 575 */ 576 if (!target__none(&target)) 577 evlist__disable(evsel_list); 578 } 579 580 static volatile int workload_exec_errno; 581 582 /* 583 * evlist__prepare_workload will send a SIGUSR1 584 * if the fork fails, since we asked by setting its 585 * want_signal to true. 586 */ 587 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 588 void *ucontext __maybe_unused) 589 { 590 workload_exec_errno = info->si_value.sival_int; 591 } 592 593 static bool evsel__should_store_id(struct evsel *counter) 594 { 595 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 596 } 597 598 static bool is_target_alive(struct target *_target, 599 struct perf_thread_map *threads) 600 { 601 struct stat st; 602 int i; 603 604 if (!target__has_task(_target)) 605 return true; 606 607 for (i = 0; i < threads->nr; i++) { 608 char path[PATH_MAX]; 609 610 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 611 threads->map[i].pid); 612 613 if (!stat(path, &st)) 614 return true; 615 } 616 617 return false; 618 } 619 620 static void process_evlist(struct evlist *evlist, unsigned int interval) 621 { 622 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 623 624 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 625 switch (cmd) { 626 case EVLIST_CTL_CMD_ENABLE: 627 if (interval) 628 process_interval(); 629 break; 630 case EVLIST_CTL_CMD_DISABLE: 631 if (interval) 632 process_interval(); 633 break; 634 case EVLIST_CTL_CMD_SNAPSHOT: 635 case EVLIST_CTL_CMD_ACK: 636 case EVLIST_CTL_CMD_UNSUPPORTED: 637 case EVLIST_CTL_CMD_EVLIST: 638 case EVLIST_CTL_CMD_STOP: 639 case EVLIST_CTL_CMD_PING: 640 default: 641 break; 642 } 643 } 644 } 645 646 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 647 int *time_to_sleep) 648 { 649 int tts = *time_to_sleep; 650 struct timespec time_diff; 651 652 diff_timespec(&time_diff, time_stop, time_start); 653 654 tts -= time_diff.tv_sec * MSEC_PER_SEC + 655 time_diff.tv_nsec / NSEC_PER_MSEC; 656 657 if (tts < 0) 658 tts = 0; 659 660 *time_to_sleep = tts; 661 } 662 663 static int dispatch_events(bool forks, int timeout, int interval, int *times) 664 { 665 int child_exited = 0, status = 0; 666 int time_to_sleep, sleep_time; 667 struct timespec time_start, time_stop; 668 669 if (interval) 670 sleep_time = interval; 671 else if (timeout) 672 sleep_time = timeout; 673 else 674 sleep_time = 1000; 675 676 time_to_sleep = sleep_time; 677 678 while (!done) { 679 if (forks) 680 child_exited = waitpid(child_pid, &status, WNOHANG); 681 else 682 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 683 684 if (child_exited) 685 break; 686 687 clock_gettime(CLOCK_MONOTONIC, &time_start); 688 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 689 if (timeout || handle_interval(interval, times)) 690 break; 691 time_to_sleep = sleep_time; 692 } else { /* fd revent */ 693 process_evlist(evsel_list, interval); 694 clock_gettime(CLOCK_MONOTONIC, &time_stop); 695 compute_tts(&time_start, &time_stop, &time_to_sleep); 696 } 697 } 698 699 return status; 700 } 701 702 enum counter_recovery { 703 COUNTER_SKIP, 704 COUNTER_RETRY, 705 COUNTER_FATAL, 706 }; 707 708 static enum counter_recovery stat_handle_error(struct evsel *counter) 709 { 710 char msg[BUFSIZ]; 711 /* 712 * PPC returns ENXIO for HW counters until 2.6.37 713 * (behavior changed with commit b0a873e). 714 */ 715 if (errno == EINVAL || errno == ENOSYS || 716 errno == ENOENT || errno == EOPNOTSUPP || 717 errno == ENXIO) { 718 if (verbose > 0) 719 ui__warning("%s event is not supported by the kernel.\n", 720 evsel__name(counter)); 721 counter->supported = false; 722 /* 723 * errored is a sticky flag that means one of the counter's 724 * cpu event had a problem and needs to be reexamined. 725 */ 726 counter->errored = true; 727 728 if ((counter->leader != counter) || 729 !(counter->leader->core.nr_members > 1)) 730 return COUNTER_SKIP; 731 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 732 if (verbose > 0) 733 ui__warning("%s\n", msg); 734 return COUNTER_RETRY; 735 } else if (target__has_per_thread(&target) && 736 evsel_list->core.threads && 737 evsel_list->core.threads->err_thread != -1) { 738 /* 739 * For global --per-thread case, skip current 740 * error thread. 741 */ 742 if (!thread_map__remove(evsel_list->core.threads, 743 evsel_list->core.threads->err_thread)) { 744 evsel_list->core.threads->err_thread = -1; 745 return COUNTER_RETRY; 746 } 747 } 748 749 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 750 ui__error("%s\n", msg); 751 752 if (child_pid != -1) 753 kill(child_pid, SIGTERM); 754 return COUNTER_FATAL; 755 } 756 757 static int __run_perf_stat(int argc, const char **argv, int run_idx) 758 { 759 int interval = stat_config.interval; 760 int times = stat_config.times; 761 int timeout = stat_config.timeout; 762 char msg[BUFSIZ]; 763 unsigned long long t0, t1; 764 struct evsel *counter; 765 size_t l; 766 int status = 0; 767 const bool forks = (argc > 0); 768 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 769 struct affinity affinity; 770 int i, cpu, err; 771 bool second_pass = false; 772 773 if (forks) { 774 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 775 perror("failed to prepare workload"); 776 return -1; 777 } 778 child_pid = evsel_list->workload.pid; 779 } 780 781 if (group) 782 evlist__set_leader(evsel_list); 783 784 if (affinity__setup(&affinity) < 0) 785 return -1; 786 787 if (target__has_bpf(&target)) { 788 evlist__for_each_entry(evsel_list, counter) { 789 if (bpf_counter__load(counter, &target)) 790 return -1; 791 } 792 } 793 794 evlist__for_each_cpu (evsel_list, i, cpu) { 795 affinity__set(&affinity, cpu); 796 797 evlist__for_each_entry(evsel_list, counter) { 798 if (evsel__cpu_iter_skip(counter, cpu)) 799 continue; 800 if (counter->reset_group || counter->errored) 801 continue; 802 try_again: 803 if (create_perf_stat_counter(counter, &stat_config, &target, 804 counter->cpu_iter - 1) < 0) { 805 806 /* 807 * Weak group failed. We cannot just undo this here 808 * because earlier CPUs might be in group mode, and the kernel 809 * doesn't support mixing group and non group reads. Defer 810 * it to later. 811 * Don't close here because we're in the wrong affinity. 812 */ 813 if ((errno == EINVAL || errno == EBADF) && 814 counter->leader != counter && 815 counter->weak_group) { 816 evlist__reset_weak_group(evsel_list, counter, false); 817 assert(counter->reset_group); 818 second_pass = true; 819 continue; 820 } 821 822 switch (stat_handle_error(counter)) { 823 case COUNTER_FATAL: 824 return -1; 825 case COUNTER_RETRY: 826 goto try_again; 827 case COUNTER_SKIP: 828 continue; 829 default: 830 break; 831 } 832 833 } 834 counter->supported = true; 835 } 836 } 837 838 if (second_pass) { 839 /* 840 * Now redo all the weak group after closing them, 841 * and also close errored counters. 842 */ 843 844 evlist__for_each_cpu(evsel_list, i, cpu) { 845 affinity__set(&affinity, cpu); 846 /* First close errored or weak retry */ 847 evlist__for_each_entry(evsel_list, counter) { 848 if (!counter->reset_group && !counter->errored) 849 continue; 850 if (evsel__cpu_iter_skip_no_inc(counter, cpu)) 851 continue; 852 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); 853 } 854 /* Now reopen weak */ 855 evlist__for_each_entry(evsel_list, counter) { 856 if (!counter->reset_group && !counter->errored) 857 continue; 858 if (evsel__cpu_iter_skip(counter, cpu)) 859 continue; 860 if (!counter->reset_group) 861 continue; 862 try_again_reset: 863 pr_debug2("reopening weak %s\n", evsel__name(counter)); 864 if (create_perf_stat_counter(counter, &stat_config, &target, 865 counter->cpu_iter - 1) < 0) { 866 867 switch (stat_handle_error(counter)) { 868 case COUNTER_FATAL: 869 return -1; 870 case COUNTER_RETRY: 871 goto try_again_reset; 872 case COUNTER_SKIP: 873 continue; 874 default: 875 break; 876 } 877 } 878 counter->supported = true; 879 } 880 } 881 } 882 affinity__cleanup(&affinity); 883 884 evlist__for_each_entry(evsel_list, counter) { 885 if (!counter->supported) { 886 perf_evsel__free_fd(&counter->core); 887 continue; 888 } 889 890 l = strlen(counter->unit); 891 if (l > stat_config.unit_width) 892 stat_config.unit_width = l; 893 894 if (evsel__should_store_id(counter) && 895 evsel__store_ids(counter, evsel_list)) 896 return -1; 897 } 898 899 if (evlist__apply_filters(evsel_list, &counter)) { 900 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 901 counter->filter, evsel__name(counter), errno, 902 str_error_r(errno, msg, sizeof(msg))); 903 return -1; 904 } 905 906 if (STAT_RECORD) { 907 int fd = perf_data__fd(&perf_stat.data); 908 909 if (is_pipe) { 910 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 911 } else { 912 err = perf_session__write_header(perf_stat.session, evsel_list, 913 fd, false); 914 } 915 916 if (err < 0) 917 return err; 918 919 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 920 process_synthesized_event, is_pipe); 921 if (err < 0) 922 return err; 923 } 924 925 /* 926 * Enable counters and exec the command: 927 */ 928 t0 = rdclock(); 929 clock_gettime(CLOCK_MONOTONIC, &ref_time); 930 931 if (forks) { 932 evlist__start_workload(evsel_list); 933 err = enable_counters(); 934 if (err) 935 return -1; 936 937 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 938 status = dispatch_events(forks, timeout, interval, ×); 939 if (child_pid != -1) { 940 if (timeout) 941 kill(child_pid, SIGTERM); 942 wait4(child_pid, &status, 0, &stat_config.ru_data); 943 } 944 945 if (workload_exec_errno) { 946 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 947 pr_err("Workload failed: %s\n", emsg); 948 return -1; 949 } 950 951 if (WIFSIGNALED(status)) 952 psignal(WTERMSIG(status), argv[0]); 953 } else { 954 err = enable_counters(); 955 if (err) 956 return -1; 957 status = dispatch_events(forks, timeout, interval, ×); 958 } 959 960 disable_counters(); 961 962 t1 = rdclock(); 963 964 if (stat_config.walltime_run_table) 965 stat_config.walltime_run[run_idx] = t1 - t0; 966 967 if (interval && stat_config.summary) { 968 stat_config.interval = 0; 969 stat_config.stop_read_counter = true; 970 init_stats(&walltime_nsecs_stats); 971 update_stats(&walltime_nsecs_stats, t1 - t0); 972 973 if (stat_config.aggr_mode == AGGR_GLOBAL) 974 evlist__save_aggr_prev_raw_counts(evsel_list); 975 976 evlist__copy_prev_raw_counts(evsel_list); 977 evlist__reset_prev_raw_counts(evsel_list); 978 runtime_stat_reset(&stat_config); 979 perf_stat__reset_shadow_per_stat(&rt_stat); 980 } else 981 update_stats(&walltime_nsecs_stats, t1 - t0); 982 983 /* 984 * Closing a group leader splits the group, and as we only disable 985 * group leaders, results in remaining events becoming enabled. To 986 * avoid arbitrary skew, we must read all counters before closing any 987 * group leaders. 988 */ 989 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 990 991 /* 992 * We need to keep evsel_list alive, because it's processed 993 * later the evsel_list will be closed after. 994 */ 995 if (!STAT_RECORD) 996 evlist__close(evsel_list); 997 998 return WEXITSTATUS(status); 999 } 1000 1001 static int run_perf_stat(int argc, const char **argv, int run_idx) 1002 { 1003 int ret; 1004 1005 if (pre_cmd) { 1006 ret = system(pre_cmd); 1007 if (ret) 1008 return ret; 1009 } 1010 1011 if (sync_run) 1012 sync(); 1013 1014 ret = __run_perf_stat(argc, argv, run_idx); 1015 if (ret) 1016 return ret; 1017 1018 if (post_cmd) { 1019 ret = system(post_cmd); 1020 if (ret) 1021 return ret; 1022 } 1023 1024 return ret; 1025 } 1026 1027 static void print_counters(struct timespec *ts, int argc, const char **argv) 1028 { 1029 /* Do not print anything if we record to the pipe. */ 1030 if (STAT_RECORD && perf_stat.data.is_pipe) 1031 return; 1032 if (stat_config.quiet) 1033 return; 1034 1035 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1036 } 1037 1038 static volatile int signr = -1; 1039 1040 static void skip_signal(int signo) 1041 { 1042 if ((child_pid == -1) || stat_config.interval) 1043 done = 1; 1044 1045 signr = signo; 1046 /* 1047 * render child_pid harmless 1048 * won't send SIGTERM to a random 1049 * process in case of race condition 1050 * and fast PID recycling 1051 */ 1052 child_pid = -1; 1053 } 1054 1055 static void sig_atexit(void) 1056 { 1057 sigset_t set, oset; 1058 1059 /* 1060 * avoid race condition with SIGCHLD handler 1061 * in skip_signal() which is modifying child_pid 1062 * goal is to avoid send SIGTERM to a random 1063 * process 1064 */ 1065 sigemptyset(&set); 1066 sigaddset(&set, SIGCHLD); 1067 sigprocmask(SIG_BLOCK, &set, &oset); 1068 1069 if (child_pid != -1) 1070 kill(child_pid, SIGTERM); 1071 1072 sigprocmask(SIG_SETMASK, &oset, NULL); 1073 1074 if (signr == -1) 1075 return; 1076 1077 signal(signr, SIG_DFL); 1078 kill(getpid(), signr); 1079 } 1080 1081 void perf_stat__set_big_num(int set) 1082 { 1083 stat_config.big_num = (set != 0); 1084 } 1085 1086 static int stat__set_big_num(const struct option *opt __maybe_unused, 1087 const char *s __maybe_unused, int unset) 1088 { 1089 big_num_opt = unset ? 0 : 1; 1090 perf_stat__set_big_num(!unset); 1091 return 0; 1092 } 1093 1094 static int enable_metric_only(const struct option *opt __maybe_unused, 1095 const char *s __maybe_unused, int unset) 1096 { 1097 force_metric_only = true; 1098 stat_config.metric_only = !unset; 1099 return 0; 1100 } 1101 1102 static int parse_metric_groups(const struct option *opt, 1103 const char *str, 1104 int unset __maybe_unused) 1105 { 1106 return metricgroup__parse_groups(opt, str, 1107 stat_config.metric_no_group, 1108 stat_config.metric_no_merge, 1109 &stat_config.metric_events); 1110 } 1111 1112 static int parse_control_option(const struct option *opt, 1113 const char *str, 1114 int unset __maybe_unused) 1115 { 1116 struct perf_stat_config *config = opt->value; 1117 1118 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1119 } 1120 1121 static int parse_stat_cgroups(const struct option *opt, 1122 const char *str, int unset) 1123 { 1124 if (stat_config.cgroup_list) { 1125 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1126 return -1; 1127 } 1128 1129 return parse_cgroups(opt, str, unset); 1130 } 1131 1132 static struct option stat_options[] = { 1133 OPT_BOOLEAN('T', "transaction", &transaction_run, 1134 "hardware transaction statistics"), 1135 OPT_CALLBACK('e', "event", &evsel_list, "event", 1136 "event selector. use 'perf list' to list available events", 1137 parse_events_option), 1138 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1139 "event filter", parse_filter), 1140 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1141 "child tasks do not inherit counters"), 1142 OPT_STRING('p', "pid", &target.pid, "pid", 1143 "stat events on existing process id"), 1144 OPT_STRING('t', "tid", &target.tid, "tid", 1145 "stat events on existing thread id"), 1146 #ifdef HAVE_BPF_SKEL 1147 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1148 "stat events on existing bpf program id"), 1149 #endif 1150 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1151 "system-wide collection from all CPUs"), 1152 OPT_BOOLEAN('g', "group", &group, 1153 "put the counters into a counter group"), 1154 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1155 "Use --no-scale to disable counter scaling for multiplexing"), 1156 OPT_INCR('v', "verbose", &verbose, 1157 "be more verbose (show counter open errors, etc)"), 1158 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1159 "repeat command and print average + stddev (max: 100, forever: 0)"), 1160 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1161 "display details about each run (only with -r option)"), 1162 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1163 "null run - dont start any counters"), 1164 OPT_INCR('d', "detailed", &detailed_run, 1165 "detailed run - start a lot of events"), 1166 OPT_BOOLEAN('S', "sync", &sync_run, 1167 "call sync() before starting a run"), 1168 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1169 "print large numbers with thousands\' separators", 1170 stat__set_big_num), 1171 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1172 "list of cpus to monitor in system-wide"), 1173 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1174 "disable CPU count aggregation", AGGR_NONE), 1175 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1176 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1177 "print counts with custom separator"), 1178 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1179 "monitor event in cgroup name only", parse_stat_cgroups), 1180 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1181 "expand events for each cgroup"), 1182 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1183 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1184 OPT_INTEGER(0, "log-fd", &output_fd, 1185 "log output to fd, instead of stderr"), 1186 OPT_STRING(0, "pre", &pre_cmd, "command", 1187 "command to run prior to the measured command"), 1188 OPT_STRING(0, "post", &post_cmd, "command", 1189 "command to run after to the measured command"), 1190 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1191 "print counts at regular interval in ms " 1192 "(overhead is possible for values <= 100ms)"), 1193 OPT_INTEGER(0, "interval-count", &stat_config.times, 1194 "print counts for fixed number of times"), 1195 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1196 "clear screen in between new interval"), 1197 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1198 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1199 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1200 "aggregate counts per processor socket", AGGR_SOCKET), 1201 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1202 "aggregate counts per processor die", AGGR_DIE), 1203 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1204 "aggregate counts per physical processor core", AGGR_CORE), 1205 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1206 "aggregate counts per thread", AGGR_THREAD), 1207 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1208 "aggregate counts per numa node", AGGR_NODE), 1209 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1210 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1211 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1212 "Only print computed metrics. No raw values", enable_metric_only), 1213 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1214 "don't group metric events, impacts multiplexing"), 1215 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1216 "don't try to share events between metrics in a group"), 1217 OPT_BOOLEAN(0, "topdown", &topdown_run, 1218 "measure top-down statistics"), 1219 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1220 "Set the metrics level for the top-down statistics (0: max level)"), 1221 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1222 "measure SMI cost"), 1223 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1224 "monitor specified metrics or metric groups (separated by ,)", 1225 parse_metric_groups), 1226 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1227 "Configure all used events to run in kernel space.", 1228 PARSE_OPT_EXCLUSIVE), 1229 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1230 "Configure all used events to run in user space.", 1231 PARSE_OPT_EXCLUSIVE), 1232 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1233 "Use with 'percore' event qualifier to show the event " 1234 "counts of one hardware thread by sum up total hardware " 1235 "threads of same physical core"), 1236 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1237 "print summary for interval mode"), 1238 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1239 "don't print output (useful with record)"), 1240 #ifdef HAVE_LIBPFM 1241 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1242 "libpfm4 event selector. use 'perf list' to list available events", 1243 parse_libpfm_events_option), 1244 #endif 1245 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1246 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1247 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1248 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1249 parse_control_option), 1250 OPT_END() 1251 }; 1252 1253 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1254 struct perf_cpu_map *map, int cpu) 1255 { 1256 return cpu_map__get_socket(map, cpu, NULL); 1257 } 1258 1259 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1260 struct perf_cpu_map *map, int cpu) 1261 { 1262 return cpu_map__get_die(map, cpu, NULL); 1263 } 1264 1265 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1266 struct perf_cpu_map *map, int cpu) 1267 { 1268 return cpu_map__get_core(map, cpu, NULL); 1269 } 1270 1271 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1272 struct perf_cpu_map *map, int cpu) 1273 { 1274 return cpu_map__get_node(map, cpu, NULL); 1275 } 1276 1277 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1278 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) 1279 { 1280 int cpu; 1281 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1282 1283 if (idx >= map->nr) 1284 return id; 1285 1286 cpu = map->map[idx]; 1287 1288 if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu])) 1289 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); 1290 1291 id = config->cpus_aggr_map->map[cpu]; 1292 return id; 1293 } 1294 1295 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1296 struct perf_cpu_map *map, int idx) 1297 { 1298 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); 1299 } 1300 1301 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1302 struct perf_cpu_map *map, int idx) 1303 { 1304 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); 1305 } 1306 1307 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1308 struct perf_cpu_map *map, int idx) 1309 { 1310 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); 1311 } 1312 1313 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1314 struct perf_cpu_map *map, int idx) 1315 { 1316 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); 1317 } 1318 1319 static bool term_percore_set(void) 1320 { 1321 struct evsel *counter; 1322 1323 evlist__for_each_entry(evsel_list, counter) { 1324 if (counter->percore) 1325 return true; 1326 } 1327 1328 return false; 1329 } 1330 1331 static int perf_stat_init_aggr_mode(void) 1332 { 1333 int nr; 1334 1335 switch (stat_config.aggr_mode) { 1336 case AGGR_SOCKET: 1337 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1338 perror("cannot build socket map"); 1339 return -1; 1340 } 1341 stat_config.aggr_get_id = perf_stat__get_socket_cached; 1342 break; 1343 case AGGR_DIE: 1344 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1345 perror("cannot build die map"); 1346 return -1; 1347 } 1348 stat_config.aggr_get_id = perf_stat__get_die_cached; 1349 break; 1350 case AGGR_CORE: 1351 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1352 perror("cannot build core map"); 1353 return -1; 1354 } 1355 stat_config.aggr_get_id = perf_stat__get_core_cached; 1356 break; 1357 case AGGR_NODE: 1358 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1359 perror("cannot build core map"); 1360 return -1; 1361 } 1362 stat_config.aggr_get_id = perf_stat__get_node_cached; 1363 break; 1364 case AGGR_NONE: 1365 if (term_percore_set()) { 1366 if (cpu_map__build_core_map(evsel_list->core.cpus, 1367 &stat_config.aggr_map)) { 1368 perror("cannot build core map"); 1369 return -1; 1370 } 1371 stat_config.aggr_get_id = perf_stat__get_core_cached; 1372 } 1373 break; 1374 case AGGR_GLOBAL: 1375 case AGGR_THREAD: 1376 case AGGR_UNSET: 1377 default: 1378 break; 1379 } 1380 1381 /* 1382 * The evsel_list->cpus is the base we operate on, 1383 * taking the highest cpu number to be the size of 1384 * the aggregation translate cpumap. 1385 */ 1386 nr = perf_cpu_map__max(evsel_list->core.cpus); 1387 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1388 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1389 } 1390 1391 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1392 { 1393 if (map) { 1394 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1395 "cpu_aggr_map refcnt unbalanced\n"); 1396 free(map); 1397 } 1398 } 1399 1400 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1401 { 1402 if (map && refcount_dec_and_test(&map->refcnt)) 1403 cpu_aggr_map__delete(map); 1404 } 1405 1406 static void perf_stat__exit_aggr_mode(void) 1407 { 1408 cpu_aggr_map__put(stat_config.aggr_map); 1409 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1410 stat_config.aggr_map = NULL; 1411 stat_config.cpus_aggr_map = NULL; 1412 } 1413 1414 static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) 1415 { 1416 int cpu; 1417 1418 if (idx > map->nr) 1419 return -1; 1420 1421 cpu = map->map[idx]; 1422 1423 if (cpu >= env->nr_cpus_avail) 1424 return -1; 1425 1426 return cpu; 1427 } 1428 1429 static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) 1430 { 1431 struct perf_env *env = data; 1432 int cpu = perf_env__get_cpu(env, map, idx); 1433 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1434 1435 if (cpu != -1) 1436 id.socket = env->cpu[cpu].socket_id; 1437 1438 return id; 1439 } 1440 1441 static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) 1442 { 1443 struct perf_env *env = data; 1444 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1445 int cpu = perf_env__get_cpu(env, map, idx); 1446 1447 if (cpu != -1) { 1448 /* 1449 * die_id is relative to socket, so start 1450 * with the socket ID and then add die to 1451 * make a unique ID. 1452 */ 1453 id.socket = env->cpu[cpu].socket_id; 1454 id.die = env->cpu[cpu].die_id; 1455 } 1456 1457 return id; 1458 } 1459 1460 static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) 1461 { 1462 struct perf_env *env = data; 1463 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1464 int cpu = perf_env__get_cpu(env, map, idx); 1465 1466 if (cpu != -1) { 1467 /* 1468 * core_id is relative to socket and die, 1469 * we need a global id. So we set 1470 * socket, die id and core id 1471 */ 1472 id.socket = env->cpu[cpu].socket_id; 1473 id.die = env->cpu[cpu].die_id; 1474 id.core = env->cpu[cpu].core_id; 1475 } 1476 1477 return id; 1478 } 1479 1480 static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) 1481 { 1482 int cpu = perf_env__get_cpu(data, map, idx); 1483 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1484 1485 id.node = perf_env__numa_node(data, cpu); 1486 return id; 1487 } 1488 1489 static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, 1490 struct cpu_aggr_map **sockp) 1491 { 1492 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); 1493 } 1494 1495 static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, 1496 struct cpu_aggr_map **diep) 1497 { 1498 return cpu_map__build_map(cpus, diep, perf_env__get_die, env); 1499 } 1500 1501 static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, 1502 struct cpu_aggr_map **corep) 1503 { 1504 return cpu_map__build_map(cpus, corep, perf_env__get_core, env); 1505 } 1506 1507 static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, 1508 struct cpu_aggr_map **nodep) 1509 { 1510 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); 1511 } 1512 1513 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1514 struct perf_cpu_map *map, int idx) 1515 { 1516 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); 1517 } 1518 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1519 struct perf_cpu_map *map, int idx) 1520 { 1521 return perf_env__get_die(map, idx, &perf_stat.session->header.env); 1522 } 1523 1524 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1525 struct perf_cpu_map *map, int idx) 1526 { 1527 return perf_env__get_core(map, idx, &perf_stat.session->header.env); 1528 } 1529 1530 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1531 struct perf_cpu_map *map, int idx) 1532 { 1533 return perf_env__get_node(map, idx, &perf_stat.session->header.env); 1534 } 1535 1536 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1537 { 1538 struct perf_env *env = &st->session->header.env; 1539 1540 switch (stat_config.aggr_mode) { 1541 case AGGR_SOCKET: 1542 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1543 perror("cannot build socket map"); 1544 return -1; 1545 } 1546 stat_config.aggr_get_id = perf_stat__get_socket_file; 1547 break; 1548 case AGGR_DIE: 1549 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1550 perror("cannot build die map"); 1551 return -1; 1552 } 1553 stat_config.aggr_get_id = perf_stat__get_die_file; 1554 break; 1555 case AGGR_CORE: 1556 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1557 perror("cannot build core map"); 1558 return -1; 1559 } 1560 stat_config.aggr_get_id = perf_stat__get_core_file; 1561 break; 1562 case AGGR_NODE: 1563 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1564 perror("cannot build core map"); 1565 return -1; 1566 } 1567 stat_config.aggr_get_id = perf_stat__get_node_file; 1568 break; 1569 case AGGR_NONE: 1570 case AGGR_GLOBAL: 1571 case AGGR_THREAD: 1572 case AGGR_UNSET: 1573 default: 1574 break; 1575 } 1576 1577 return 0; 1578 } 1579 1580 /* 1581 * Add default attributes, if there were no attributes specified or 1582 * if -d/--detailed, -d -d or -d -d -d is used: 1583 */ 1584 static int add_default_attributes(void) 1585 { 1586 int err; 1587 struct perf_event_attr default_attrs0[] = { 1588 1589 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1590 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1591 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1592 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1593 1594 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1595 }; 1596 struct perf_event_attr frontend_attrs[] = { 1597 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1598 }; 1599 struct perf_event_attr backend_attrs[] = { 1600 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1601 }; 1602 struct perf_event_attr default_attrs1[] = { 1603 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1604 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1605 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1606 1607 }; 1608 1609 /* 1610 * Detailed stats (-d), covering the L1 and last level data caches: 1611 */ 1612 struct perf_event_attr detailed_attrs[] = { 1613 1614 { .type = PERF_TYPE_HW_CACHE, 1615 .config = 1616 PERF_COUNT_HW_CACHE_L1D << 0 | 1617 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1618 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1619 1620 { .type = PERF_TYPE_HW_CACHE, 1621 .config = 1622 PERF_COUNT_HW_CACHE_L1D << 0 | 1623 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1624 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1625 1626 { .type = PERF_TYPE_HW_CACHE, 1627 .config = 1628 PERF_COUNT_HW_CACHE_LL << 0 | 1629 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1630 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1631 1632 { .type = PERF_TYPE_HW_CACHE, 1633 .config = 1634 PERF_COUNT_HW_CACHE_LL << 0 | 1635 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1636 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1637 }; 1638 1639 /* 1640 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1641 */ 1642 struct perf_event_attr very_detailed_attrs[] = { 1643 1644 { .type = PERF_TYPE_HW_CACHE, 1645 .config = 1646 PERF_COUNT_HW_CACHE_L1I << 0 | 1647 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1648 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1649 1650 { .type = PERF_TYPE_HW_CACHE, 1651 .config = 1652 PERF_COUNT_HW_CACHE_L1I << 0 | 1653 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1654 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1655 1656 { .type = PERF_TYPE_HW_CACHE, 1657 .config = 1658 PERF_COUNT_HW_CACHE_DTLB << 0 | 1659 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1660 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1661 1662 { .type = PERF_TYPE_HW_CACHE, 1663 .config = 1664 PERF_COUNT_HW_CACHE_DTLB << 0 | 1665 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1666 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1667 1668 { .type = PERF_TYPE_HW_CACHE, 1669 .config = 1670 PERF_COUNT_HW_CACHE_ITLB << 0 | 1671 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1672 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1673 1674 { .type = PERF_TYPE_HW_CACHE, 1675 .config = 1676 PERF_COUNT_HW_CACHE_ITLB << 0 | 1677 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1678 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1679 1680 }; 1681 1682 /* 1683 * Very, very detailed stats (-d -d -d), adding prefetch events: 1684 */ 1685 struct perf_event_attr very_very_detailed_attrs[] = { 1686 1687 { .type = PERF_TYPE_HW_CACHE, 1688 .config = 1689 PERF_COUNT_HW_CACHE_L1D << 0 | 1690 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1691 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1692 1693 { .type = PERF_TYPE_HW_CACHE, 1694 .config = 1695 PERF_COUNT_HW_CACHE_L1D << 0 | 1696 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1697 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1698 }; 1699 struct parse_events_error errinfo; 1700 1701 /* Set attrs if no event is selected and !null_run: */ 1702 if (stat_config.null_run) 1703 return 0; 1704 1705 bzero(&errinfo, sizeof(errinfo)); 1706 if (transaction_run) { 1707 /* Handle -T as -M transaction. Once platform specific metrics 1708 * support has been added to the json files, all archictures 1709 * will use this approach. To determine transaction support 1710 * on an architecture test for such a metric name. 1711 */ 1712 if (metricgroup__has_metric("transaction")) { 1713 struct option opt = { .value = &evsel_list }; 1714 1715 return metricgroup__parse_groups(&opt, "transaction", 1716 stat_config.metric_no_group, 1717 stat_config.metric_no_merge, 1718 &stat_config.metric_events); 1719 } 1720 1721 if (pmu_have_event("cpu", "cycles-ct") && 1722 pmu_have_event("cpu", "el-start")) 1723 err = parse_events(evsel_list, transaction_attrs, 1724 &errinfo); 1725 else 1726 err = parse_events(evsel_list, 1727 transaction_limited_attrs, 1728 &errinfo); 1729 if (err) { 1730 fprintf(stderr, "Cannot set up transaction events\n"); 1731 parse_events_print_error(&errinfo, transaction_attrs); 1732 return -1; 1733 } 1734 return 0; 1735 } 1736 1737 if (smi_cost) { 1738 int smi; 1739 1740 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1741 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1742 return -1; 1743 } 1744 1745 if (!smi) { 1746 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1747 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1748 return -1; 1749 } 1750 smi_reset = true; 1751 } 1752 1753 if (pmu_have_event("msr", "aperf") && 1754 pmu_have_event("msr", "smi")) { 1755 if (!force_metric_only) 1756 stat_config.metric_only = true; 1757 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1758 } else { 1759 fprintf(stderr, "To measure SMI cost, it needs " 1760 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1761 parse_events_print_error(&errinfo, smi_cost_attrs); 1762 return -1; 1763 } 1764 if (err) { 1765 parse_events_print_error(&errinfo, smi_cost_attrs); 1766 fprintf(stderr, "Cannot set up SMI cost events\n"); 1767 return -1; 1768 } 1769 return 0; 1770 } 1771 1772 if (topdown_run) { 1773 const char **metric_attrs = topdown_metric_attrs; 1774 unsigned int max_level = 1; 1775 char *str = NULL; 1776 bool warn = false; 1777 1778 if (!force_metric_only) 1779 stat_config.metric_only = true; 1780 1781 if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) { 1782 metric_attrs = topdown_metric_L2_attrs; 1783 max_level = 2; 1784 } 1785 1786 if (stat_config.topdown_level > max_level) { 1787 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1788 return -1; 1789 } else if (!stat_config.topdown_level) 1790 stat_config.topdown_level = max_level; 1791 1792 if (topdown_filter_events(metric_attrs, &str, 1) < 0) { 1793 pr_err("Out of memory\n"); 1794 return -1; 1795 } 1796 if (metric_attrs[0] && str) { 1797 if (!stat_config.interval && !stat_config.metric_only) { 1798 fprintf(stat_config.output, 1799 "Topdown accuracy may decrease when measuring long periods.\n" 1800 "Please print the result regularly, e.g. -I1000\n"); 1801 } 1802 goto setup_metrics; 1803 } 1804 1805 zfree(&str); 1806 1807 if (stat_config.aggr_mode != AGGR_GLOBAL && 1808 stat_config.aggr_mode != AGGR_CORE) { 1809 pr_err("top down event configuration requires --per-core mode\n"); 1810 return -1; 1811 } 1812 stat_config.aggr_mode = AGGR_CORE; 1813 if (nr_cgroups || !target__has_cpu(&target)) { 1814 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1815 return -1; 1816 } 1817 1818 if (topdown_filter_events(topdown_attrs, &str, 1819 arch_topdown_check_group(&warn)) < 0) { 1820 pr_err("Out of memory\n"); 1821 return -1; 1822 } 1823 if (topdown_attrs[0] && str) { 1824 if (warn) 1825 arch_topdown_group_warn(); 1826 setup_metrics: 1827 err = parse_events(evsel_list, str, &errinfo); 1828 if (err) { 1829 fprintf(stderr, 1830 "Cannot set up top down events %s: %d\n", 1831 str, err); 1832 parse_events_print_error(&errinfo, str); 1833 free(str); 1834 return -1; 1835 } 1836 } else { 1837 fprintf(stderr, "System does not support topdown\n"); 1838 return -1; 1839 } 1840 free(str); 1841 } 1842 1843 if (!evsel_list->core.nr_entries) { 1844 if (target__has_cpu(&target)) 1845 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1846 1847 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1848 return -1; 1849 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1850 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1851 return -1; 1852 } 1853 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1854 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1855 return -1; 1856 } 1857 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1858 return -1; 1859 1860 if (arch_evlist__add_default_attrs(evsel_list) < 0) 1861 return -1; 1862 } 1863 1864 /* Detailed events get appended to the event list: */ 1865 1866 if (detailed_run < 1) 1867 return 0; 1868 1869 /* Append detailed run extra attributes: */ 1870 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1871 return -1; 1872 1873 if (detailed_run < 2) 1874 return 0; 1875 1876 /* Append very detailed run extra attributes: */ 1877 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1878 return -1; 1879 1880 if (detailed_run < 3) 1881 return 0; 1882 1883 /* Append very, very detailed run extra attributes: */ 1884 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1885 } 1886 1887 static const char * const stat_record_usage[] = { 1888 "perf stat record [<options>]", 1889 NULL, 1890 }; 1891 1892 static void init_features(struct perf_session *session) 1893 { 1894 int feat; 1895 1896 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1897 perf_header__set_feat(&session->header, feat); 1898 1899 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1900 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1901 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1902 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1903 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1904 } 1905 1906 static int __cmd_record(int argc, const char **argv) 1907 { 1908 struct perf_session *session; 1909 struct perf_data *data = &perf_stat.data; 1910 1911 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1912 PARSE_OPT_STOP_AT_NON_OPTION); 1913 1914 if (output_name) 1915 data->path = output_name; 1916 1917 if (stat_config.run_count != 1 || forever) { 1918 pr_err("Cannot use -r option with perf stat record.\n"); 1919 return -1; 1920 } 1921 1922 session = perf_session__new(data, false, NULL); 1923 if (IS_ERR(session)) { 1924 pr_err("Perf session creation failed\n"); 1925 return PTR_ERR(session); 1926 } 1927 1928 init_features(session); 1929 1930 session->evlist = evsel_list; 1931 perf_stat.session = session; 1932 perf_stat.record = true; 1933 return argc; 1934 } 1935 1936 static int process_stat_round_event(struct perf_session *session, 1937 union perf_event *event) 1938 { 1939 struct perf_record_stat_round *stat_round = &event->stat_round; 1940 struct evsel *counter; 1941 struct timespec tsh, *ts = NULL; 1942 const char **argv = session->header.env.cmdline_argv; 1943 int argc = session->header.env.nr_cmdline; 1944 1945 evlist__for_each_entry(evsel_list, counter) 1946 perf_stat_process_counter(&stat_config, counter); 1947 1948 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 1949 update_stats(&walltime_nsecs_stats, stat_round->time); 1950 1951 if (stat_config.interval && stat_round->time) { 1952 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 1953 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 1954 ts = &tsh; 1955 } 1956 1957 print_counters(ts, argc, argv); 1958 return 0; 1959 } 1960 1961 static 1962 int process_stat_config_event(struct perf_session *session, 1963 union perf_event *event) 1964 { 1965 struct perf_tool *tool = session->tool; 1966 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1967 1968 perf_event__read_stat_config(&stat_config, &event->stat_config); 1969 1970 if (perf_cpu_map__empty(st->cpus)) { 1971 if (st->aggr_mode != AGGR_UNSET) 1972 pr_warning("warning: processing task data, aggregation mode not set\n"); 1973 return 0; 1974 } 1975 1976 if (st->aggr_mode != AGGR_UNSET) 1977 stat_config.aggr_mode = st->aggr_mode; 1978 1979 if (perf_stat.data.is_pipe) 1980 perf_stat_init_aggr_mode(); 1981 else 1982 perf_stat_init_aggr_mode_file(st); 1983 1984 return 0; 1985 } 1986 1987 static int set_maps(struct perf_stat *st) 1988 { 1989 if (!st->cpus || !st->threads) 1990 return 0; 1991 1992 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 1993 return -EINVAL; 1994 1995 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 1996 1997 if (evlist__alloc_stats(evsel_list, true)) 1998 return -ENOMEM; 1999 2000 st->maps_allocated = true; 2001 return 0; 2002 } 2003 2004 static 2005 int process_thread_map_event(struct perf_session *session, 2006 union perf_event *event) 2007 { 2008 struct perf_tool *tool = session->tool; 2009 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2010 2011 if (st->threads) { 2012 pr_warning("Extra thread map event, ignoring.\n"); 2013 return 0; 2014 } 2015 2016 st->threads = thread_map__new_event(&event->thread_map); 2017 if (!st->threads) 2018 return -ENOMEM; 2019 2020 return set_maps(st); 2021 } 2022 2023 static 2024 int process_cpu_map_event(struct perf_session *session, 2025 union perf_event *event) 2026 { 2027 struct perf_tool *tool = session->tool; 2028 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2029 struct perf_cpu_map *cpus; 2030 2031 if (st->cpus) { 2032 pr_warning("Extra cpu map event, ignoring.\n"); 2033 return 0; 2034 } 2035 2036 cpus = cpu_map__new_data(&event->cpu_map.data); 2037 if (!cpus) 2038 return -ENOMEM; 2039 2040 st->cpus = cpus; 2041 return set_maps(st); 2042 } 2043 2044 static const char * const stat_report_usage[] = { 2045 "perf stat report [<options>]", 2046 NULL, 2047 }; 2048 2049 static struct perf_stat perf_stat = { 2050 .tool = { 2051 .attr = perf_event__process_attr, 2052 .event_update = perf_event__process_event_update, 2053 .thread_map = process_thread_map_event, 2054 .cpu_map = process_cpu_map_event, 2055 .stat_config = process_stat_config_event, 2056 .stat = perf_event__process_stat_event, 2057 .stat_round = process_stat_round_event, 2058 }, 2059 .aggr_mode = AGGR_UNSET, 2060 }; 2061 2062 static int __cmd_report(int argc, const char **argv) 2063 { 2064 struct perf_session *session; 2065 const struct option options[] = { 2066 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2067 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2068 "aggregate counts per processor socket", AGGR_SOCKET), 2069 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2070 "aggregate counts per processor die", AGGR_DIE), 2071 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2072 "aggregate counts per physical processor core", AGGR_CORE), 2073 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2074 "aggregate counts per numa node", AGGR_NODE), 2075 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2076 "disable CPU count aggregation", AGGR_NONE), 2077 OPT_END() 2078 }; 2079 struct stat st; 2080 int ret; 2081 2082 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2083 2084 if (!input_name || !strlen(input_name)) { 2085 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2086 input_name = "-"; 2087 else 2088 input_name = "perf.data"; 2089 } 2090 2091 perf_stat.data.path = input_name; 2092 perf_stat.data.mode = PERF_DATA_MODE_READ; 2093 2094 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); 2095 if (IS_ERR(session)) 2096 return PTR_ERR(session); 2097 2098 perf_stat.session = session; 2099 stat_config.output = stderr; 2100 evsel_list = session->evlist; 2101 2102 ret = perf_session__process_events(session); 2103 if (ret) 2104 return ret; 2105 2106 perf_session__delete(session); 2107 return 0; 2108 } 2109 2110 static void setup_system_wide(int forks) 2111 { 2112 /* 2113 * Make system wide (-a) the default target if 2114 * no target was specified and one of following 2115 * conditions is met: 2116 * 2117 * - there's no workload specified 2118 * - there is workload specified but all requested 2119 * events are system wide events 2120 */ 2121 if (!target__none(&target)) 2122 return; 2123 2124 if (!forks) 2125 target.system_wide = true; 2126 else { 2127 struct evsel *counter; 2128 2129 evlist__for_each_entry(evsel_list, counter) { 2130 if (!counter->core.system_wide && 2131 strcmp(counter->name, "duration_time")) { 2132 return; 2133 } 2134 } 2135 2136 if (evsel_list->core.nr_entries) 2137 target.system_wide = true; 2138 } 2139 } 2140 2141 int cmd_stat(int argc, const char **argv) 2142 { 2143 const char * const stat_usage[] = { 2144 "perf stat [<options>] [<command>]", 2145 NULL 2146 }; 2147 int status = -EINVAL, run_idx, err; 2148 const char *mode; 2149 FILE *output = stderr; 2150 unsigned int interval, timeout; 2151 const char * const stat_subcommands[] = { "record", "report" }; 2152 char errbuf[BUFSIZ]; 2153 2154 setlocale(LC_ALL, ""); 2155 2156 evsel_list = evlist__new(); 2157 if (evsel_list == NULL) 2158 return -ENOMEM; 2159 2160 parse_events__shrink_config_terms(); 2161 2162 /* String-parsing callback-based options would segfault when negated */ 2163 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2164 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2165 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2166 2167 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2168 (const char **) stat_usage, 2169 PARSE_OPT_STOP_AT_NON_OPTION); 2170 perf_stat__collect_metric_expr(evsel_list); 2171 perf_stat__init_shadow_stats(); 2172 2173 if (stat_config.csv_sep) { 2174 stat_config.csv_output = true; 2175 if (!strcmp(stat_config.csv_sep, "\\t")) 2176 stat_config.csv_sep = "\t"; 2177 } else 2178 stat_config.csv_sep = DEFAULT_SEPARATOR; 2179 2180 if (argc && !strncmp(argv[0], "rec", 3)) { 2181 argc = __cmd_record(argc, argv); 2182 if (argc < 0) 2183 return -1; 2184 } else if (argc && !strncmp(argv[0], "rep", 3)) 2185 return __cmd_report(argc, argv); 2186 2187 interval = stat_config.interval; 2188 timeout = stat_config.timeout; 2189 2190 /* 2191 * For record command the -o is already taken care of. 2192 */ 2193 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2194 output = NULL; 2195 2196 if (output_name && output_fd) { 2197 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2198 parse_options_usage(stat_usage, stat_options, "o", 1); 2199 parse_options_usage(NULL, stat_options, "log-fd", 0); 2200 goto out; 2201 } 2202 2203 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2204 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2205 goto out; 2206 } 2207 2208 if (stat_config.metric_only && stat_config.run_count > 1) { 2209 fprintf(stderr, "--metric-only is not supported with -r\n"); 2210 goto out; 2211 } 2212 2213 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2214 fprintf(stderr, "--table is only supported with -r\n"); 2215 parse_options_usage(stat_usage, stat_options, "r", 1); 2216 parse_options_usage(NULL, stat_options, "table", 0); 2217 goto out; 2218 } 2219 2220 if (output_fd < 0) { 2221 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2222 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2223 goto out; 2224 } 2225 2226 if (!output && !stat_config.quiet) { 2227 struct timespec tm; 2228 mode = append_file ? "a" : "w"; 2229 2230 output = fopen(output_name, mode); 2231 if (!output) { 2232 perror("failed to create output file"); 2233 return -1; 2234 } 2235 clock_gettime(CLOCK_REALTIME, &tm); 2236 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2237 } else if (output_fd > 0) { 2238 mode = append_file ? "a" : "w"; 2239 output = fdopen(output_fd, mode); 2240 if (!output) { 2241 perror("Failed opening logfd"); 2242 return -errno; 2243 } 2244 } 2245 2246 stat_config.output = output; 2247 2248 /* 2249 * let the spreadsheet do the pretty-printing 2250 */ 2251 if (stat_config.csv_output) { 2252 /* User explicitly passed -B? */ 2253 if (big_num_opt == 1) { 2254 fprintf(stderr, "-B option not supported with -x\n"); 2255 parse_options_usage(stat_usage, stat_options, "B", 1); 2256 parse_options_usage(NULL, stat_options, "x", 1); 2257 goto out; 2258 } else /* Nope, so disable big number formatting */ 2259 stat_config.big_num = false; 2260 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2261 stat_config.big_num = false; 2262 2263 err = target__validate(&target); 2264 if (err) { 2265 target__strerror(&target, err, errbuf, BUFSIZ); 2266 pr_warning("%s\n", errbuf); 2267 } 2268 2269 setup_system_wide(argc); 2270 2271 /* 2272 * Display user/system times only for single 2273 * run and when there's specified tracee. 2274 */ 2275 if ((stat_config.run_count == 1) && target__none(&target)) 2276 stat_config.ru_display = true; 2277 2278 if (stat_config.run_count < 0) { 2279 pr_err("Run count must be a positive number\n"); 2280 parse_options_usage(stat_usage, stat_options, "r", 1); 2281 goto out; 2282 } else if (stat_config.run_count == 0) { 2283 forever = true; 2284 stat_config.run_count = 1; 2285 } 2286 2287 if (stat_config.walltime_run_table) { 2288 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2289 if (!stat_config.walltime_run) { 2290 pr_err("failed to setup -r option"); 2291 goto out; 2292 } 2293 } 2294 2295 if ((stat_config.aggr_mode == AGGR_THREAD) && 2296 !target__has_task(&target)) { 2297 if (!target.system_wide || target.cpu_list) { 2298 fprintf(stderr, "The --per-thread option is only " 2299 "available when monitoring via -p -t -a " 2300 "options or only --per-thread.\n"); 2301 parse_options_usage(NULL, stat_options, "p", 1); 2302 parse_options_usage(NULL, stat_options, "t", 1); 2303 goto out; 2304 } 2305 } 2306 2307 /* 2308 * no_aggr, cgroup are for system-wide only 2309 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2310 */ 2311 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2312 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && 2313 !target__has_cpu(&target)) { 2314 fprintf(stderr, "both cgroup and no-aggregation " 2315 "modes only available in system-wide mode\n"); 2316 2317 parse_options_usage(stat_usage, stat_options, "G", 1); 2318 parse_options_usage(NULL, stat_options, "A", 1); 2319 parse_options_usage(NULL, stat_options, "a", 1); 2320 goto out; 2321 } 2322 2323 if (add_default_attributes()) 2324 goto out; 2325 2326 if (stat_config.cgroup_list) { 2327 if (nr_cgroups > 0) { 2328 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2329 parse_options_usage(stat_usage, stat_options, "G", 1); 2330 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2331 goto out; 2332 } 2333 2334 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2335 &stat_config.metric_events, true) < 0) { 2336 parse_options_usage(stat_usage, stat_options, 2337 "for-each-cgroup", 0); 2338 goto out; 2339 } 2340 } 2341 2342 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2343 target.per_thread = true; 2344 2345 if (evlist__create_maps(evsel_list, &target) < 0) { 2346 if (target__has_task(&target)) { 2347 pr_err("Problems finding threads of monitor\n"); 2348 parse_options_usage(stat_usage, stat_options, "p", 1); 2349 parse_options_usage(NULL, stat_options, "t", 1); 2350 } else if (target__has_cpu(&target)) { 2351 perror("failed to parse CPUs map"); 2352 parse_options_usage(stat_usage, stat_options, "C", 1); 2353 parse_options_usage(NULL, stat_options, "a", 1); 2354 } 2355 goto out; 2356 } 2357 2358 evlist__check_cpu_maps(evsel_list); 2359 2360 /* 2361 * Initialize thread_map with comm names, 2362 * so we could print it out on output. 2363 */ 2364 if (stat_config.aggr_mode == AGGR_THREAD) { 2365 thread_map__read_comms(evsel_list->core.threads); 2366 if (target.system_wide) { 2367 if (runtime_stat_new(&stat_config, 2368 perf_thread_map__nr(evsel_list->core.threads))) { 2369 goto out; 2370 } 2371 } 2372 } 2373 2374 if (stat_config.aggr_mode == AGGR_NODE) 2375 cpu__setup_cpunode_map(); 2376 2377 if (stat_config.times && interval) 2378 interval_count = true; 2379 else if (stat_config.times && !interval) { 2380 pr_err("interval-count option should be used together with " 2381 "interval-print.\n"); 2382 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2383 parse_options_usage(stat_usage, stat_options, "I", 1); 2384 goto out; 2385 } 2386 2387 if (timeout && timeout < 100) { 2388 if (timeout < 10) { 2389 pr_err("timeout must be >= 10ms.\n"); 2390 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2391 goto out; 2392 } else 2393 pr_warning("timeout < 100ms. " 2394 "The overhead percentage could be high in some cases. " 2395 "Please proceed with caution.\n"); 2396 } 2397 if (timeout && interval) { 2398 pr_err("timeout option is not supported with interval-print.\n"); 2399 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2400 parse_options_usage(stat_usage, stat_options, "I", 1); 2401 goto out; 2402 } 2403 2404 if (evlist__alloc_stats(evsel_list, interval)) 2405 goto out; 2406 2407 if (perf_stat_init_aggr_mode()) 2408 goto out; 2409 2410 /* 2411 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2412 * while avoiding that older tools show confusing messages. 2413 * 2414 * However for pipe sessions we need to keep it zero, 2415 * because script's perf_evsel__check_attr is triggered 2416 * by attr->sample_type != 0, and we can't run it on 2417 * stat sessions. 2418 */ 2419 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2420 2421 /* 2422 * We dont want to block the signals - that would cause 2423 * child tasks to inherit that and Ctrl-C would not work. 2424 * What we want is for Ctrl-C to work in the exec()-ed 2425 * task, but being ignored by perf stat itself: 2426 */ 2427 atexit(sig_atexit); 2428 if (!forever) 2429 signal(SIGINT, skip_signal); 2430 signal(SIGCHLD, skip_signal); 2431 signal(SIGALRM, skip_signal); 2432 signal(SIGABRT, skip_signal); 2433 2434 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2435 goto out; 2436 2437 status = 0; 2438 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2439 if (stat_config.run_count != 1 && verbose > 0) 2440 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2441 run_idx + 1); 2442 2443 if (run_idx != 0) 2444 evlist__reset_prev_raw_counts(evsel_list); 2445 2446 status = run_perf_stat(argc, argv, run_idx); 2447 if (forever && status != -1 && !interval) { 2448 print_counters(NULL, argc, argv); 2449 perf_stat__reset_stats(); 2450 } 2451 } 2452 2453 if (!forever && status != -1 && (!interval || stat_config.summary)) 2454 print_counters(NULL, argc, argv); 2455 2456 evlist__finalize_ctlfd(evsel_list); 2457 2458 if (STAT_RECORD) { 2459 /* 2460 * We synthesize the kernel mmap record just so that older tools 2461 * don't emit warnings about not being able to resolve symbols 2462 * due to /proc/sys/kernel/kptr_restrict settings and instear provide 2463 * a saner message about no samples being in the perf.data file. 2464 * 2465 * This also serves to suppress a warning about f_header.data.size == 0 2466 * in header.c at the moment 'perf stat record' gets introduced, which 2467 * is not really needed once we start adding the stat specific PERF_RECORD_ 2468 * records, but the need to suppress the kptr_restrict messages in older 2469 * tools remain -acme 2470 */ 2471 int fd = perf_data__fd(&perf_stat.data); 2472 2473 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2474 process_synthesized_event, 2475 &perf_stat.session->machines.host); 2476 if (err) { 2477 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2478 "older tools may produce warnings about this file\n."); 2479 } 2480 2481 if (!interval) { 2482 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2483 pr_err("failed to write stat round event\n"); 2484 } 2485 2486 if (!perf_stat.data.is_pipe) { 2487 perf_stat.session->header.data_size += perf_stat.bytes_written; 2488 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2489 } 2490 2491 evlist__close(evsel_list); 2492 perf_session__delete(perf_stat.session); 2493 } 2494 2495 perf_stat__exit_aggr_mode(); 2496 evlist__free_stats(evsel_list); 2497 out: 2498 zfree(&stat_config.walltime_run); 2499 2500 if (smi_cost && smi_reset) 2501 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2502 2503 evlist__delete(evsel_list); 2504 2505 metricgroup__rblist_exit(&stat_config.metric_events); 2506 runtime_stat_delete(&stat_config); 2507 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2508 2509 return status; 2510 } 2511