1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evlist-hybrid.h" 52 #include "util/evsel.h" 53 #include "util/debug.h" 54 #include "util/color.h" 55 #include "util/stat.h" 56 #include "util/header.h" 57 #include "util/cpumap.h" 58 #include "util/thread_map.h" 59 #include "util/counts.h" 60 #include "util/topdown.h" 61 #include "util/session.h" 62 #include "util/tool.h" 63 #include "util/string2.h" 64 #include "util/metricgroup.h" 65 #include "util/synthetic-events.h" 66 #include "util/target.h" 67 #include "util/time-utils.h" 68 #include "util/top.h" 69 #include "util/affinity.h" 70 #include "util/pfm.h" 71 #include "util/bpf_counter.h" 72 #include "util/iostat.h" 73 #include "util/pmu-hybrid.h" 74 #include "asm/bug.h" 75 76 #include <linux/time64.h> 77 #include <linux/zalloc.h> 78 #include <api/fs/fs.h> 79 #include <errno.h> 80 #include <signal.h> 81 #include <stdlib.h> 82 #include <sys/prctl.h> 83 #include <inttypes.h> 84 #include <locale.h> 85 #include <math.h> 86 #include <sys/types.h> 87 #include <sys/stat.h> 88 #include <sys/wait.h> 89 #include <unistd.h> 90 #include <sys/time.h> 91 #include <sys/resource.h> 92 #include <linux/err.h> 93 94 #include <linux/ctype.h> 95 #include <perf/evlist.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 /* Default events used for perf stat -T */ 103 static const char *transaction_attrs = { 104 "task-clock," 105 "{" 106 "instructions," 107 "cycles," 108 "cpu/cycles-t/," 109 "cpu/tx-start/," 110 "cpu/el-start/," 111 "cpu/cycles-ct/" 112 "}" 113 }; 114 115 /* More limited version when the CPU does not have all events. */ 116 static const char * transaction_limited_attrs = { 117 "task-clock," 118 "{" 119 "instructions," 120 "cycles," 121 "cpu/cycles-t/," 122 "cpu/tx-start/" 123 "}" 124 }; 125 126 static const char * topdown_attrs[] = { 127 "topdown-total-slots", 128 "topdown-slots-retired", 129 "topdown-recovery-bubbles", 130 "topdown-fetch-bubbles", 131 "topdown-slots-issued", 132 NULL, 133 }; 134 135 static const char *topdown_metric_attrs[] = { 136 "slots", 137 "topdown-retiring", 138 "topdown-bad-spec", 139 "topdown-fe-bound", 140 "topdown-be-bound", 141 NULL, 142 }; 143 144 static const char *topdown_metric_L2_attrs[] = { 145 "slots", 146 "topdown-retiring", 147 "topdown-bad-spec", 148 "topdown-fe-bound", 149 "topdown-be-bound", 150 "topdown-heavy-ops", 151 "topdown-br-mispredict", 152 "topdown-fetch-lat", 153 "topdown-mem-bound", 154 NULL, 155 }; 156 157 #define TOPDOWN_MAX_LEVEL 2 158 159 static const char *smi_cost_attrs = { 160 "{" 161 "msr/aperf/," 162 "msr/smi/," 163 "cycles" 164 "}" 165 }; 166 167 static struct evlist *evsel_list; 168 static bool all_counters_use_bpf = true; 169 170 static struct target target = { 171 .uid = UINT_MAX, 172 }; 173 174 #define METRIC_ONLY_LEN 20 175 176 static volatile pid_t child_pid = -1; 177 static int detailed_run = 0; 178 static bool transaction_run; 179 static bool topdown_run = false; 180 static bool smi_cost = false; 181 static bool smi_reset = false; 182 static int big_num_opt = -1; 183 static bool group = false; 184 static const char *pre_cmd = NULL; 185 static const char *post_cmd = NULL; 186 static bool sync_run = false; 187 static bool forever = false; 188 static bool force_metric_only = false; 189 static struct timespec ref_time; 190 static bool append_file; 191 static bool interval_count; 192 static const char *output_name; 193 static int output_fd; 194 195 struct perf_stat { 196 bool record; 197 struct perf_data data; 198 struct perf_session *session; 199 u64 bytes_written; 200 struct perf_tool tool; 201 bool maps_allocated; 202 struct perf_cpu_map *cpus; 203 struct perf_thread_map *threads; 204 enum aggr_mode aggr_mode; 205 }; 206 207 static struct perf_stat perf_stat; 208 #define STAT_RECORD perf_stat.record 209 210 static volatile int done = 0; 211 212 static struct perf_stat_config stat_config = { 213 .aggr_mode = AGGR_GLOBAL, 214 .scale = true, 215 .unit_width = 4, /* strlen("unit") */ 216 .run_count = 1, 217 .metric_only_len = METRIC_ONLY_LEN, 218 .walltime_nsecs_stats = &walltime_nsecs_stats, 219 .big_num = true, 220 .ctl_fd = -1, 221 .ctl_fd_ack = -1, 222 .iostat_run = false, 223 }; 224 225 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 226 { 227 if (!a->core.cpus && !b->core.cpus) 228 return true; 229 230 if (!a->core.cpus || !b->core.cpus) 231 return false; 232 233 if (a->core.cpus->nr != b->core.cpus->nr) 234 return false; 235 236 for (int i = 0; i < a->core.cpus->nr; i++) { 237 if (a->core.cpus->map[i] != b->core.cpus->map[i]) 238 return false; 239 } 240 241 return true; 242 } 243 244 static void evlist__check_cpu_maps(struct evlist *evlist) 245 { 246 struct evsel *evsel, *pos, *leader; 247 char buf[1024]; 248 249 if (evlist__has_hybrid(evlist)) 250 evlist__warn_hybrid_group(evlist); 251 252 evlist__for_each_entry(evlist, evsel) { 253 leader = evsel__leader(evsel); 254 255 /* Check that leader matches cpus with each member. */ 256 if (leader == evsel) 257 continue; 258 if (cpus_map_matched(leader, evsel)) 259 continue; 260 261 /* If there's mismatch disable the group and warn user. */ 262 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 263 evsel__group_desc(leader, buf, sizeof(buf)); 264 pr_warning(" %s\n", buf); 265 266 if (verbose) { 267 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 268 pr_warning(" %s: %s\n", leader->name, buf); 269 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 270 pr_warning(" %s: %s\n", evsel->name, buf); 271 } 272 273 for_each_group_evsel(pos, leader) { 274 evsel__set_leader(pos, pos); 275 pos->core.nr_members = 0; 276 } 277 evsel->core.leader->nr_members = 0; 278 } 279 } 280 281 static inline void diff_timespec(struct timespec *r, struct timespec *a, 282 struct timespec *b) 283 { 284 r->tv_sec = a->tv_sec - b->tv_sec; 285 if (a->tv_nsec < b->tv_nsec) { 286 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 287 r->tv_sec--; 288 } else { 289 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 290 } 291 } 292 293 static void perf_stat__reset_stats(void) 294 { 295 int i; 296 297 evlist__reset_stats(evsel_list); 298 perf_stat__reset_shadow_stats(); 299 300 for (i = 0; i < stat_config.stats_num; i++) 301 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 302 } 303 304 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 305 union perf_event *event, 306 struct perf_sample *sample __maybe_unused, 307 struct machine *machine __maybe_unused) 308 { 309 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 310 pr_err("failed to write perf data, error: %m\n"); 311 return -1; 312 } 313 314 perf_stat.bytes_written += event->header.size; 315 return 0; 316 } 317 318 static int write_stat_round_event(u64 tm, u64 type) 319 { 320 return perf_event__synthesize_stat_round(NULL, tm, type, 321 process_synthesized_event, 322 NULL); 323 } 324 325 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 326 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 327 328 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 329 330 static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, 331 struct perf_counts_values *count) 332 { 333 struct perf_sample_id *sid = SID(counter, cpu, thread); 334 335 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 336 process_synthesized_event, NULL); 337 } 338 339 static int read_single_counter(struct evsel *counter, int cpu, 340 int thread, struct timespec *rs) 341 { 342 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 343 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 344 struct perf_counts_values *count = 345 perf_counts(counter->counts, cpu, thread); 346 count->ena = count->run = val; 347 count->val = val; 348 return 0; 349 } 350 return evsel__read_counter(counter, cpu, thread); 351 } 352 353 /* 354 * Read out the results of a single counter: 355 * do not aggregate counts across CPUs in system-wide mode 356 */ 357 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) 358 { 359 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 360 int thread; 361 362 if (!counter->supported) 363 return -ENOENT; 364 365 if (counter->core.system_wide) 366 nthreads = 1; 367 368 for (thread = 0; thread < nthreads; thread++) { 369 struct perf_counts_values *count; 370 371 count = perf_counts(counter->counts, cpu, thread); 372 373 /* 374 * The leader's group read loads data into its group members 375 * (via evsel__read_counter()) and sets their count->loaded. 376 */ 377 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && 378 read_single_counter(counter, cpu, thread, rs)) { 379 counter->counts->scaled = -1; 380 perf_counts(counter->counts, cpu, thread)->ena = 0; 381 perf_counts(counter->counts, cpu, thread)->run = 0; 382 return -1; 383 } 384 385 perf_counts__set_loaded(counter->counts, cpu, thread, false); 386 387 if (STAT_RECORD) { 388 if (evsel__write_stat_event(counter, cpu, thread, count)) { 389 pr_err("failed to write stat event\n"); 390 return -1; 391 } 392 } 393 394 if (verbose > 1) { 395 fprintf(stat_config.output, 396 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 397 evsel__name(counter), 398 cpu, 399 count->val, count->ena, count->run); 400 } 401 } 402 403 return 0; 404 } 405 406 static int read_affinity_counters(struct timespec *rs) 407 { 408 struct evsel *counter; 409 struct affinity affinity; 410 int i, ncpus, cpu; 411 412 if (all_counters_use_bpf) 413 return 0; 414 415 if (affinity__setup(&affinity) < 0) 416 return -1; 417 418 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); 419 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 420 ncpus = 1; 421 evlist__for_each_cpu(evsel_list, i, cpu) { 422 if (i >= ncpus) 423 break; 424 affinity__set(&affinity, cpu); 425 426 evlist__for_each_entry(evsel_list, counter) { 427 if (evsel__cpu_iter_skip(counter, cpu)) 428 continue; 429 if (evsel__is_bpf(counter)) 430 continue; 431 if (!counter->err) { 432 counter->err = read_counter_cpu(counter, rs, 433 counter->cpu_iter - 1); 434 } 435 } 436 } 437 affinity__cleanup(&affinity); 438 return 0; 439 } 440 441 static int read_bpf_map_counters(void) 442 { 443 struct evsel *counter; 444 int err; 445 446 evlist__for_each_entry(evsel_list, counter) { 447 if (!evsel__is_bpf(counter)) 448 continue; 449 450 err = bpf_counter__read(counter); 451 if (err) 452 return err; 453 } 454 return 0; 455 } 456 457 static void read_counters(struct timespec *rs) 458 { 459 struct evsel *counter; 460 461 if (!stat_config.stop_read_counter) { 462 if (read_bpf_map_counters() || 463 read_affinity_counters(rs)) 464 return; 465 } 466 467 evlist__for_each_entry(evsel_list, counter) { 468 if (counter->err) 469 pr_debug("failed to read counter %s\n", counter->name); 470 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 471 pr_warning("failed to process counter %s\n", counter->name); 472 counter->err = 0; 473 } 474 } 475 476 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 477 { 478 int i; 479 480 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 481 if (!config->stats) 482 return -1; 483 484 config->stats_num = nthreads; 485 486 for (i = 0; i < nthreads; i++) 487 runtime_stat__init(&config->stats[i]); 488 489 return 0; 490 } 491 492 static void runtime_stat_delete(struct perf_stat_config *config) 493 { 494 int i; 495 496 if (!config->stats) 497 return; 498 499 for (i = 0; i < config->stats_num; i++) 500 runtime_stat__exit(&config->stats[i]); 501 502 zfree(&config->stats); 503 } 504 505 static void runtime_stat_reset(struct perf_stat_config *config) 506 { 507 int i; 508 509 if (!config->stats) 510 return; 511 512 for (i = 0; i < config->stats_num; i++) 513 perf_stat__reset_shadow_per_stat(&config->stats[i]); 514 } 515 516 static void process_interval(void) 517 { 518 struct timespec ts, rs; 519 520 clock_gettime(CLOCK_MONOTONIC, &ts); 521 diff_timespec(&rs, &ts, &ref_time); 522 523 perf_stat__reset_shadow_per_stat(&rt_stat); 524 runtime_stat_reset(&stat_config); 525 read_counters(&rs); 526 527 if (STAT_RECORD) { 528 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 529 pr_err("failed to write stat round event\n"); 530 } 531 532 init_stats(&walltime_nsecs_stats); 533 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 534 print_counters(&rs, 0, NULL); 535 } 536 537 static bool handle_interval(unsigned int interval, int *times) 538 { 539 if (interval) { 540 process_interval(); 541 if (interval_count && !(--(*times))) 542 return true; 543 } 544 return false; 545 } 546 547 static int enable_counters(void) 548 { 549 struct evsel *evsel; 550 int err; 551 552 evlist__for_each_entry(evsel_list, evsel) { 553 if (!evsel__is_bpf(evsel)) 554 continue; 555 556 err = bpf_counter__enable(evsel); 557 if (err) 558 return err; 559 } 560 561 if (stat_config.initial_delay < 0) { 562 pr_info(EVLIST_DISABLED_MSG); 563 return 0; 564 } 565 566 if (stat_config.initial_delay > 0) { 567 pr_info(EVLIST_DISABLED_MSG); 568 usleep(stat_config.initial_delay * USEC_PER_MSEC); 569 } 570 571 /* 572 * We need to enable counters only if: 573 * - we don't have tracee (attaching to task or cpu) 574 * - we have initial delay configured 575 */ 576 if (!target__none(&target) || stat_config.initial_delay) { 577 if (!all_counters_use_bpf) 578 evlist__enable(evsel_list); 579 if (stat_config.initial_delay > 0) 580 pr_info(EVLIST_ENABLED_MSG); 581 } 582 return 0; 583 } 584 585 static void disable_counters(void) 586 { 587 struct evsel *counter; 588 589 /* 590 * If we don't have tracee (attaching to task or cpu), counters may 591 * still be running. To get accurate group ratios, we must stop groups 592 * from counting before reading their constituent counters. 593 */ 594 if (!target__none(&target)) { 595 evlist__for_each_entry(evsel_list, counter) 596 bpf_counter__disable(counter); 597 if (!all_counters_use_bpf) 598 evlist__disable(evsel_list); 599 } 600 } 601 602 static volatile int workload_exec_errno; 603 604 /* 605 * evlist__prepare_workload will send a SIGUSR1 606 * if the fork fails, since we asked by setting its 607 * want_signal to true. 608 */ 609 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 610 void *ucontext __maybe_unused) 611 { 612 workload_exec_errno = info->si_value.sival_int; 613 } 614 615 static bool evsel__should_store_id(struct evsel *counter) 616 { 617 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 618 } 619 620 static bool is_target_alive(struct target *_target, 621 struct perf_thread_map *threads) 622 { 623 struct stat st; 624 int i; 625 626 if (!target__has_task(_target)) 627 return true; 628 629 for (i = 0; i < threads->nr; i++) { 630 char path[PATH_MAX]; 631 632 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 633 threads->map[i].pid); 634 635 if (!stat(path, &st)) 636 return true; 637 } 638 639 return false; 640 } 641 642 static void process_evlist(struct evlist *evlist, unsigned int interval) 643 { 644 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 645 646 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 647 switch (cmd) { 648 case EVLIST_CTL_CMD_ENABLE: 649 if (interval) 650 process_interval(); 651 break; 652 case EVLIST_CTL_CMD_DISABLE: 653 if (interval) 654 process_interval(); 655 break; 656 case EVLIST_CTL_CMD_SNAPSHOT: 657 case EVLIST_CTL_CMD_ACK: 658 case EVLIST_CTL_CMD_UNSUPPORTED: 659 case EVLIST_CTL_CMD_EVLIST: 660 case EVLIST_CTL_CMD_STOP: 661 case EVLIST_CTL_CMD_PING: 662 default: 663 break; 664 } 665 } 666 } 667 668 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 669 int *time_to_sleep) 670 { 671 int tts = *time_to_sleep; 672 struct timespec time_diff; 673 674 diff_timespec(&time_diff, time_stop, time_start); 675 676 tts -= time_diff.tv_sec * MSEC_PER_SEC + 677 time_diff.tv_nsec / NSEC_PER_MSEC; 678 679 if (tts < 0) 680 tts = 0; 681 682 *time_to_sleep = tts; 683 } 684 685 static int dispatch_events(bool forks, int timeout, int interval, int *times) 686 { 687 int child_exited = 0, status = 0; 688 int time_to_sleep, sleep_time; 689 struct timespec time_start, time_stop; 690 691 if (interval) 692 sleep_time = interval; 693 else if (timeout) 694 sleep_time = timeout; 695 else 696 sleep_time = 1000; 697 698 time_to_sleep = sleep_time; 699 700 while (!done) { 701 if (forks) 702 child_exited = waitpid(child_pid, &status, WNOHANG); 703 else 704 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 705 706 if (child_exited) 707 break; 708 709 clock_gettime(CLOCK_MONOTONIC, &time_start); 710 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 711 if (timeout || handle_interval(interval, times)) 712 break; 713 time_to_sleep = sleep_time; 714 } else { /* fd revent */ 715 process_evlist(evsel_list, interval); 716 clock_gettime(CLOCK_MONOTONIC, &time_stop); 717 compute_tts(&time_start, &time_stop, &time_to_sleep); 718 } 719 } 720 721 return status; 722 } 723 724 enum counter_recovery { 725 COUNTER_SKIP, 726 COUNTER_RETRY, 727 COUNTER_FATAL, 728 }; 729 730 static enum counter_recovery stat_handle_error(struct evsel *counter) 731 { 732 char msg[BUFSIZ]; 733 /* 734 * PPC returns ENXIO for HW counters until 2.6.37 735 * (behavior changed with commit b0a873e). 736 */ 737 if (errno == EINVAL || errno == ENOSYS || 738 errno == ENOENT || errno == EOPNOTSUPP || 739 errno == ENXIO) { 740 if (verbose > 0) 741 ui__warning("%s event is not supported by the kernel.\n", 742 evsel__name(counter)); 743 counter->supported = false; 744 /* 745 * errored is a sticky flag that means one of the counter's 746 * cpu event had a problem and needs to be reexamined. 747 */ 748 counter->errored = true; 749 750 if ((evsel__leader(counter) != counter) || 751 !(counter->core.leader->nr_members > 1)) 752 return COUNTER_SKIP; 753 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 754 if (verbose > 0) 755 ui__warning("%s\n", msg); 756 return COUNTER_RETRY; 757 } else if (target__has_per_thread(&target) && 758 evsel_list->core.threads && 759 evsel_list->core.threads->err_thread != -1) { 760 /* 761 * For global --per-thread case, skip current 762 * error thread. 763 */ 764 if (!thread_map__remove(evsel_list->core.threads, 765 evsel_list->core.threads->err_thread)) { 766 evsel_list->core.threads->err_thread = -1; 767 return COUNTER_RETRY; 768 } 769 } 770 771 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 772 ui__error("%s\n", msg); 773 774 if (child_pid != -1) 775 kill(child_pid, SIGTERM); 776 return COUNTER_FATAL; 777 } 778 779 static int __run_perf_stat(int argc, const char **argv, int run_idx) 780 { 781 int interval = stat_config.interval; 782 int times = stat_config.times; 783 int timeout = stat_config.timeout; 784 char msg[BUFSIZ]; 785 unsigned long long t0, t1; 786 struct evsel *counter; 787 size_t l; 788 int status = 0; 789 const bool forks = (argc > 0); 790 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 791 struct affinity affinity; 792 int i, cpu, err; 793 bool second_pass = false; 794 795 if (forks) { 796 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 797 perror("failed to prepare workload"); 798 return -1; 799 } 800 child_pid = evsel_list->workload.pid; 801 } 802 803 if (group) 804 evlist__set_leader(evsel_list); 805 806 if (affinity__setup(&affinity) < 0) 807 return -1; 808 809 evlist__for_each_entry(evsel_list, counter) { 810 if (bpf_counter__load(counter, &target)) 811 return -1; 812 if (!evsel__is_bpf(counter)) 813 all_counters_use_bpf = false; 814 } 815 816 evlist__for_each_cpu (evsel_list, i, cpu) { 817 /* 818 * bperf calls evsel__open_per_cpu() in bperf__load(), so 819 * no need to call it again here. 820 */ 821 if (target.use_bpf) 822 break; 823 affinity__set(&affinity, cpu); 824 825 evlist__for_each_entry(evsel_list, counter) { 826 if (evsel__cpu_iter_skip(counter, cpu)) 827 continue; 828 if (counter->reset_group || counter->errored) 829 continue; 830 if (evsel__is_bpf(counter)) 831 continue; 832 try_again: 833 if (create_perf_stat_counter(counter, &stat_config, &target, 834 counter->cpu_iter - 1) < 0) { 835 836 /* 837 * Weak group failed. We cannot just undo this here 838 * because earlier CPUs might be in group mode, and the kernel 839 * doesn't support mixing group and non group reads. Defer 840 * it to later. 841 * Don't close here because we're in the wrong affinity. 842 */ 843 if ((errno == EINVAL || errno == EBADF) && 844 evsel__leader(counter) != counter && 845 counter->weak_group) { 846 evlist__reset_weak_group(evsel_list, counter, false); 847 assert(counter->reset_group); 848 second_pass = true; 849 continue; 850 } 851 852 switch (stat_handle_error(counter)) { 853 case COUNTER_FATAL: 854 return -1; 855 case COUNTER_RETRY: 856 goto try_again; 857 case COUNTER_SKIP: 858 continue; 859 default: 860 break; 861 } 862 863 } 864 counter->supported = true; 865 } 866 } 867 868 if (second_pass) { 869 /* 870 * Now redo all the weak group after closing them, 871 * and also close errored counters. 872 */ 873 874 evlist__for_each_cpu(evsel_list, i, cpu) { 875 affinity__set(&affinity, cpu); 876 /* First close errored or weak retry */ 877 evlist__for_each_entry(evsel_list, counter) { 878 if (!counter->reset_group && !counter->errored) 879 continue; 880 if (evsel__cpu_iter_skip_no_inc(counter, cpu)) 881 continue; 882 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); 883 } 884 /* Now reopen weak */ 885 evlist__for_each_entry(evsel_list, counter) { 886 if (!counter->reset_group && !counter->errored) 887 continue; 888 if (evsel__cpu_iter_skip(counter, cpu)) 889 continue; 890 if (!counter->reset_group) 891 continue; 892 try_again_reset: 893 pr_debug2("reopening weak %s\n", evsel__name(counter)); 894 if (create_perf_stat_counter(counter, &stat_config, &target, 895 counter->cpu_iter - 1) < 0) { 896 897 switch (stat_handle_error(counter)) { 898 case COUNTER_FATAL: 899 return -1; 900 case COUNTER_RETRY: 901 goto try_again_reset; 902 case COUNTER_SKIP: 903 continue; 904 default: 905 break; 906 } 907 } 908 counter->supported = true; 909 } 910 } 911 } 912 affinity__cleanup(&affinity); 913 914 evlist__for_each_entry(evsel_list, counter) { 915 if (!counter->supported) { 916 perf_evsel__free_fd(&counter->core); 917 continue; 918 } 919 920 l = strlen(counter->unit); 921 if (l > stat_config.unit_width) 922 stat_config.unit_width = l; 923 924 if (evsel__should_store_id(counter) && 925 evsel__store_ids(counter, evsel_list)) 926 return -1; 927 } 928 929 if (evlist__apply_filters(evsel_list, &counter)) { 930 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 931 counter->filter, evsel__name(counter), errno, 932 str_error_r(errno, msg, sizeof(msg))); 933 return -1; 934 } 935 936 if (STAT_RECORD) { 937 int fd = perf_data__fd(&perf_stat.data); 938 939 if (is_pipe) { 940 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 941 } else { 942 err = perf_session__write_header(perf_stat.session, evsel_list, 943 fd, false); 944 } 945 946 if (err < 0) 947 return err; 948 949 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 950 process_synthesized_event, is_pipe); 951 if (err < 0) 952 return err; 953 } 954 955 /* 956 * Enable counters and exec the command: 957 */ 958 if (forks) { 959 evlist__start_workload(evsel_list); 960 err = enable_counters(); 961 if (err) 962 return -1; 963 964 t0 = rdclock(); 965 clock_gettime(CLOCK_MONOTONIC, &ref_time); 966 967 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 968 status = dispatch_events(forks, timeout, interval, ×); 969 if (child_pid != -1) { 970 if (timeout) 971 kill(child_pid, SIGTERM); 972 wait4(child_pid, &status, 0, &stat_config.ru_data); 973 } 974 975 if (workload_exec_errno) { 976 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 977 pr_err("Workload failed: %s\n", emsg); 978 return -1; 979 } 980 981 if (WIFSIGNALED(status)) 982 psignal(WTERMSIG(status), argv[0]); 983 } else { 984 err = enable_counters(); 985 if (err) 986 return -1; 987 988 t0 = rdclock(); 989 clock_gettime(CLOCK_MONOTONIC, &ref_time); 990 991 status = dispatch_events(forks, timeout, interval, ×); 992 } 993 994 disable_counters(); 995 996 t1 = rdclock(); 997 998 if (stat_config.walltime_run_table) 999 stat_config.walltime_run[run_idx] = t1 - t0; 1000 1001 if (interval && stat_config.summary) { 1002 stat_config.interval = 0; 1003 stat_config.stop_read_counter = true; 1004 init_stats(&walltime_nsecs_stats); 1005 update_stats(&walltime_nsecs_stats, t1 - t0); 1006 1007 if (stat_config.aggr_mode == AGGR_GLOBAL) 1008 evlist__save_aggr_prev_raw_counts(evsel_list); 1009 1010 evlist__copy_prev_raw_counts(evsel_list); 1011 evlist__reset_prev_raw_counts(evsel_list); 1012 runtime_stat_reset(&stat_config); 1013 perf_stat__reset_shadow_per_stat(&rt_stat); 1014 } else 1015 update_stats(&walltime_nsecs_stats, t1 - t0); 1016 1017 /* 1018 * Closing a group leader splits the group, and as we only disable 1019 * group leaders, results in remaining events becoming enabled. To 1020 * avoid arbitrary skew, we must read all counters before closing any 1021 * group leaders. 1022 */ 1023 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 1024 1025 /* 1026 * We need to keep evsel_list alive, because it's processed 1027 * later the evsel_list will be closed after. 1028 */ 1029 if (!STAT_RECORD) 1030 evlist__close(evsel_list); 1031 1032 return WEXITSTATUS(status); 1033 } 1034 1035 static int run_perf_stat(int argc, const char **argv, int run_idx) 1036 { 1037 int ret; 1038 1039 if (pre_cmd) { 1040 ret = system(pre_cmd); 1041 if (ret) 1042 return ret; 1043 } 1044 1045 if (sync_run) 1046 sync(); 1047 1048 ret = __run_perf_stat(argc, argv, run_idx); 1049 if (ret) 1050 return ret; 1051 1052 if (post_cmd) { 1053 ret = system(post_cmd); 1054 if (ret) 1055 return ret; 1056 } 1057 1058 return ret; 1059 } 1060 1061 static void print_counters(struct timespec *ts, int argc, const char **argv) 1062 { 1063 /* Do not print anything if we record to the pipe. */ 1064 if (STAT_RECORD && perf_stat.data.is_pipe) 1065 return; 1066 if (stat_config.quiet) 1067 return; 1068 1069 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1070 } 1071 1072 static volatile int signr = -1; 1073 1074 static void skip_signal(int signo) 1075 { 1076 if ((child_pid == -1) || stat_config.interval) 1077 done = 1; 1078 1079 signr = signo; 1080 /* 1081 * render child_pid harmless 1082 * won't send SIGTERM to a random 1083 * process in case of race condition 1084 * and fast PID recycling 1085 */ 1086 child_pid = -1; 1087 } 1088 1089 static void sig_atexit(void) 1090 { 1091 sigset_t set, oset; 1092 1093 /* 1094 * avoid race condition with SIGCHLD handler 1095 * in skip_signal() which is modifying child_pid 1096 * goal is to avoid send SIGTERM to a random 1097 * process 1098 */ 1099 sigemptyset(&set); 1100 sigaddset(&set, SIGCHLD); 1101 sigprocmask(SIG_BLOCK, &set, &oset); 1102 1103 if (child_pid != -1) 1104 kill(child_pid, SIGTERM); 1105 1106 sigprocmask(SIG_SETMASK, &oset, NULL); 1107 1108 if (signr == -1) 1109 return; 1110 1111 signal(signr, SIG_DFL); 1112 kill(getpid(), signr); 1113 } 1114 1115 void perf_stat__set_big_num(int set) 1116 { 1117 stat_config.big_num = (set != 0); 1118 } 1119 1120 void perf_stat__set_no_csv_summary(int set) 1121 { 1122 stat_config.no_csv_summary = (set != 0); 1123 } 1124 1125 static int stat__set_big_num(const struct option *opt __maybe_unused, 1126 const char *s __maybe_unused, int unset) 1127 { 1128 big_num_opt = unset ? 0 : 1; 1129 perf_stat__set_big_num(!unset); 1130 return 0; 1131 } 1132 1133 static int enable_metric_only(const struct option *opt __maybe_unused, 1134 const char *s __maybe_unused, int unset) 1135 { 1136 force_metric_only = true; 1137 stat_config.metric_only = !unset; 1138 return 0; 1139 } 1140 1141 static int parse_metric_groups(const struct option *opt, 1142 const char *str, 1143 int unset __maybe_unused) 1144 { 1145 return metricgroup__parse_groups(opt, str, 1146 stat_config.metric_no_group, 1147 stat_config.metric_no_merge, 1148 &stat_config.metric_events); 1149 } 1150 1151 static int parse_control_option(const struct option *opt, 1152 const char *str, 1153 int unset __maybe_unused) 1154 { 1155 struct perf_stat_config *config = opt->value; 1156 1157 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1158 } 1159 1160 static int parse_stat_cgroups(const struct option *opt, 1161 const char *str, int unset) 1162 { 1163 if (stat_config.cgroup_list) { 1164 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1165 return -1; 1166 } 1167 1168 return parse_cgroups(opt, str, unset); 1169 } 1170 1171 static int parse_hybrid_type(const struct option *opt, 1172 const char *str, 1173 int unset __maybe_unused) 1174 { 1175 struct evlist *evlist = *(struct evlist **)opt->value; 1176 1177 if (!list_empty(&evlist->core.entries)) { 1178 fprintf(stderr, "Must define cputype before events/metrics\n"); 1179 return -1; 1180 } 1181 1182 evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); 1183 if (!evlist->hybrid_pmu_name) { 1184 fprintf(stderr, "--cputype %s is not supported!\n", str); 1185 return -1; 1186 } 1187 1188 return 0; 1189 } 1190 1191 static struct option stat_options[] = { 1192 OPT_BOOLEAN('T', "transaction", &transaction_run, 1193 "hardware transaction statistics"), 1194 OPT_CALLBACK('e', "event", &evsel_list, "event", 1195 "event selector. use 'perf list' to list available events", 1196 parse_events_option), 1197 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1198 "event filter", parse_filter), 1199 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1200 "child tasks do not inherit counters"), 1201 OPT_STRING('p', "pid", &target.pid, "pid", 1202 "stat events on existing process id"), 1203 OPT_STRING('t', "tid", &target.tid, "tid", 1204 "stat events on existing thread id"), 1205 #ifdef HAVE_BPF_SKEL 1206 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1207 "stat events on existing bpf program id"), 1208 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1209 "use bpf program to count events"), 1210 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1211 "path to perf_event_attr map"), 1212 #endif 1213 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1214 "system-wide collection from all CPUs"), 1215 OPT_BOOLEAN('g', "group", &group, 1216 "put the counters into a counter group"), 1217 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1218 "Use --no-scale to disable counter scaling for multiplexing"), 1219 OPT_INCR('v', "verbose", &verbose, 1220 "be more verbose (show counter open errors, etc)"), 1221 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1222 "repeat command and print average + stddev (max: 100, forever: 0)"), 1223 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1224 "display details about each run (only with -r option)"), 1225 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1226 "null run - dont start any counters"), 1227 OPT_INCR('d', "detailed", &detailed_run, 1228 "detailed run - start a lot of events"), 1229 OPT_BOOLEAN('S', "sync", &sync_run, 1230 "call sync() before starting a run"), 1231 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1232 "print large numbers with thousands\' separators", 1233 stat__set_big_num), 1234 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1235 "list of cpus to monitor in system-wide"), 1236 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1237 "disable CPU count aggregation", AGGR_NONE), 1238 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1239 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1240 "print counts with custom separator"), 1241 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1242 "monitor event in cgroup name only", parse_stat_cgroups), 1243 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1244 "expand events for each cgroup"), 1245 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1246 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1247 OPT_INTEGER(0, "log-fd", &output_fd, 1248 "log output to fd, instead of stderr"), 1249 OPT_STRING(0, "pre", &pre_cmd, "command", 1250 "command to run prior to the measured command"), 1251 OPT_STRING(0, "post", &post_cmd, "command", 1252 "command to run after to the measured command"), 1253 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1254 "print counts at regular interval in ms " 1255 "(overhead is possible for values <= 100ms)"), 1256 OPT_INTEGER(0, "interval-count", &stat_config.times, 1257 "print counts for fixed number of times"), 1258 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1259 "clear screen in between new interval"), 1260 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1261 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1262 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1263 "aggregate counts per processor socket", AGGR_SOCKET), 1264 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1265 "aggregate counts per processor die", AGGR_DIE), 1266 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1267 "aggregate counts per physical processor core", AGGR_CORE), 1268 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1269 "aggregate counts per thread", AGGR_THREAD), 1270 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1271 "aggregate counts per numa node", AGGR_NODE), 1272 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1273 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1274 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1275 "Only print computed metrics. No raw values", enable_metric_only), 1276 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1277 "don't group metric events, impacts multiplexing"), 1278 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1279 "don't try to share events between metrics in a group"), 1280 OPT_BOOLEAN(0, "topdown", &topdown_run, 1281 "measure top-down statistics"), 1282 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1283 "Set the metrics level for the top-down statistics (0: max level)"), 1284 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1285 "measure SMI cost"), 1286 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1287 "monitor specified metrics or metric groups (separated by ,)", 1288 parse_metric_groups), 1289 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1290 "Configure all used events to run in kernel space.", 1291 PARSE_OPT_EXCLUSIVE), 1292 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1293 "Configure all used events to run in user space.", 1294 PARSE_OPT_EXCLUSIVE), 1295 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1296 "Use with 'percore' event qualifier to show the event " 1297 "counts of one hardware thread by sum up total hardware " 1298 "threads of same physical core"), 1299 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1300 "print summary for interval mode"), 1301 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1302 "don't print 'summary' for CSV summary output"), 1303 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1304 "don't print output (useful with record)"), 1305 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", 1306 "Only enable events on applying cpu with this type " 1307 "for hybrid platform (e.g. core or atom)", 1308 parse_hybrid_type), 1309 #ifdef HAVE_LIBPFM 1310 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1311 "libpfm4 event selector. use 'perf list' to list available events", 1312 parse_libpfm_events_option), 1313 #endif 1314 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1315 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1316 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1317 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1318 parse_control_option), 1319 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1320 "measure I/O performance metrics provided by arch/platform", 1321 iostat_parse), 1322 OPT_END() 1323 }; 1324 1325 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1326 int cpu) 1327 { 1328 return cpu_map__get_socket_aggr_by_cpu(cpu, /*data=*/NULL); 1329 } 1330 1331 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1332 int cpu) 1333 { 1334 return cpu_map__get_die_aggr_by_cpu(cpu, /*data=*/NULL); 1335 } 1336 1337 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1338 int cpu) 1339 { 1340 return cpu_map__get_core_aggr_by_cpu(cpu, /*data=*/NULL); 1341 } 1342 1343 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1344 int cpu) 1345 { 1346 return cpu_map__get_node_aggr_by_cpu(cpu, /*data=*/NULL); 1347 } 1348 1349 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1350 aggr_get_id_t get_id, int cpu) 1351 { 1352 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1353 1354 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu])) 1355 config->cpus_aggr_map->map[cpu] = get_id(config, cpu); 1356 1357 id = config->cpus_aggr_map->map[cpu]; 1358 return id; 1359 } 1360 1361 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1362 int cpu) 1363 { 1364 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1365 } 1366 1367 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1368 int cpu) 1369 { 1370 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1371 } 1372 1373 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1374 int cpu) 1375 { 1376 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1377 } 1378 1379 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1380 int cpu) 1381 { 1382 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1383 } 1384 1385 static bool term_percore_set(void) 1386 { 1387 struct evsel *counter; 1388 1389 evlist__for_each_entry(evsel_list, counter) { 1390 if (counter->percore) 1391 return true; 1392 } 1393 1394 return false; 1395 } 1396 1397 static int perf_stat_init_aggr_mode(void) 1398 { 1399 int nr; 1400 1401 switch (stat_config.aggr_mode) { 1402 case AGGR_SOCKET: 1403 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1404 perror("cannot build socket map"); 1405 return -1; 1406 } 1407 stat_config.aggr_get_id = perf_stat__get_socket_cached; 1408 break; 1409 case AGGR_DIE: 1410 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1411 perror("cannot build die map"); 1412 return -1; 1413 } 1414 stat_config.aggr_get_id = perf_stat__get_die_cached; 1415 break; 1416 case AGGR_CORE: 1417 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1418 perror("cannot build core map"); 1419 return -1; 1420 } 1421 stat_config.aggr_get_id = perf_stat__get_core_cached; 1422 break; 1423 case AGGR_NODE: 1424 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1425 perror("cannot build core map"); 1426 return -1; 1427 } 1428 stat_config.aggr_get_id = perf_stat__get_node_cached; 1429 break; 1430 case AGGR_NONE: 1431 if (term_percore_set()) { 1432 if (cpu_map__build_core_map(evsel_list->core.cpus, 1433 &stat_config.aggr_map)) { 1434 perror("cannot build core map"); 1435 return -1; 1436 } 1437 stat_config.aggr_get_id = perf_stat__get_core_cached; 1438 } 1439 break; 1440 case AGGR_GLOBAL: 1441 case AGGR_THREAD: 1442 case AGGR_UNSET: 1443 default: 1444 break; 1445 } 1446 1447 /* 1448 * The evsel_list->cpus is the base we operate on, 1449 * taking the highest cpu number to be the size of 1450 * the aggregation translate cpumap. 1451 */ 1452 nr = perf_cpu_map__max(evsel_list->core.cpus); 1453 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1454 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1455 } 1456 1457 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1458 { 1459 if (map) { 1460 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1461 "cpu_aggr_map refcnt unbalanced\n"); 1462 free(map); 1463 } 1464 } 1465 1466 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1467 { 1468 if (map && refcount_dec_and_test(&map->refcnt)) 1469 cpu_aggr_map__delete(map); 1470 } 1471 1472 static void perf_stat__exit_aggr_mode(void) 1473 { 1474 cpu_aggr_map__put(stat_config.aggr_map); 1475 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1476 stat_config.aggr_map = NULL; 1477 stat_config.cpus_aggr_map = NULL; 1478 } 1479 1480 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) 1481 { 1482 struct perf_env *env = data; 1483 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1484 1485 if (cpu != -1) 1486 id.socket = env->cpu[cpu].socket_id; 1487 1488 return id; 1489 } 1490 1491 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) 1492 { 1493 struct perf_env *env = data; 1494 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1495 1496 if (cpu != -1) { 1497 /* 1498 * die_id is relative to socket, so start 1499 * with the socket ID and then add die to 1500 * make a unique ID. 1501 */ 1502 id.socket = env->cpu[cpu].socket_id; 1503 id.die = env->cpu[cpu].die_id; 1504 } 1505 1506 return id; 1507 } 1508 1509 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) 1510 { 1511 struct perf_env *env = data; 1512 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1513 1514 if (cpu != -1) { 1515 /* 1516 * core_id is relative to socket and die, 1517 * we need a global id. So we set 1518 * socket, die id and core id 1519 */ 1520 id.socket = env->cpu[cpu].socket_id; 1521 id.die = env->cpu[cpu].die_id; 1522 id.core = env->cpu[cpu].core_id; 1523 } 1524 1525 return id; 1526 } 1527 1528 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) 1529 { 1530 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1531 1532 id.node = perf_env__numa_node(data, cpu); 1533 return id; 1534 } 1535 1536 static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, 1537 struct cpu_aggr_map **sockp) 1538 { 1539 return cpu_map__build_map(cpus, sockp, perf_env__get_socket_aggr_by_cpu, env); 1540 } 1541 1542 static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, 1543 struct cpu_aggr_map **diep) 1544 { 1545 return cpu_map__build_map(cpus, diep, perf_env__get_die_aggr_by_cpu, env); 1546 } 1547 1548 static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, 1549 struct cpu_aggr_map **corep) 1550 { 1551 return cpu_map__build_map(cpus, corep, perf_env__get_core_aggr_by_cpu, env); 1552 } 1553 1554 static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, 1555 struct cpu_aggr_map **nodep) 1556 { 1557 return cpu_map__build_map(cpus, nodep, perf_env__get_node_aggr_by_cpu, env); 1558 } 1559 1560 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1561 int cpu) 1562 { 1563 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1564 } 1565 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1566 int cpu) 1567 { 1568 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1569 } 1570 1571 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1572 int cpu) 1573 { 1574 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1575 } 1576 1577 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1578 int cpu) 1579 { 1580 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1581 } 1582 1583 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1584 { 1585 struct perf_env *env = &st->session->header.env; 1586 1587 switch (stat_config.aggr_mode) { 1588 case AGGR_SOCKET: 1589 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1590 perror("cannot build socket map"); 1591 return -1; 1592 } 1593 stat_config.aggr_get_id = perf_stat__get_socket_file; 1594 break; 1595 case AGGR_DIE: 1596 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1597 perror("cannot build die map"); 1598 return -1; 1599 } 1600 stat_config.aggr_get_id = perf_stat__get_die_file; 1601 break; 1602 case AGGR_CORE: 1603 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1604 perror("cannot build core map"); 1605 return -1; 1606 } 1607 stat_config.aggr_get_id = perf_stat__get_core_file; 1608 break; 1609 case AGGR_NODE: 1610 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1611 perror("cannot build core map"); 1612 return -1; 1613 } 1614 stat_config.aggr_get_id = perf_stat__get_node_file; 1615 break; 1616 case AGGR_NONE: 1617 case AGGR_GLOBAL: 1618 case AGGR_THREAD: 1619 case AGGR_UNSET: 1620 default: 1621 break; 1622 } 1623 1624 return 0; 1625 } 1626 1627 /* 1628 * Add default attributes, if there were no attributes specified or 1629 * if -d/--detailed, -d -d or -d -d -d is used: 1630 */ 1631 static int add_default_attributes(void) 1632 { 1633 int err; 1634 struct perf_event_attr default_attrs0[] = { 1635 1636 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1637 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1638 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1639 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1640 1641 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1642 }; 1643 struct perf_event_attr frontend_attrs[] = { 1644 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1645 }; 1646 struct perf_event_attr backend_attrs[] = { 1647 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1648 }; 1649 struct perf_event_attr default_attrs1[] = { 1650 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1651 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1652 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1653 1654 }; 1655 struct perf_event_attr default_sw_attrs[] = { 1656 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1657 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1658 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1659 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1660 }; 1661 1662 /* 1663 * Detailed stats (-d), covering the L1 and last level data caches: 1664 */ 1665 struct perf_event_attr detailed_attrs[] = { 1666 1667 { .type = PERF_TYPE_HW_CACHE, 1668 .config = 1669 PERF_COUNT_HW_CACHE_L1D << 0 | 1670 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1671 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1672 1673 { .type = PERF_TYPE_HW_CACHE, 1674 .config = 1675 PERF_COUNT_HW_CACHE_L1D << 0 | 1676 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1677 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1678 1679 { .type = PERF_TYPE_HW_CACHE, 1680 .config = 1681 PERF_COUNT_HW_CACHE_LL << 0 | 1682 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1683 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1684 1685 { .type = PERF_TYPE_HW_CACHE, 1686 .config = 1687 PERF_COUNT_HW_CACHE_LL << 0 | 1688 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1689 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1690 }; 1691 1692 /* 1693 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1694 */ 1695 struct perf_event_attr very_detailed_attrs[] = { 1696 1697 { .type = PERF_TYPE_HW_CACHE, 1698 .config = 1699 PERF_COUNT_HW_CACHE_L1I << 0 | 1700 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1701 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1702 1703 { .type = PERF_TYPE_HW_CACHE, 1704 .config = 1705 PERF_COUNT_HW_CACHE_L1I << 0 | 1706 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1707 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1708 1709 { .type = PERF_TYPE_HW_CACHE, 1710 .config = 1711 PERF_COUNT_HW_CACHE_DTLB << 0 | 1712 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1713 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1714 1715 { .type = PERF_TYPE_HW_CACHE, 1716 .config = 1717 PERF_COUNT_HW_CACHE_DTLB << 0 | 1718 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1719 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1720 1721 { .type = PERF_TYPE_HW_CACHE, 1722 .config = 1723 PERF_COUNT_HW_CACHE_ITLB << 0 | 1724 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1725 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1726 1727 { .type = PERF_TYPE_HW_CACHE, 1728 .config = 1729 PERF_COUNT_HW_CACHE_ITLB << 0 | 1730 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1731 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1732 1733 }; 1734 1735 /* 1736 * Very, very detailed stats (-d -d -d), adding prefetch events: 1737 */ 1738 struct perf_event_attr very_very_detailed_attrs[] = { 1739 1740 { .type = PERF_TYPE_HW_CACHE, 1741 .config = 1742 PERF_COUNT_HW_CACHE_L1D << 0 | 1743 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1744 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1745 1746 { .type = PERF_TYPE_HW_CACHE, 1747 .config = 1748 PERF_COUNT_HW_CACHE_L1D << 0 | 1749 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1750 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1751 }; 1752 /* Set attrs if no event is selected and !null_run: */ 1753 if (stat_config.null_run) 1754 return 0; 1755 1756 if (transaction_run) { 1757 struct parse_events_error errinfo; 1758 /* Handle -T as -M transaction. Once platform specific metrics 1759 * support has been added to the json files, all architectures 1760 * will use this approach. To determine transaction support 1761 * on an architecture test for such a metric name. 1762 */ 1763 if (metricgroup__has_metric("transaction")) { 1764 struct option opt = { .value = &evsel_list }; 1765 1766 return metricgroup__parse_groups(&opt, "transaction", 1767 stat_config.metric_no_group, 1768 stat_config.metric_no_merge, 1769 &stat_config.metric_events); 1770 } 1771 1772 parse_events_error__init(&errinfo); 1773 if (pmu_have_event("cpu", "cycles-ct") && 1774 pmu_have_event("cpu", "el-start")) 1775 err = parse_events(evsel_list, transaction_attrs, 1776 &errinfo); 1777 else 1778 err = parse_events(evsel_list, 1779 transaction_limited_attrs, 1780 &errinfo); 1781 if (err) { 1782 fprintf(stderr, "Cannot set up transaction events\n"); 1783 parse_events_error__print(&errinfo, transaction_attrs); 1784 } 1785 parse_events_error__exit(&errinfo); 1786 return err ? -1 : 0; 1787 } 1788 1789 if (smi_cost) { 1790 struct parse_events_error errinfo; 1791 int smi; 1792 1793 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1794 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1795 return -1; 1796 } 1797 1798 if (!smi) { 1799 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1800 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1801 return -1; 1802 } 1803 smi_reset = true; 1804 } 1805 1806 if (!pmu_have_event("msr", "aperf") || 1807 !pmu_have_event("msr", "smi")) { 1808 fprintf(stderr, "To measure SMI cost, it needs " 1809 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1810 return -1; 1811 } 1812 if (!force_metric_only) 1813 stat_config.metric_only = true; 1814 1815 parse_events_error__init(&errinfo); 1816 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1817 if (err) { 1818 parse_events_error__print(&errinfo, smi_cost_attrs); 1819 fprintf(stderr, "Cannot set up SMI cost events\n"); 1820 } 1821 parse_events_error__exit(&errinfo); 1822 return err ? -1 : 0; 1823 } 1824 1825 if (topdown_run) { 1826 const char **metric_attrs = topdown_metric_attrs; 1827 unsigned int max_level = 1; 1828 char *str = NULL; 1829 bool warn = false; 1830 1831 if (!force_metric_only) 1832 stat_config.metric_only = true; 1833 1834 if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) { 1835 metric_attrs = topdown_metric_L2_attrs; 1836 max_level = 2; 1837 } 1838 1839 if (stat_config.topdown_level > max_level) { 1840 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1841 return -1; 1842 } else if (!stat_config.topdown_level) 1843 stat_config.topdown_level = max_level; 1844 1845 if (topdown_filter_events(metric_attrs, &str, 1) < 0) { 1846 pr_err("Out of memory\n"); 1847 return -1; 1848 } 1849 if (metric_attrs[0] && str) { 1850 if (!stat_config.interval && !stat_config.metric_only) { 1851 fprintf(stat_config.output, 1852 "Topdown accuracy may decrease when measuring long periods.\n" 1853 "Please print the result regularly, e.g. -I1000\n"); 1854 } 1855 goto setup_metrics; 1856 } 1857 1858 zfree(&str); 1859 1860 if (stat_config.aggr_mode != AGGR_GLOBAL && 1861 stat_config.aggr_mode != AGGR_CORE) { 1862 pr_err("top down event configuration requires --per-core mode\n"); 1863 return -1; 1864 } 1865 stat_config.aggr_mode = AGGR_CORE; 1866 if (nr_cgroups || !target__has_cpu(&target)) { 1867 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1868 return -1; 1869 } 1870 1871 if (topdown_filter_events(topdown_attrs, &str, 1872 arch_topdown_check_group(&warn)) < 0) { 1873 pr_err("Out of memory\n"); 1874 return -1; 1875 } 1876 if (topdown_attrs[0] && str) { 1877 struct parse_events_error errinfo; 1878 if (warn) 1879 arch_topdown_group_warn(); 1880 setup_metrics: 1881 parse_events_error__init(&errinfo); 1882 err = parse_events(evsel_list, str, &errinfo); 1883 if (err) { 1884 fprintf(stderr, 1885 "Cannot set up top down events %s: %d\n", 1886 str, err); 1887 parse_events_error__print(&errinfo, str); 1888 parse_events_error__exit(&errinfo); 1889 free(str); 1890 return -1; 1891 } 1892 parse_events_error__exit(&errinfo); 1893 } else { 1894 fprintf(stderr, "System does not support topdown\n"); 1895 return -1; 1896 } 1897 free(str); 1898 } 1899 1900 if (!evsel_list->core.nr_entries) { 1901 if (perf_pmu__has_hybrid()) { 1902 struct parse_events_error errinfo; 1903 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; 1904 1905 if (target__has_cpu(&target)) 1906 default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK; 1907 1908 if (evlist__add_default_attrs(evsel_list, 1909 default_sw_attrs) < 0) { 1910 return -1; 1911 } 1912 1913 parse_events_error__init(&errinfo); 1914 err = parse_events(evsel_list, hybrid_str, &errinfo); 1915 if (err) { 1916 fprintf(stderr, 1917 "Cannot set up hybrid events %s: %d\n", 1918 hybrid_str, err); 1919 parse_events_error__print(&errinfo, hybrid_str); 1920 } 1921 parse_events_error__exit(&errinfo); 1922 return err ? -1 : 0; 1923 } 1924 1925 if (target__has_cpu(&target)) 1926 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1927 1928 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1929 return -1; 1930 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1931 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1932 return -1; 1933 } 1934 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1935 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1936 return -1; 1937 } 1938 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1939 return -1; 1940 1941 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1942 if (arch_evlist__add_default_attrs(evsel_list) < 0) 1943 return -1; 1944 } 1945 1946 /* Detailed events get appended to the event list: */ 1947 1948 if (detailed_run < 1) 1949 return 0; 1950 1951 /* Append detailed run extra attributes: */ 1952 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1953 return -1; 1954 1955 if (detailed_run < 2) 1956 return 0; 1957 1958 /* Append very detailed run extra attributes: */ 1959 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1960 return -1; 1961 1962 if (detailed_run < 3) 1963 return 0; 1964 1965 /* Append very, very detailed run extra attributes: */ 1966 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1967 } 1968 1969 static const char * const stat_record_usage[] = { 1970 "perf stat record [<options>]", 1971 NULL, 1972 }; 1973 1974 static void init_features(struct perf_session *session) 1975 { 1976 int feat; 1977 1978 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1979 perf_header__set_feat(&session->header, feat); 1980 1981 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1982 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1983 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1984 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1985 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1986 } 1987 1988 static int __cmd_record(int argc, const char **argv) 1989 { 1990 struct perf_session *session; 1991 struct perf_data *data = &perf_stat.data; 1992 1993 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1994 PARSE_OPT_STOP_AT_NON_OPTION); 1995 1996 if (output_name) 1997 data->path = output_name; 1998 1999 if (stat_config.run_count != 1 || forever) { 2000 pr_err("Cannot use -r option with perf stat record.\n"); 2001 return -1; 2002 } 2003 2004 session = perf_session__new(data, NULL); 2005 if (IS_ERR(session)) { 2006 pr_err("Perf session creation failed\n"); 2007 return PTR_ERR(session); 2008 } 2009 2010 init_features(session); 2011 2012 session->evlist = evsel_list; 2013 perf_stat.session = session; 2014 perf_stat.record = true; 2015 return argc; 2016 } 2017 2018 static int process_stat_round_event(struct perf_session *session, 2019 union perf_event *event) 2020 { 2021 struct perf_record_stat_round *stat_round = &event->stat_round; 2022 struct evsel *counter; 2023 struct timespec tsh, *ts = NULL; 2024 const char **argv = session->header.env.cmdline_argv; 2025 int argc = session->header.env.nr_cmdline; 2026 2027 evlist__for_each_entry(evsel_list, counter) 2028 perf_stat_process_counter(&stat_config, counter); 2029 2030 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2031 update_stats(&walltime_nsecs_stats, stat_round->time); 2032 2033 if (stat_config.interval && stat_round->time) { 2034 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2035 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2036 ts = &tsh; 2037 } 2038 2039 print_counters(ts, argc, argv); 2040 return 0; 2041 } 2042 2043 static 2044 int process_stat_config_event(struct perf_session *session, 2045 union perf_event *event) 2046 { 2047 struct perf_tool *tool = session->tool; 2048 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2049 2050 perf_event__read_stat_config(&stat_config, &event->stat_config); 2051 2052 if (perf_cpu_map__empty(st->cpus)) { 2053 if (st->aggr_mode != AGGR_UNSET) 2054 pr_warning("warning: processing task data, aggregation mode not set\n"); 2055 return 0; 2056 } 2057 2058 if (st->aggr_mode != AGGR_UNSET) 2059 stat_config.aggr_mode = st->aggr_mode; 2060 2061 if (perf_stat.data.is_pipe) 2062 perf_stat_init_aggr_mode(); 2063 else 2064 perf_stat_init_aggr_mode_file(st); 2065 2066 return 0; 2067 } 2068 2069 static int set_maps(struct perf_stat *st) 2070 { 2071 if (!st->cpus || !st->threads) 2072 return 0; 2073 2074 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2075 return -EINVAL; 2076 2077 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2078 2079 if (evlist__alloc_stats(evsel_list, true)) 2080 return -ENOMEM; 2081 2082 st->maps_allocated = true; 2083 return 0; 2084 } 2085 2086 static 2087 int process_thread_map_event(struct perf_session *session, 2088 union perf_event *event) 2089 { 2090 struct perf_tool *tool = session->tool; 2091 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2092 2093 if (st->threads) { 2094 pr_warning("Extra thread map event, ignoring.\n"); 2095 return 0; 2096 } 2097 2098 st->threads = thread_map__new_event(&event->thread_map); 2099 if (!st->threads) 2100 return -ENOMEM; 2101 2102 return set_maps(st); 2103 } 2104 2105 static 2106 int process_cpu_map_event(struct perf_session *session, 2107 union perf_event *event) 2108 { 2109 struct perf_tool *tool = session->tool; 2110 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2111 struct perf_cpu_map *cpus; 2112 2113 if (st->cpus) { 2114 pr_warning("Extra cpu map event, ignoring.\n"); 2115 return 0; 2116 } 2117 2118 cpus = cpu_map__new_data(&event->cpu_map.data); 2119 if (!cpus) 2120 return -ENOMEM; 2121 2122 st->cpus = cpus; 2123 return set_maps(st); 2124 } 2125 2126 static const char * const stat_report_usage[] = { 2127 "perf stat report [<options>]", 2128 NULL, 2129 }; 2130 2131 static struct perf_stat perf_stat = { 2132 .tool = { 2133 .attr = perf_event__process_attr, 2134 .event_update = perf_event__process_event_update, 2135 .thread_map = process_thread_map_event, 2136 .cpu_map = process_cpu_map_event, 2137 .stat_config = process_stat_config_event, 2138 .stat = perf_event__process_stat_event, 2139 .stat_round = process_stat_round_event, 2140 }, 2141 .aggr_mode = AGGR_UNSET, 2142 }; 2143 2144 static int __cmd_report(int argc, const char **argv) 2145 { 2146 struct perf_session *session; 2147 const struct option options[] = { 2148 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2149 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2150 "aggregate counts per processor socket", AGGR_SOCKET), 2151 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2152 "aggregate counts per processor die", AGGR_DIE), 2153 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2154 "aggregate counts per physical processor core", AGGR_CORE), 2155 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2156 "aggregate counts per numa node", AGGR_NODE), 2157 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2158 "disable CPU count aggregation", AGGR_NONE), 2159 OPT_END() 2160 }; 2161 struct stat st; 2162 int ret; 2163 2164 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2165 2166 if (!input_name || !strlen(input_name)) { 2167 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2168 input_name = "-"; 2169 else 2170 input_name = "perf.data"; 2171 } 2172 2173 perf_stat.data.path = input_name; 2174 perf_stat.data.mode = PERF_DATA_MODE_READ; 2175 2176 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2177 if (IS_ERR(session)) 2178 return PTR_ERR(session); 2179 2180 perf_stat.session = session; 2181 stat_config.output = stderr; 2182 evsel_list = session->evlist; 2183 2184 ret = perf_session__process_events(session); 2185 if (ret) 2186 return ret; 2187 2188 perf_session__delete(session); 2189 return 0; 2190 } 2191 2192 static void setup_system_wide(int forks) 2193 { 2194 /* 2195 * Make system wide (-a) the default target if 2196 * no target was specified and one of following 2197 * conditions is met: 2198 * 2199 * - there's no workload specified 2200 * - there is workload specified but all requested 2201 * events are system wide events 2202 */ 2203 if (!target__none(&target)) 2204 return; 2205 2206 if (!forks) 2207 target.system_wide = true; 2208 else { 2209 struct evsel *counter; 2210 2211 evlist__for_each_entry(evsel_list, counter) { 2212 if (!counter->core.system_wide && 2213 strcmp(counter->name, "duration_time")) { 2214 return; 2215 } 2216 } 2217 2218 if (evsel_list->core.nr_entries) 2219 target.system_wide = true; 2220 } 2221 } 2222 2223 int cmd_stat(int argc, const char **argv) 2224 { 2225 const char * const stat_usage[] = { 2226 "perf stat [<options>] [<command>]", 2227 NULL 2228 }; 2229 int status = -EINVAL, run_idx, err; 2230 const char *mode; 2231 FILE *output = stderr; 2232 unsigned int interval, timeout; 2233 const char * const stat_subcommands[] = { "record", "report" }; 2234 char errbuf[BUFSIZ]; 2235 2236 setlocale(LC_ALL, ""); 2237 2238 evsel_list = evlist__new(); 2239 if (evsel_list == NULL) 2240 return -ENOMEM; 2241 2242 parse_events__shrink_config_terms(); 2243 2244 /* String-parsing callback-based options would segfault when negated */ 2245 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2246 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2247 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2248 2249 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2250 (const char **) stat_usage, 2251 PARSE_OPT_STOP_AT_NON_OPTION); 2252 perf_stat__collect_metric_expr(evsel_list); 2253 perf_stat__init_shadow_stats(); 2254 2255 if (stat_config.csv_sep) { 2256 stat_config.csv_output = true; 2257 if (!strcmp(stat_config.csv_sep, "\\t")) 2258 stat_config.csv_sep = "\t"; 2259 } else 2260 stat_config.csv_sep = DEFAULT_SEPARATOR; 2261 2262 if (argc && !strncmp(argv[0], "rec", 3)) { 2263 argc = __cmd_record(argc, argv); 2264 if (argc < 0) 2265 return -1; 2266 } else if (argc && !strncmp(argv[0], "rep", 3)) 2267 return __cmd_report(argc, argv); 2268 2269 interval = stat_config.interval; 2270 timeout = stat_config.timeout; 2271 2272 /* 2273 * For record command the -o is already taken care of. 2274 */ 2275 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2276 output = NULL; 2277 2278 if (output_name && output_fd) { 2279 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2280 parse_options_usage(stat_usage, stat_options, "o", 1); 2281 parse_options_usage(NULL, stat_options, "log-fd", 0); 2282 goto out; 2283 } 2284 2285 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2286 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2287 goto out; 2288 } 2289 2290 if (stat_config.metric_only && stat_config.run_count > 1) { 2291 fprintf(stderr, "--metric-only is not supported with -r\n"); 2292 goto out; 2293 } 2294 2295 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2296 fprintf(stderr, "--table is only supported with -r\n"); 2297 parse_options_usage(stat_usage, stat_options, "r", 1); 2298 parse_options_usage(NULL, stat_options, "table", 0); 2299 goto out; 2300 } 2301 2302 if (output_fd < 0) { 2303 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2304 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2305 goto out; 2306 } 2307 2308 if (!output && !stat_config.quiet) { 2309 struct timespec tm; 2310 mode = append_file ? "a" : "w"; 2311 2312 output = fopen(output_name, mode); 2313 if (!output) { 2314 perror("failed to create output file"); 2315 return -1; 2316 } 2317 clock_gettime(CLOCK_REALTIME, &tm); 2318 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2319 } else if (output_fd > 0) { 2320 mode = append_file ? "a" : "w"; 2321 output = fdopen(output_fd, mode); 2322 if (!output) { 2323 perror("Failed opening logfd"); 2324 return -errno; 2325 } 2326 } 2327 2328 stat_config.output = output; 2329 2330 /* 2331 * let the spreadsheet do the pretty-printing 2332 */ 2333 if (stat_config.csv_output) { 2334 /* User explicitly passed -B? */ 2335 if (big_num_opt == 1) { 2336 fprintf(stderr, "-B option not supported with -x\n"); 2337 parse_options_usage(stat_usage, stat_options, "B", 1); 2338 parse_options_usage(NULL, stat_options, "x", 1); 2339 goto out; 2340 } else /* Nope, so disable big number formatting */ 2341 stat_config.big_num = false; 2342 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2343 stat_config.big_num = false; 2344 2345 err = target__validate(&target); 2346 if (err) { 2347 target__strerror(&target, err, errbuf, BUFSIZ); 2348 pr_warning("%s\n", errbuf); 2349 } 2350 2351 setup_system_wide(argc); 2352 2353 /* 2354 * Display user/system times only for single 2355 * run and when there's specified tracee. 2356 */ 2357 if ((stat_config.run_count == 1) && target__none(&target)) 2358 stat_config.ru_display = true; 2359 2360 if (stat_config.run_count < 0) { 2361 pr_err("Run count must be a positive number\n"); 2362 parse_options_usage(stat_usage, stat_options, "r", 1); 2363 goto out; 2364 } else if (stat_config.run_count == 0) { 2365 forever = true; 2366 stat_config.run_count = 1; 2367 } 2368 2369 if (stat_config.walltime_run_table) { 2370 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2371 if (!stat_config.walltime_run) { 2372 pr_err("failed to setup -r option"); 2373 goto out; 2374 } 2375 } 2376 2377 if ((stat_config.aggr_mode == AGGR_THREAD) && 2378 !target__has_task(&target)) { 2379 if (!target.system_wide || target.cpu_list) { 2380 fprintf(stderr, "The --per-thread option is only " 2381 "available when monitoring via -p -t -a " 2382 "options or only --per-thread.\n"); 2383 parse_options_usage(NULL, stat_options, "p", 1); 2384 parse_options_usage(NULL, stat_options, "t", 1); 2385 goto out; 2386 } 2387 } 2388 2389 /* 2390 * no_aggr, cgroup are for system-wide only 2391 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2392 */ 2393 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2394 stat_config.aggr_mode != AGGR_THREAD) || 2395 (nr_cgroups || stat_config.cgroup_list)) && 2396 !target__has_cpu(&target)) { 2397 fprintf(stderr, "both cgroup and no-aggregation " 2398 "modes only available in system-wide mode\n"); 2399 2400 parse_options_usage(stat_usage, stat_options, "G", 1); 2401 parse_options_usage(NULL, stat_options, "A", 1); 2402 parse_options_usage(NULL, stat_options, "a", 1); 2403 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2404 goto out; 2405 } 2406 2407 if (stat_config.iostat_run) { 2408 status = iostat_prepare(evsel_list, &stat_config); 2409 if (status) 2410 goto out; 2411 if (iostat_mode == IOSTAT_LIST) { 2412 iostat_list(evsel_list, &stat_config); 2413 goto out; 2414 } else if (verbose) 2415 iostat_list(evsel_list, &stat_config); 2416 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2417 target.system_wide = true; 2418 } 2419 2420 if (add_default_attributes()) 2421 goto out; 2422 2423 if (stat_config.cgroup_list) { 2424 if (nr_cgroups > 0) { 2425 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2426 parse_options_usage(stat_usage, stat_options, "G", 1); 2427 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2428 goto out; 2429 } 2430 2431 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2432 &stat_config.metric_events, true) < 0) { 2433 parse_options_usage(stat_usage, stat_options, 2434 "for-each-cgroup", 0); 2435 goto out; 2436 } 2437 } 2438 2439 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2440 target.per_thread = true; 2441 2442 if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { 2443 pr_err("failed to use cpu list %s\n", target.cpu_list); 2444 goto out; 2445 } 2446 2447 target.hybrid = perf_pmu__has_hybrid(); 2448 if (evlist__create_maps(evsel_list, &target) < 0) { 2449 if (target__has_task(&target)) { 2450 pr_err("Problems finding threads of monitor\n"); 2451 parse_options_usage(stat_usage, stat_options, "p", 1); 2452 parse_options_usage(NULL, stat_options, "t", 1); 2453 } else if (target__has_cpu(&target)) { 2454 perror("failed to parse CPUs map"); 2455 parse_options_usage(stat_usage, stat_options, "C", 1); 2456 parse_options_usage(NULL, stat_options, "a", 1); 2457 } 2458 goto out; 2459 } 2460 2461 evlist__check_cpu_maps(evsel_list); 2462 2463 /* 2464 * Initialize thread_map with comm names, 2465 * so we could print it out on output. 2466 */ 2467 if (stat_config.aggr_mode == AGGR_THREAD) { 2468 thread_map__read_comms(evsel_list->core.threads); 2469 if (target.system_wide) { 2470 if (runtime_stat_new(&stat_config, 2471 perf_thread_map__nr(evsel_list->core.threads))) { 2472 goto out; 2473 } 2474 } 2475 } 2476 2477 if (stat_config.aggr_mode == AGGR_NODE) 2478 cpu__setup_cpunode_map(); 2479 2480 if (stat_config.times && interval) 2481 interval_count = true; 2482 else if (stat_config.times && !interval) { 2483 pr_err("interval-count option should be used together with " 2484 "interval-print.\n"); 2485 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2486 parse_options_usage(stat_usage, stat_options, "I", 1); 2487 goto out; 2488 } 2489 2490 if (timeout && timeout < 100) { 2491 if (timeout < 10) { 2492 pr_err("timeout must be >= 10ms.\n"); 2493 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2494 goto out; 2495 } else 2496 pr_warning("timeout < 100ms. " 2497 "The overhead percentage could be high in some cases. " 2498 "Please proceed with caution.\n"); 2499 } 2500 if (timeout && interval) { 2501 pr_err("timeout option is not supported with interval-print.\n"); 2502 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2503 parse_options_usage(stat_usage, stat_options, "I", 1); 2504 goto out; 2505 } 2506 2507 if (evlist__alloc_stats(evsel_list, interval)) 2508 goto out; 2509 2510 if (perf_stat_init_aggr_mode()) 2511 goto out; 2512 2513 /* 2514 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2515 * while avoiding that older tools show confusing messages. 2516 * 2517 * However for pipe sessions we need to keep it zero, 2518 * because script's perf_evsel__check_attr is triggered 2519 * by attr->sample_type != 0, and we can't run it on 2520 * stat sessions. 2521 */ 2522 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2523 2524 /* 2525 * We dont want to block the signals - that would cause 2526 * child tasks to inherit that and Ctrl-C would not work. 2527 * What we want is for Ctrl-C to work in the exec()-ed 2528 * task, but being ignored by perf stat itself: 2529 */ 2530 atexit(sig_atexit); 2531 if (!forever) 2532 signal(SIGINT, skip_signal); 2533 signal(SIGCHLD, skip_signal); 2534 signal(SIGALRM, skip_signal); 2535 signal(SIGABRT, skip_signal); 2536 2537 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2538 goto out; 2539 2540 status = 0; 2541 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2542 if (stat_config.run_count != 1 && verbose > 0) 2543 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2544 run_idx + 1); 2545 2546 if (run_idx != 0) 2547 evlist__reset_prev_raw_counts(evsel_list); 2548 2549 status = run_perf_stat(argc, argv, run_idx); 2550 if (forever && status != -1 && !interval) { 2551 print_counters(NULL, argc, argv); 2552 perf_stat__reset_stats(); 2553 } 2554 } 2555 2556 if (!forever && status != -1 && (!interval || stat_config.summary)) 2557 print_counters(NULL, argc, argv); 2558 2559 evlist__finalize_ctlfd(evsel_list); 2560 2561 if (STAT_RECORD) { 2562 /* 2563 * We synthesize the kernel mmap record just so that older tools 2564 * don't emit warnings about not being able to resolve symbols 2565 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2566 * a saner message about no samples being in the perf.data file. 2567 * 2568 * This also serves to suppress a warning about f_header.data.size == 0 2569 * in header.c at the moment 'perf stat record' gets introduced, which 2570 * is not really needed once we start adding the stat specific PERF_RECORD_ 2571 * records, but the need to suppress the kptr_restrict messages in older 2572 * tools remain -acme 2573 */ 2574 int fd = perf_data__fd(&perf_stat.data); 2575 2576 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2577 process_synthesized_event, 2578 &perf_stat.session->machines.host); 2579 if (err) { 2580 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2581 "older tools may produce warnings about this file\n."); 2582 } 2583 2584 if (!interval) { 2585 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2586 pr_err("failed to write stat round event\n"); 2587 } 2588 2589 if (!perf_stat.data.is_pipe) { 2590 perf_stat.session->header.data_size += perf_stat.bytes_written; 2591 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2592 } 2593 2594 evlist__close(evsel_list); 2595 perf_session__delete(perf_stat.session); 2596 } 2597 2598 perf_stat__exit_aggr_mode(); 2599 evlist__free_stats(evsel_list); 2600 out: 2601 if (stat_config.iostat_run) 2602 iostat_release(evsel_list); 2603 2604 zfree(&stat_config.walltime_run); 2605 2606 if (smi_cost && smi_reset) 2607 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2608 2609 evlist__delete(evsel_list); 2610 2611 metricgroup__rblist_exit(&stat_config.metric_events); 2612 runtime_stat_delete(&stat_config); 2613 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2614 2615 return status; 2616 } 2617