// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

        1708.761321 task-clock                #   11.037 CPUs utilized
             41,190 context-switches          #    0.024 M/sec
              6,735 CPU-migrations            #    0.004 M/sec
             17,318 page-faults               #    0.010 M/sec
      5,205,202,243 cycles                    #    3.046 GHz
      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
      1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
      2,603,501,247 instructions              #    0.50  insns per cycle
                                              #    1.48  stalled cycles per insn
        484,357,498 branches                  #  283.455 M/sec
          6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "perf.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/pmu-hybrid.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};
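/*
 * For illustration: the braces in the event string above are perf's event
 * group syntax, which asks the kernel to schedule the enclosed events on
 * the PMU together so that their counts cover the same cycles. The same
 * syntax works on the command line, e.g.:
 *
 *   $ perf stat -e '{instructions,cycles}' -- ./workload
 */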
/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

static const char *topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};

static const char *topdown_metric_attrs[] = {
	"slots",
	"topdown-retiring",
	"topdown-bad-spec",
	"topdown-fe-bound",
	"topdown-be-bound",
	NULL,
};

static const char *topdown_metric_L2_attrs[] = {
	"slots",
	"topdown-retiring",
	"topdown-bad-spec",
	"topdown-fe-bound",
	"topdown-be-bound",
	"topdown-heavy-ops",
	"topdown-br-mispredict",
	"topdown-fetch-lat",
	"topdown-mem-bound",
	NULL,
};

#define TOPDOWN_MAX_LEVEL	2

static const char *smi_cost_attrs = {
	"{"
	"msr/aperf/,"
	"msr/smi/,"
	"cycles"
	"}"
};

static struct evlist	*evsel_list;
static bool all_counters_use_bpf = true;

static struct target target = {
	.uid	= UINT_MAX,
};

#define METRIC_ONLY_LEN 20

static volatile pid_t		child_pid		= -1;
static int			detailed_run		= 0;
static bool			transaction_run;
static bool			topdown_run		= false;
static bool			smi_cost		= false;
static bool			smi_reset		= false;
static int			big_num_opt		= -1;
static bool			group			= false;
static const char		*pre_cmd		= NULL;
static const char		*post_cmd		= NULL;
static bool			sync_run		= false;
static bool			forever			= false;
static bool			force_metric_only	= false;
static struct timespec		ref_time;
static bool			append_file;
static bool			interval_count;
static const char		*output_name;
static int			output_fd;
static char			*metrics;

struct perf_stat {
	bool			 record;
	struct perf_data	 data;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

static volatile int done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
	.ru_stats		= &ru_stats,
	.big_num		= true,
	.ctl_fd			= -1,
	.ctl_fd_ack		= -1,
	.iostat_run		= false,
};

static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
	if (!a->core.cpus && !b->core.cpus)
		return true;

	if (!a->core.cpus || !b->core.cpus)
		return false;

	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
		return false;

	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
			return false;
	}

	return true;
}
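/*
 * A sketch of the mismatch cpus_map_matched() guards against: core events
 * typically run on every online CPU, while an uncore event is tied to one
 * CPU per socket, so a hypothetical group such as
 *
 *   $ perf stat -a -e '{cycles,uncore_imc/cas_count_read/}'
 *
 * pairs group members with different cpu maps. evlist__check_cpu_maps()
 * below detects this and breaks the group up rather than letting the open
 * fail.
 */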
static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *pos, *leader;
	char buf[1024];

	if (evlist__has_hybrid(evlist))
		evlist__warn_hybrid_group(evlist);

	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (cpus_map_matched(leader, evsel))
			continue;

		/* If there's a mismatch, disable the group and warn the user. */
		WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n");
		evsel__group_desc(leader, buf, sizeof(buf));
		pr_warning("  %s\n", buf);

		if (verbose) {
			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", evsel->name, buf);
		}

		for_each_group_evsel(pos, leader)
			evsel__remove_from_group(pos, leader);
	}
}

static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}

static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

static int read_single_counter(struct evsel *counter, int cpu_map_idx,
			       int thread, struct timespec *rs)
{
	switch (counter->tool_event) {
	case PERF_TOOL_DURATION_TIME: {
		u64 val = rs->tv_nsec + rs->tv_sec * 1000000000ULL;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	case PERF_TOOL_USER_TIME:
	case PERF_TOOL_SYSTEM_TIME: {
		u64 val;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		if (counter->tool_event == PERF_TOOL_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	default:
	case PERF_TOOL_NONE:
		return evsel__read_counter(counter, cpu_map_idx, thread);
	case PERF_TOOL_MAX:
		/* This should never be reached */
		return 0;
	}
}
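/*
 * For illustration: "tool events" such as duration_time are synthesized by
 * perf itself rather than read from the kernel, which is why
 * read_single_counter() fills their counts from the elapsed timespec or
 * from getrusage()-derived statistics, e.g.:
 *
 *   $ perf stat -e duration_time,cycles -- ./workload
 */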
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs,
			    int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				evsel__name(counter),
				perf_cpu_map__cpu(evsel__cpus(counter),
						  cpu_map_idx).cpu,
				count->val, count->ena, count->run);
		}
	}

	return 0;
}

static int read_affinity_counters(struct timespec *rs)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		if (!counter->err) {
			counter->err = read_counter_cpu(counter, rs,
							evlist_cpu_itr.cpu_map_idx);
		}
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}

static int read_bpf_map_counters(void)
{
	struct evsel *counter;
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
	}
	return 0;
}

static void read_counters(struct timespec *rs)
{
	struct evsel *counter;

	if (!stat_config.stop_read_counter) {
		if (read_bpf_map_counters() ||
		    read_affinity_counters(rs))
			return;
	}

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		counter->err = 0;
	}
}

static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	perf_stat__reset_shadow_per_stat(&rt_stat);
	read_counters(&rs);

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}
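/*
 * For illustration, the interval machinery above and below backs perf
 * stat's -I/--interval-print mode; --interval-count bounds the number of
 * rounds, e.g. five one-second snapshots:
 *
 *   $ perf stat -a -I 1000 --interval-count 5
 */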
static bool handle_interval(unsigned int interval, int *times)
{
	if (interval) {
		process_interval();
		if (interval_count && !(--(*times)))
			return true;
	}
	return false;
}

static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (stat_config.initial_delay < 0) {
		pr_info(EVLIST_DISABLED_MSG);
		return 0;
	}

	if (stat_config.initial_delay > 0) {
		pr_info(EVLIST_DISABLED_MSG);
		usleep(stat_config.initial_delay * USEC_PER_MSEC);
	}

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || stat_config.initial_delay) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
		if (stat_config.initial_delay > 0)
			pr_info(EVLIST_ENABLED_MSG);
	}
	return 0;
}

static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile int workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

static bool is_target_alive(struct target *_target,
			    struct perf_thread_map *threads)
{
	struct stat st;
	int i;

	if (!target__has_task(_target))
		return true;

	for (i = 0; i < threads->nr; i++) {
		char path[PATH_MAX];

		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
			  threads->map[i].pid);

		if (!stat(path, &st))
			return true;
	}

	return false;
}

static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			__fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}

static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
			int *time_to_sleep)
{
	int tts = *time_to_sleep;
	struct timespec time_diff;

	diff_timespec(&time_diff, time_stop, time_start);

	tts -= time_diff.tv_sec * MSEC_PER_SEC +
	       time_diff.tv_nsec / NSEC_PER_MSEC;

	if (tts < 0)
		tts = 0;

	*time_to_sleep = tts;
}
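/*
 * Worked example for compute_tts(): with a 1000ms interval, if a control-fd
 * event wakes the poll after 300ms, the next sleep is trimmed to 700ms so
 * that interval boundaries stay (approximately) aligned; a negative
 * remainder is clamped to 0 and polling resumes immediately.
 */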
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}

enum counter_recovery {
	COUNTER_SKIP,
	COUNTER_RETRY,
	COUNTER_FATAL,
};

static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];

	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * CPU events had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		if ((evsel__leader(counter) != counter) ||
		    !(counter->core.leader->nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	}

	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}
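/*
 * A short summary of the recovery policy above, for orientation:
 * COUNTER_SKIP drops an unsupported event and carries on, COUNTER_RETRY
 * reopens after a fallback (e.g. evsel__fallback() downgrading an event to
 * user-space-only counting when kernel profiling is restricted), and
 * COUNTER_FATAL aborts the run and kills any forked workload.
 */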
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	int err;
	bool second_pass = false;

	if (forks) {
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		evlist__set_leader(evsel_list);

	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return -1;
		affinity = &saved_affinity;
	}

	evlist__for_each_entry(evsel_list, counter) {
		counter->reset_group = false;
		if (bpf_counter__load(counter, &target))
			return -1;
		if (!evsel__is_bpf(counter))
			all_counters_use_bpf = false;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		counter = evlist_cpu_itr.evsel;

		/*
		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
		 * no need to call it again here.
		 */
		if (target.use_bpf)
			break;

		if (counter->reset_group || counter->errored)
			continue;
		if (evsel__is_bpf(counter))
			continue;
try_again:
		if (create_perf_stat_counter(counter, &stat_config, &target,
					     evlist_cpu_itr.cpu_map_idx) < 0) {

			/*
			 * Weak group failed. We cannot just undo this here
			 * because earlier CPUs might be in group mode, and the kernel
			 * doesn't support mixing group and non group reads. Defer
			 * it to later.
			 * Don't close here because we're in the wrong affinity.
			 */
			if ((errno == EINVAL || errno == EBADF) &&
			    evsel__leader(counter) != counter &&
			    counter->weak_group) {
				evlist__reset_weak_group(evsel_list, counter, false);
				assert(counter->reset_group);
				second_pass = true;
				continue;
			}

			switch (stat_handle_error(counter)) {
			case COUNTER_FATAL:
				return -1;
			case COUNTER_RETRY:
				goto try_again;
			case COUNTER_SKIP:
				continue;
			default:
				break;
			}

		}
		counter->supported = true;
	}
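	/*
	 * Background, for illustration: a "weak" group is requested with the
	 * :W modifier, e.g. 'perf stat -e "{cycles,cache-misses}:W"'. If the
	 * PMU cannot schedule the whole group, the members are reopened as
	 * independent events in the second pass below instead of failing the
	 * run outright.
	 */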
	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */

		/* First close errored or weak retry */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group && !counter->errored)
				continue;

			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
		}
		/* Now reopen weak */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group)
				continue;
try_again_reset:
			pr_debug2("reopening weak %s\n", evsel__name(counter));
			if (create_perf_stat_counter(counter, &stat_config, &target,
						     evlist_cpu_itr.cpu_map_idx) < 0) {

				switch (stat_handle_error(counter)) {
				case COUNTER_FATAL:
					return -1;
				case COUNTER_RETRY:
					goto try_again_reset;
				case COUNTER_SKIP:
					continue;
				default:
					break;
				}
			}
			counter->supported = true;
		}
	}
	affinity__cleanup(affinity);

	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported) {
			perf_evsel__free_fd(&counter->core);
			continue;
		}

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (evsel__should_store_id(counter) &&
		    evsel__store_ids(counter, evsel_list))
			return -1;
	}

	if (evlist__apply_filters(evsel_list, &counter)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (STAT_RECORD) {
		int fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
							 process_synthesized_event, is_pipe);
		if (err < 0)
			return err;
	}

	err = enable_counters();
	if (err)
		return -1;

	/* Exec the command, if any */
	if (forks)
		evlist__start_workload(evsel_list);

	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
			status = dispatch_events(forks, timeout, interval, &times);
		if (child_pid != -1) {
			if (timeout)
				kill(child_pid, SIGTERM);
			wait4(child_pid, &status, 0, &stat_config.ru_data);
		}

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		status = dispatch_events(forks, timeout, interval, &times);
	}

	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	if (interval && stat_config.summary) {
		stat_config.interval = 0;
		stat_config.stop_read_counter = true;
		init_stats(&walltime_nsecs_stats);
		update_stats(&walltime_nsecs_stats, t1 - t0);

		if (stat_config.aggr_mode == AGGR_GLOBAL)
			evlist__save_aggr_prev_raw_counts(evsel_list);

		evlist__copy_prev_raw_counts(evsel_list);
		evlist__reset_prev_raw_counts(evsel_list);
		perf_stat__reset_shadow_per_stat(&rt_stat);
	} else {
		update_stats(&walltime_nsecs_stats, t1 - t0);
		update_rusage_stats(&ru_stats, &stat_config.ru_data);
	}
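	/*
	 * Note, for orientation: the branch above backs -I together with
	 * --summary, e.g. 'perf stat -I 1000 --summary -- sleep 3'. The
	 * interval deltas were already printed, so the saved raw counts let
	 * the final read below produce a whole-run total.
	 */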
	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	read_counters(&(struct timespec) { .tv_nsec = t1 - t0 });

	/*
	 * We need to keep evsel_list alive, because it's processed
	 * later; it will be closed after that processing is done.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);
}

static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;
	if (stat_config.quiet)
		return;

	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile int signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * Render child_pid harmless so we won't send SIGTERM
	 * to a random process in case of a race condition
	 * with fast PID recycling.
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * Avoid a race condition with the SIGCHLD handler in
	 * skip_signal(), which modifies child_pid; the goal is
	 * to avoid sending SIGTERM to a random process.
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

void perf_stat__set_big_num(int set)
{
	stat_config.big_num = (set != 0);
}

void perf_stat__set_no_csv_summary(int set)
{
	stat_config.no_csv_summary = (set != 0);
}
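/*
 * For orientation: big_num_opt is a tri-state (-1 unset, 0 --no-big-num,
 * 1 -B/--big-num) so the default of printing thousands' separators can be
 * distinguished from an explicit user choice, e.g.:
 *
 *   $ perf stat -B -e cycles -- ./workload
 */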
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	perf_stat__set_big_num(!unset);
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	if (metrics) {
		char *tmp;

		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
			return -ENOMEM;
		free(metrics);
		metrics = tmp;
	} else {
		metrics = strdup(str);
		if (!metrics)
			return -ENOMEM;
	}
	return 0;
}

static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
			      const char *str, int unset)
{
	if (stat_config.cgroup_list) {
		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
		return -1;
	}

	return parse_cgroups(opt, str, unset);
}

static int parse_hybrid_type(const struct option *opt,
			     const char *str,
			     int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str);
	if (!evlist->hybrid_pmu_name) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}

	return 0;
}
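/*
 * Usage sketch for the hybrid handling above: on a hybrid CPU the
 * performance and efficiency cores are separate PMUs (e.g. cpu_core and
 * cpu_atom on Intel), and --cputype restricts subsequently parsed events
 * to one of them:
 *
 *   $ perf stat --cputype atom -e cycles -a -- sleep 1
 */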
static struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
		   "stat events on existing bpf program id"),
	OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
		    "use bpf program to count events"),
	OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
		   "path to perf_event_attr map"),
#endif
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN(0, "scale", &stat_config.scale,
		    "Use --no-scale to disable counter scaling for multiplexing"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
	OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
		    "Merge identical named hybrid events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
		    "print counts in JSON format"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_stat_cgroups),
	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
		   "expand events for each cgroup"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular interval in ms "
		     "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen in between new interval"),
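	/*
	 * The next few entries select the aggregation scope; for
	 * illustration, 'perf stat -a --per-core' folds the counts of all
	 * hardware threads of each core into one line. See
	 * aggr_mode__get_aggr() below for how each mode maps CPUs to IDs.
	 */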
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_INTEGER('D', "delay", &stat_config.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			   "Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
		    "don't group metric events, impacts multiplexing"),
	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
		    "don't try to share events between metrics in a group"),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure top-down statistics"),
	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
		     "Set the metrics level for the top-down statistics (0: max level)"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
		    "measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     append_metric_groups),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
		    "Use with 'percore' event qualifier to show the event "
		    "counts of one hardware thread by summing up the counts of "
		    "all hardware threads of the same physical core"),
	OPT_BOOLEAN(0, "summary", &stat_config.summary,
		    "print summary for interval mode"),
	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
		    "don't print 'summary' for CSV summary output"),
	OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
		    "don't print output (useful with record)"),
	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
		     "Only enable events on CPUs of the given type on hybrid "
		     "platforms (e.g. core or atom)",
		     parse_hybrid_type),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
		     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
			    "measure I/O performance metrics provided by arch/platform",
			    iostat_parse),
	OPT_END()
};

static const char *const aggr_mode__string[] = {
	[AGGR_CORE] = "core",
	[AGGR_DIE] = "die",
	[AGGR_GLOBAL] = "global",
	[AGGR_NODE] = "node",
	[AGGR_NONE] = "none",
	[AGGR_SOCKET] = "socket",
	[AGGR_THREAD] = "thread",
	[AGGR_UNSET] = "unset",
};

static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
					      aggr_get_id_t get_id, struct perf_cpu cpu)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
		config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);

	id = config->cpus_aggr_map->map[cpu.cpu];
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static bool term_percore_set(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->percore)
			return true;
	}

	return false;
}
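/*
 * Note, for orientation: when any event carries the 'percore' qualifier
 * (e.g. a hypothetical 'perf stat -a -A -e cpu/cycles,percore/'),
 * term_percore_set() returns true and the AGGR_NONE cases below fall back
 * to the per-core aggregation helpers.
 */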
static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return aggr_cpu_id__socket;
	case AGGR_DIE:
		return aggr_cpu_id__die;
	case AGGR_CORE:
		return aggr_cpu_id__core;
	case AGGR_NODE:
		return aggr_cpu_id__node;
	case AGGR_NONE:
		if (term_percore_set())
			return aggr_cpu_id__core;

		return NULL;
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_cached;
	case AGGR_DIE:
		return perf_stat__get_die_cached;
	case AGGR_CORE:
		return perf_stat__get_core_cached;
	case AGGR_NODE:
		return perf_stat__get_node_cached;
	case AGGR_NONE:
		if (term_percore_set())
			return perf_stat__get_core_cached;

		return NULL;
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode(void)
{
	int nr;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

	if (get_id) {
		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
							 get_id, /*data=*/NULL);
		if (!stat_config.aggr_map) {
			pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
			return -1;
		}
		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	if (evsel_list->core.user_requested_cpus)
		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
	else
		nr = 0;
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}

static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_aggr_map refcnt unbalanced\n");
		free(map);
	}
}

static void cpu_aggr_map__put(struct cpu_aggr_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_aggr_map__delete(map);
}

static void perf_stat__exit_aggr_mode(void)
{
	cpu_aggr_map__put(stat_config.aggr_map);
	cpu_aggr_map__put(stat_config.cpus_aggr_map);
	stat_config.aggr_map = NULL;
	stat_config.cpus_aggr_map = NULL;
}
static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1)
		id.socket = env->cpu[cpu.cpu].socket_id;

	return id;
}

static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * die_id is relative to socket, so start
		 * with the socket ID and then add die to
		 * make a unique ID.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die, but we need a
		 * global ID, so we set the socket, die and core IDs together.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = perf_env__numa_node(data, cpu);
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_env__get_socket_aggr_by_cpu;
	case AGGR_DIE:
		return perf_env__get_die_aggr_by_cpu;
	case AGGR_CORE:
		return perf_env__get_core_aggr_by_cpu;
	case AGGR_NODE:
		return perf_env__get_node_aggr_by_cpu;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_file;
	case AGGR_DIE:
		return perf_stat__get_die_file;
	case AGGR_CORE:
		return perf_stat__get_core_file;
	case AGGR_NODE:
		return perf_stat__get_node_file;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);

	if (!get_id)
		return 0;

	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
	return 0;
}
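/*
 * For orientation: the *_file variants above recompute aggregation IDs
 * from the recorded header environment rather than from the live system,
 * so that 'perf stat report' can re-aggregate saved counts, e.g.:
 *
 *   $ perf stat record -a -- sleep 1
 *   $ perf stat report --per-socket
 */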
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	int err;
	struct perf_event_attr default_attrs0[] = {
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	};
	struct perf_event_attr frontend_attrs[] = {
		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	};
	struct perf_event_attr backend_attrs[] = {
		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	};
	struct perf_event_attr default_attrs1[] = {
		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
	};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {
		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1D << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1D << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_LL << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_LL << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {
		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1I << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1I << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_DTLB << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_DTLB << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_ITLB << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_ITLB << 0 |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {
		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1D << 0 |
			(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

		{ .type = PERF_TYPE_HW_CACHE,
		  .config =
			PERF_COUNT_HW_CACHE_L1D << 0 |
			(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	struct perf_event_attr default_null_attrs[] = {};

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;
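	/*
	 * For illustration, -T/--transaction uses the transaction_attrs
	 * strings defined near the top of this file (or the "transaction"
	 * metric group on platforms whose JSON metrics provide one):
	 *
	 *   $ perf stat -T -- ./workload
	 */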
	if (transaction_run) {
		struct parse_events_error errinfo;

		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture test for such a metric name.
		 */
		if (metricgroup__has_metric("transaction")) {
			return metricgroup__parse_groups(evsel_list, "transaction",
							 stat_config.metric_no_group,
							 stat_config.metric_no_merge,
							 stat_config.user_requested_cpu_list,
							 stat_config.system_wide,
							 &stat_config.metric_events);
		}

		parse_events_error__init(&errinfo);
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs,
					   &errinfo);
		else
			err = parse_events(evsel_list,
					   transaction_limited_attrs,
					   &errinfo);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			parse_events_error__print(&errinfo, transaction_attrs);
		}
		parse_events_error__exit(&errinfo);
		return err ? -1 : 0;
	}

	if (smi_cost) {
		struct parse_events_error errinfo;
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			fprintf(stderr, "freeze_on_smi is not supported.\n");
			return -1;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (!pmu_have_event("msr", "aperf") ||
		    !pmu_have_event("msr", "smi")) {
			fprintf(stderr, "To measure SMI cost, it needs "
				"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
			return -1;
		}
		if (!force_metric_only)
			stat_config.metric_only = true;

		parse_events_error__init(&errinfo);
		err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
		if (err) {
			parse_events_error__print(&errinfo, smi_cost_attrs);
			fprintf(stderr, "Cannot set up SMI cost events\n");
		}
		parse_events_error__exit(&errinfo);
		return err ? -1 : 0;
	}
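	/*
	 * Background for the block below, for illustration: --topdown
	 * implements Top-Down Microarchitecture Analysis on top of either
	 * the perf-metrics events (slots, topdown-retiring, ...) or the
	 * older topdown-* slot events, e.g.:
	 *
	 *   $ perf stat --topdown -a -- sleep 1
	 */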
-I1000\n"); 1846 } 1847 goto setup_metrics; 1848 } 1849 1850 zfree(&str); 1851 1852 if (stat_config.aggr_mode != AGGR_GLOBAL && 1853 stat_config.aggr_mode != AGGR_CORE) { 1854 pr_err("top down event configuration requires --per-core mode\n"); 1855 return -1; 1856 } 1857 stat_config.aggr_mode = AGGR_CORE; 1858 if (nr_cgroups || !target__has_cpu(&target)) { 1859 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1860 return -1; 1861 } 1862 1863 if (topdown_filter_events(topdown_attrs, &str, 1864 arch_topdown_check_group(&warn), 1865 pmu_name) < 0) { 1866 pr_err("Out of memory\n"); 1867 return -1; 1868 } 1869 1870 if (topdown_attrs[0] && str) { 1871 struct parse_events_error errinfo; 1872 if (warn) 1873 arch_topdown_group_warn(); 1874 setup_metrics: 1875 parse_events_error__init(&errinfo); 1876 err = parse_events(evsel_list, str, &errinfo); 1877 if (err) { 1878 fprintf(stderr, 1879 "Cannot set up top down events %s: %d\n", 1880 str, err); 1881 parse_events_error__print(&errinfo, str); 1882 parse_events_error__exit(&errinfo); 1883 free(str); 1884 return -1; 1885 } 1886 parse_events_error__exit(&errinfo); 1887 } else { 1888 fprintf(stderr, "System does not support topdown\n"); 1889 return -1; 1890 } 1891 free(str); 1892 } 1893 1894 if (!stat_config.topdown_level) 1895 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1896 1897 if (!evsel_list->core.nr_entries) { 1898 if (target__has_cpu(&target)) 1899 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1900 1901 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1902 return -1; 1903 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1904 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1905 return -1; 1906 } 1907 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1908 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1909 return -1; 1910 } 1911 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1912 return -1; 1913 /* Platform specific attrs */ 1914 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) 1915 return -1; 1916 } 1917 1918 /* Detailed events get appended to the event list: */ 1919 1920 if (detailed_run < 1) 1921 return 0; 1922 1923 /* Append detailed run extra attributes: */ 1924 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1925 return -1; 1926 1927 if (detailed_run < 2) 1928 return 0; 1929 1930 /* Append very detailed run extra attributes: */ 1931 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1932 return -1; 1933 1934 if (detailed_run < 3) 1935 return 0; 1936 1937 /* Append very, very detailed run extra attributes: */ 1938 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1939 } 1940 1941 static const char * const stat_record_usage[] = { 1942 "perf stat record [<options>]", 1943 NULL, 1944 }; 1945 1946 static void init_features(struct perf_session *session) 1947 { 1948 int feat; 1949 1950 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1951 perf_header__set_feat(&session->header, feat); 1952 1953 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1954 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1955 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1956 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1957 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1958 } 1959 1960 static int __cmd_record(int argc, const char **argv) 1961 { 1962 struct perf_session *session; 1963 struct perf_data *data = 
&perf_stat.data; 1964 1965 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1966 PARSE_OPT_STOP_AT_NON_OPTION); 1967 1968 if (output_name) 1969 data->path = output_name; 1970 1971 if (stat_config.run_count != 1 || forever) { 1972 pr_err("Cannot use -r option with perf stat record.\n"); 1973 return -1; 1974 } 1975 1976 session = perf_session__new(data, NULL); 1977 if (IS_ERR(session)) { 1978 pr_err("Perf session creation failed\n"); 1979 return PTR_ERR(session); 1980 } 1981 1982 init_features(session); 1983 1984 session->evlist = evsel_list; 1985 perf_stat.session = session; 1986 perf_stat.record = true; 1987 return argc; 1988 } 1989 1990 static int process_stat_round_event(struct perf_session *session, 1991 union perf_event *event) 1992 { 1993 struct perf_record_stat_round *stat_round = &event->stat_round; 1994 struct evsel *counter; 1995 struct timespec tsh, *ts = NULL; 1996 const char **argv = session->header.env.cmdline_argv; 1997 int argc = session->header.env.nr_cmdline; 1998 1999 evlist__for_each_entry(evsel_list, counter) 2000 perf_stat_process_counter(&stat_config, counter); 2001 2002 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2003 update_stats(&walltime_nsecs_stats, stat_round->time); 2004 2005 if (stat_config.interval && stat_round->time) { 2006 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2007 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2008 ts = &tsh; 2009 } 2010 2011 print_counters(ts, argc, argv); 2012 return 0; 2013 } 2014 2015 static 2016 int process_stat_config_event(struct perf_session *session, 2017 union perf_event *event) 2018 { 2019 struct perf_tool *tool = session->tool; 2020 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2021 2022 perf_event__read_stat_config(&stat_config, &event->stat_config); 2023 2024 if (perf_cpu_map__empty(st->cpus)) { 2025 if (st->aggr_mode != AGGR_UNSET) 2026 pr_warning("warning: processing task data, aggregation mode not set\n"); 2027 return 0; 2028 } 2029 2030 if (st->aggr_mode != AGGR_UNSET) 2031 stat_config.aggr_mode = st->aggr_mode; 2032 2033 if (perf_stat.data.is_pipe) 2034 perf_stat_init_aggr_mode(); 2035 else 2036 perf_stat_init_aggr_mode_file(st); 2037 2038 return 0; 2039 } 2040 2041 static int set_maps(struct perf_stat *st) 2042 { 2043 if (!st->cpus || !st->threads) 2044 return 0; 2045 2046 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2047 return -EINVAL; 2048 2049 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2050 2051 if (evlist__alloc_stats(evsel_list, true)) 2052 return -ENOMEM; 2053 2054 st->maps_allocated = true; 2055 return 0; 2056 } 2057 2058 static 2059 int process_thread_map_event(struct perf_session *session, 2060 union perf_event *event) 2061 { 2062 struct perf_tool *tool = session->tool; 2063 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2064 2065 if (st->threads) { 2066 pr_warning("Extra thread map event, ignoring.\n"); 2067 return 0; 2068 } 2069 2070 st->threads = thread_map__new_event(&event->thread_map); 2071 if (!st->threads) 2072 return -ENOMEM; 2073 2074 return set_maps(st); 2075 } 2076 2077 static 2078 int process_cpu_map_event(struct perf_session *session, 2079 union perf_event *event) 2080 { 2081 struct perf_tool *tool = session->tool; 2082 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2083 struct perf_cpu_map *cpus; 2084 2085 if (st->cpus) { 2086 pr_warning("Extra cpu map event, ignoring.\n"); 2087 return 0; 2088 } 2089 2090 cpus = cpu_map__new_data(&event->cpu_map.data); 2091 if 
static int process_stat_round_event(struct perf_session *session,
				    union perf_event *event)
{
	struct perf_record_stat_round *stat_round = &event->stat_round;
	struct evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each_entry(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}

static
int process_stat_config_event(struct perf_session *session,
			      union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (perf_cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}

static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);

	if (evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}

static
int process_thread_map_event(struct perf_session *session,
			     union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}

static
int process_cpu_map_event(struct perf_session *session,
			  union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct perf_cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}

static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};

static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode = AGGR_UNSET,
};

static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat__init_shadow_stats();

	perf_stat.data.path = input_name;
	perf_stat.data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	perf_stat.session = session;
	stat_config.output = stderr;
	evsel_list = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}
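
/*
 * Illustrative: the options above let a recorded session be
 * re-aggregated at report time, e.g.:
 *
 *   $ perf stat record -a -- sleep 1
 *   $ perf stat report --per-core
 */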
static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of the following
	 * conditions is met:
	 *
	 * - there's no workload specified
	 * - there is a workload specified but all requested
	 *   events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->core.requires_cpu &&
			    strcmp(counter->name, "duration_time")) {
				return;
			}
		}

		if (evsel_list->core.nr_entries)
			target.system_wide = true;
	}
}

int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For the record command, -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output && !stat_config.quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;
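
	/*
	 * Illustrative: the two redirection styles handled above are
	 * mutually exclusive, e.g.:
	 *
	 *   $ perf stat -o stats.txt -- sleep 1
	 *   $ perf stat --log-fd 3 3>stats.txt -- sleep 1
	 */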
	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;
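
	/*
	 * Illustrative: "perf stat -x, -- sleep 1" emits comma-separated
	 * fields, so thousands grouping is turned off above unless -B was
	 * passed explicitly, in which case the combination is rejected.
	 */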
	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single
	 * run and when there's a specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option\n");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
	    !target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr and cgroup are for system-wide only;
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	stat_config.system_wide = target.system_wide;
	if (target.cpu_list) {
		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
		if (!stat_config.user_requested_cpu_list) {
			status = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Metric parsing needs to be delayed as metrics may optimize events
	 * knowing the target is system-wide.
	 */
	if (metrics) {
		metricgroup__parse_groups(evsel_list, metrics,
					  stat_config.metric_no_group,
					  stat_config.metric_no_merge,
					  stat_config.user_requested_cpu_list,
					  stat_config.system_wide,
					  &stat_config.metric_events);
		zfree(&metrics);
	}
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	if (add_default_attributes())
		goto out;

	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
		pr_err("failed to use cpu list %s\n", target.cpu_list);
		goto out;
	}

	target.hybrid = perf_pmu__has_hybrid();
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we can print them out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}
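
	/*
	 * Illustrative setups accepted by the checks above:
	 *
	 *   $ perf stat -I 1000 --interval-count 5 -- sleep 10
	 *   $ perf stat --timeout 500 -- sleep 10
	 *
	 * whereas combining -I with --timeout, or asking for a timeout
	 * below 10ms, is rejected.
	 */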
	if (evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT, skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Enable ignoring missing threads when -p option is defined. */
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1 && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary))
		print_counters(NULL, argc, argv);

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	if (stat_config.iostat_run)
		iostat_release(evsel_list);

	zfree(&stat_config.walltime_run);
	zfree(&stat_config.user_requested_cpu_list);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	metricgroup__rblist_exit(&stat_config.metric_events);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}