// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/tool_pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
#include "util/intel-tpebs.h"
#include "asm/bug.h"

#include <linux/list_sort.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>

#ifdef HAVE_BPF_SKEL
#include "util/bpf_skel/bperf_cgroup.h"
#endif

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"bus/event_source/devices/cpu/freeze_on_smi"
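/*
 * Note: FREEZE_ON_SMI_PATH is relative to the sysfs mount point (see the
 * sysfs__read_int()/sysfs__write_int() calls in add_default_events()). On
 * x86, setting freeze_on_smi freezes the core PMU counters while the CPU is
 * in System Management Mode; --smi-cost relies on this to attribute cycles
 * spent in SMIs.
 */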
struct rusage_stats {
	struct stats ru_utime_usec_stat;
	struct stats ru_stime_usec_stat;
};

static void print_counters(struct timespec *ts, int argc, const char **argv);

static struct evlist *evsel_list;
static struct parse_events_option_args parse_events_option_args = {
	.evlistp = &evsel_list,
};

static bool all_counters_use_bpf = true;

static struct target target;

static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static bool forever = false;
static bool force_metric_only = false;
static struct timespec ref_time;
static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
static char *metrics;
static struct rusage_stats ru_stats;

struct perf_stat {
	bool			record;
	struct perf_data	data;
	struct perf_session	*session;
	u64			bytes_written;
	struct perf_tool	tool;
	bool			maps_allocated;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	enum aggr_mode		aggr_mode;
	u32			aggr_level;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

static volatile sig_atomic_t done = 0;

/* Options set from the command line. */
struct opt_aggr_mode {
	bool node, socket, die, cluster, cache, core, thread, no_aggr;
};

/* Turn command line option into most generic aggregation mode setting. */
static enum aggr_mode opt_aggr_mode_to_aggr_mode(struct opt_aggr_mode *opt_mode)
{
	enum aggr_mode mode = AGGR_GLOBAL;

	if (opt_mode->node)
		mode = AGGR_NODE;
	if (opt_mode->socket)
		mode = AGGR_SOCKET;
	if (opt_mode->die)
		mode = AGGR_DIE;
	if (opt_mode->cluster)
		mode = AGGR_CLUSTER;
	if (opt_mode->cache)
		mode = AGGR_CACHE;
	if (opt_mode->core)
		mode = AGGR_CORE;
	if (opt_mode->thread)
		mode = AGGR_THREAD;
	if (opt_mode->no_aggr)
		mode = AGGR_NONE;
	return mode;
}

static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *warned_leader = NULL;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel *leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (perf_cpu_map__equal(leader->core.cpus, evsel->core.cpus))
			continue;

		/* If there's a mismatch, break up the group and warn the user. */
		if (warned_leader != leader) {
			char buf[200];

			pr_warning("WARNING: grouped events cpus do not match.\n"
				   "Events with CPUs not matching the leader will "
				   "be removed from the group.\n");
			evsel__group_desc(leader, buf, sizeof(buf));
			pr_warning("  %s\n", buf);
			warned_leader = leader;
		}
		if (verbose > 0) {
			char buf[200];

			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", evsel->name, buf);
		}

		evsel__remove_from_group(evsel, leader);
	}
}
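/*
 * A common way to trigger the warning in evlist__check_cpu_maps() is
 * grouping events whose PMUs cover different CPUs, e.g. a RAPL energy
 * event (often CPU 0 only) grouped with a core event:
 *
 *   perf stat -e '{power/energy-cores/,cycles}' -a sleep 1
 *
 * Here cycles does not match the leader's CPU map, so it is removed from
 * the group and counted standalone.
 */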
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
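/*
 * Example of the borrow handling above: a = 5s + 200000000ns and
 * b = 3s + 900000000ns give r = 1s + 300000000ns (1.3s); the nanosecond
 * subtraction borrows one second from the seconds field.
 */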
static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	memset(stat_config.walltime_nsecs_stats, 0, sizeof(*stat_config.walltime_nsecs_stats));
}

static int process_synthesized_event(const struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread)
{
	int err = evsel__read_counter(counter, cpu_map_idx, thread);

	/*
	 * Reading user and system time will fail when the process
	 * terminates. Use the wait4 values in that case.
	 */
	if (err && cpu_map_idx == 0 &&
	    (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
	     evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		struct perf_counts_values *old_count = NULL;
		u64 val;

		if (counter->prev_raw_counts)
			old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);

		if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;

		count->val = val;
		if (old_count) {
			count->run = old_count->run + 1;
			count->ena = old_count->ena + 1;
		} else {
			count->run++;
			count->ena++;
		}
		return 0;
	}
	return err;
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread)) {
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				evsel__name(counter),
				perf_cpu_map__cpu(evsel__cpus(counter),
						  cpu_map_idx).cpu,
				count->val, count->ena, count->run);
		}
	}

	return 0;
}
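/*
 * read_counters_with_affinity() below pins the tool thread to each CPU
 * before reading the counters that live there, so the read(2) happens on
 * the local CPU; this helps avoid cross-CPU IPIs when collecting many
 * per-CPU counters. evlist__for_each_cpu() visits events CPU by CPU so
 * the affinity only needs to change once per CPU.
 */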
static int read_counters_with_affinity(void)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		if (evsel__is_tool(counter))
			continue;

		if (!counter->err)
			counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}

static int read_bpf_map_counters(void)
{
	struct evsel *counter;
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
	}
	return 0;
}

static int read_tool_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		int idx;

		if (!evsel__is_tool(counter))
			continue;

		perf_cpu_map__for_each_idx(idx, counter->core.cpus) {
			if (!counter->err)
				counter->err = read_counter_cpu(counter, idx);
		}
	}
	return 0;
}

static int read_counters(void)
{
	int ret;

	if (stat_config.stop_read_counter)
		return 0;

	// Read all BPF counters first.
	ret = read_bpf_map_counters();
	if (ret)
		return ret;

	// Read non-BPF and non-tool counters next.
	ret = read_counters_with_affinity();
	if (ret)
		return ret;

	// Read the tool counters last. This way the duration_time counter
	// should always be greater than any other counter's enabled time.
	return read_tool_counters();
}
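/*
 * process_counters() below first aggregates each counter's per-CPU values,
 * then perf_stat_merge_counters() combines counts from events that were
 * split across multiple PMU instances (e.g. one alias opened on several
 * uncore PMUs); --no-merge keeps such instances separate.
 */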
static void process_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		counter->err = 0;
	}

	perf_stat_merge_counters(&stat_config, evsel_list);
	perf_stat_process_percore(&stat_config, evsel_list);
}

static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	evlist__reset_aggr_stats(evsel_list);

	if (read_counters() == 0)
		process_counters();

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	init_stats(stat_config.walltime_nsecs_stats);
	update_stats(stat_config.walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}

static bool handle_interval(unsigned int interval, int *times)
{
	if (interval) {
		process_interval();
		if (interval_count && !(--(*times)))
			return true;
	}
	return false;
}

static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (!target__enable_on_exec(&target)) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
	}
	return 0;
}

static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile sig_atomic_t workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

static bool is_target_alive(struct target *_target,
			    struct perf_thread_map *threads)
{
	struct stat st;
	int i;

	if (!target__has_task(_target))
		return true;

	for (i = 0; i < threads->nr; i++) {
		char path[PATH_MAX];

		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
			  threads->map[i].pid);

		if (!stat(path, &st))
			return true;
	}

	return false;
}

static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}

static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
			int *time_to_sleep)
{
	int tts = *time_to_sleep;
	struct timespec time_diff;

	diff_timespec(&time_diff, time_stop, time_start);

	tts -= time_diff.tv_sec * MSEC_PER_SEC +
	       time_diff.tv_nsec / NSEC_PER_MSEC;

	if (tts < 0)
		tts = 0;

	*time_to_sleep = tts;
}
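/*
 * Example: with -I 1000, dispatch_events() below starts each interval with
 * a 1000ms budget. If evlist__poll() wakes after 300ms because of control
 * fd traffic, compute_tts() leaves 700ms of sleep so the interval still
 * ends on time instead of restarting the full second.
 */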
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}

enum counter_recovery {
	COUNTER_SKIP,
	COUNTER_RETRY,
};

static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
{
	char msg[BUFSIZ];

	assert(!counter->supported);

	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
		if (verbose > 0) {
			evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
			ui__warning("%s event is not supported by the kernel.\n%s\n",
				    evsel__name(counter), msg);
		}
		return COUNTER_SKIP;
	}
	if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
		if (verbose > 0)
			ui__warning("%s\n", msg);
		counter->supported = true;
		return COUNTER_RETRY;
	}
	if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
	    evsel_list->core.threads && evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			counter->supported = true;
			return COUNTER_RETRY;
		}
	}
	if (verbose > 0) {
		evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
		ui__warning(err == EOPNOTSUPP
			    ? "%s event is not supported by the kernel.\n%s\n"
			    : "skipping event %s that kernel failed to open.\n%s\n",
			    evsel__name(counter), msg);
	}
	return COUNTER_SKIP;
}
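/*
 * With PERF_FORMAT_GROUP set below, a single read(2) on the group leader
 * returns the values of all group members at once, giving a consistent
 * snapshot across the grouped counters; PERF_FORMAT_ID is what lets the
 * members be told apart in that combined buffer.
 */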
static int create_perf_stat_counter(struct evsel *evsel,
				    struct perf_stat_config *config,
				    int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	/* Creating a stat counter may be retried, so (re)initialize the read format. */
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of non trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		if (target__enable_on_exec(&target))
			attr->enable_on_exec = 1;
	}

	return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
					      evsel->core.threads);
}

static void update_rusage_stats(const struct rusage *rusage)
{
	const u64 us_to_ns = 1000;
	const u64 s_to_ns = 1000000000;

	update_stats(&ru_stats.ru_utime_usec_stat,
		     (rusage->ru_utime.tv_usec * us_to_ns + rusage->ru_utime.tv_sec * s_to_ns));
	update_stats(&ru_stats.ru_stime_usec_stat,
		     (rusage->ru_stime.tv_usec * us_to_ns + rusage->ru_stime.tv_sec * s_to_ns));
}
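/*
 * Despite the *_usec_stat field names, the values accumulated above are in
 * nanoseconds (tv_usec * 1000 + tv_sec * 1e9). That matches the nanosecond
 * unit expected when these means stand in for the user_time/system_time
 * tool events in read_single_counter().
 */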
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	int err, open_err = 0;
	bool second_pass = false, has_supported_counters;

	if (forks) {
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0) {
			err = -1;
			goto err_out;
		}
		affinity = &saved_affinity;
	}

	evlist__for_each_entry(evsel_list, counter) {
		counter->reset_group = false;
		if (bpf_counter__load(counter, &target)) {
			err = -1;
			goto err_out;
		}
		if (!(evsel__is_bperf(counter)))
			all_counters_use_bpf = false;
	}

	evlist__reset_aggr_stats(evsel_list);

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		counter = evlist_cpu_itr.evsel;

		/*
		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
		 * no need to call it again here.
		 */
		if (target.use_bpf)
			break;

		if (counter->reset_group || !counter->supported)
			continue;
		if (evsel__is_bperf(counter))
			continue;

		while (true) {
			if (create_perf_stat_counter(counter, &stat_config,
						     evlist_cpu_itr.cpu_map_idx) == 0)
				break;

			open_err = errno;
			/*
			 * Weak group failed. We cannot just undo this here
			 * because earlier CPUs might be in group mode, and the kernel
			 * doesn't support mixing group and non group reads. Defer
			 * it to later.
			 * Don't close here because we're in the wrong affinity.
			 */
			if ((open_err == EINVAL || open_err == EBADF) &&
			    evsel__leader(counter) != counter &&
			    counter->weak_group) {
				evlist__reset_weak_group(evsel_list, counter, false);
				assert(counter->reset_group);
				counter->supported = true;
				second_pass = true;
				break;
			}

			if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
				break;
		}
	}

	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */

		/* First close errored or weak retry */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group && counter->supported)
				continue;

			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
		}
		/* Now reopen weak */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group)
				continue;

			while (true) {
				pr_debug2("reopening weak %s\n", evsel__name(counter));
				if (create_perf_stat_counter(counter, &stat_config,
							     evlist_cpu_itr.cpu_map_idx) == 0)
					break;

				open_err = errno;
				if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
					break;
			}
		}
	}
	affinity__cleanup(affinity);
	affinity = NULL;

	has_supported_counters = false;
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported) {
			perf_evsel__free_fd(&counter->core);
			continue;
		}
		has_supported_counters = true;

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (evsel__should_store_id(counter) &&
		    evsel__store_ids(counter, evsel_list)) {
			err = -1;
			goto err_out;
		}
	}
	if (!has_supported_counters) {
		evsel__open_strerror(evlist__first(evsel_list), &target, open_err,
				     msg, sizeof(msg));
		ui__error("No supported events found.\n%s\n", msg);

		if (child_pid != -1)
			kill(child_pid, SIGTERM);
		err = -1;
		goto err_out;
	}

	if (evlist__apply_filters(evsel_list, &counter, &target)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (STAT_RECORD) {
		int fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			goto err_out;

		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
							 process_synthesized_event, is_pipe);
		if (err < 0)
			goto err_out;
	}

	if (target.initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
	} else {
		err = enable_counters();
		if (err) {
			err = -1;
			goto err_out;
		}
	}

	/* Exec the command, if any */
	if (forks)
		evlist__start_workload(evsel_list);

	if (target.initial_delay > 0) {
		usleep(target.initial_delay * USEC_PER_MSEC);
		err = enable_counters();
		if (err) {
			err = -1;
			goto err_out;
		}

		pr_info(EVLIST_ENABLED_MSG);
	}

	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
			status = dispatch_events(forks, timeout, interval, &times);
		if (child_pid != -1) {
			if (timeout)
				kill(child_pid, SIGTERM);
			wait4(child_pid, &status, 0, &stat_config.ru_data);
		}

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			err = -1;
			goto err_out;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		status = dispatch_events(forks, timeout, interval, &times);
	}

	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	if (interval && stat_config.summary) {
		stat_config.interval = 0;
		stat_config.stop_read_counter = true;
		init_stats(stat_config.walltime_nsecs_stats);
		update_stats(stat_config.walltime_nsecs_stats, t1 - t0);

		evlist__copy_prev_raw_counts(evsel_list);
		evlist__reset_prev_raw_counts(evsel_list);
		evlist__reset_aggr_stats(evsel_list);
	} else {
		update_stats(stat_config.walltime_nsecs_stats, t1 - t0);
		update_rusage_stats(&stat_config.ru_data);
	}

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	if (read_counters() == 0)
		process_counters();

	/*
	 * Keep evsel_list alive for 'perf stat record': it is processed
	 * later and will be closed afterwards.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);

err_out:
	if (forks)
		evlist__cancel_workload(evsel_list);

	affinity__cleanup(affinity);
	return err;
}
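/*
 * A typical use of pre_cmd/post_cmd handled by run_perf_stat() below
 * (adapted from the perf-stat documentation):
 *
 *   perf stat -r 10 --sync --pre 'make -s O=defconfig-build clean' \
 *             -- make -s -j64 O=defconfig-build
 *
 * The pre command recreates comparable starting conditions for every
 * repetition.
 */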
/*
 * Returns -1 for fatal errors which signifies to not continue
 * when in repeat mode.
 *
 * Returns < -1 error codes when stat record is used. These
 * result in the stat information being displayed, but writing
 * to the file fails and is non-fatal.
 */
static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;
	if (quiet)
		return;

	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile sig_atomic_t signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * Render child_pid harmless: don't send SIGTERM to a random
	 * process in case of a race condition and fast PID recycling.
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * Avoid a race condition with the SIGCHLD handler in skip_signal(),
	 * which modifies child_pid: the goal is to avoid sending SIGTERM
	 * to a random process.
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	perf_stat__set_big_num(!unset);
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	if (metrics) {
		char *tmp;

		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
			return -ENOMEM;
		free(metrics);
		metrics = tmp;
	} else {
		metrics = strdup(str);
		if (!metrics)
			return -ENOMEM;
	}
	return 0;
}
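/*
 * append_metric_groups() above accumulates repeated -M options into a
 * single comma-separated list, so passing -M twice concatenates the two
 * group lists just as if they had been given as one comma-joined argument.
 */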
static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
			      const char *str, int unset)
{
	if (stat_config.cgroup_list) {
		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
		return -1;
	}

	return parse_cgroups(opt, str, unset);
}

static int parse_cputype(const struct option *opt,
			 const char *str,
			 int unset __maybe_unused)
{
	const struct perf_pmu *pmu;
	struct evlist *evlist = *(struct evlist **)opt->value;

	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	pmu = perf_pmus__pmu_for_pmu_filter(str);
	if (!pmu) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}
	parse_events_option_args.pmu_filter = pmu->name;

	return 0;
}

static int parse_cache_level(const struct option *opt,
			     const char *str,
			     int unset __maybe_unused)
{
	int level;
	struct opt_aggr_mode *opt_aggr_mode = (struct opt_aggr_mode *)opt->value;
	u32 *aggr_level = (u32 *)opt->data;

	/*
	 * If no string is specified, aggregate based on the topology of
	 * Last Level Cache (LLC). Since the LLC level can change from
	 * architecture to architecture, set level greater than
	 * MAX_CACHE_LVL which will be interpreted as LLC.
	 */
	if (str == NULL) {
		level = MAX_CACHE_LVL + 1;
		goto out;
	}

	/*
	 * The format to specify cache level is LX or lX where X is the
	 * cache level.
	 */
	if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) {
		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
		       MAX_CACHE_LVL,
		       MAX_CACHE_LVL);
		return -EINVAL;
	}

	level = atoi(&str[1]);
	if (level < 1) {
		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
		       MAX_CACHE_LVL,
		       MAX_CACHE_LVL);
		return -EINVAL;
	}

	if (level > MAX_CACHE_LVL) {
		pr_err("perf only supports max cache level of %d.\n"
		       "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL);
		return -EINVAL;
	}
out:
	opt_aggr_mode->cache = true;
	*aggr_level = level;
	return 0;
}
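/*
 * Usage sketch for the option parsed above: "--per-cache=L3" aggregates
 * counts per L3 cache instance, while a bare "--per-cache" selects the
 * last level cache by encoding a level above MAX_CACHE_LVL.
 */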
/**
 * Calculate the cache instance ID from the map in
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 * Cache instance ID is the first CPU reported in the shared_cpu_list file.
 */
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
{
	int id;
	struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);

	/*
	 * If the map contains no CPU, fall back to the current CPU as the
	 * cache instance ID; otherwise use the first online CPU of the
	 * cache domain as the ID.
	 */
	id = perf_cpu_map__min(cpu_map).cpu;
	if (id == -1)
		id = cpu.cpu;

	/* Free the perf_cpu_map used to find the cache ID */
	perf_cpu_map__put(cpu_map);

	return id;
}

/**
 * cpu__get_cache_details - Returns 0 if successful in populating the
 * cache level and cache id. Cache level is read from
 * /sys/devices/system/cpu/cpuX/cache/indexY/level, whereas the cache
 * instance ID is the first CPU reported by
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 */
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
{
	int ret = 0;
	u32 cache_level = stat_config.aggr_level;
	struct cpu_cache_level caches[MAX_CACHE_LVL];
	u32 i = 0, caches_cnt = 0;

	cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
	cache->cache = -1;

	ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
	if (ret) {
		/*
		 * If caches_cnt is not 0, cpu_cache_level data
		 * was allocated when building the topology.
		 * Free the allocated data before returning.
		 */
		if (caches_cnt)
			goto free_caches;

		return ret;
	}

	if (!caches_cnt)
		return -1;

	/*
	 * Save the data for the highest level if no
	 * level was specified by the user.
	 */
	if (cache_level > MAX_CACHE_LVL) {
		int max_level_index = 0;

		for (i = 1; i < caches_cnt; ++i) {
			if (caches[i].level > caches[max_level_index].level)
				max_level_index = i;
		}

		cache->cache_lvl = caches[max_level_index].level;
		cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);

		/* Reset i to 0 to free entire caches[] */
		i = 0;
		goto free_caches;
	}

	for (i = 0; i < caches_cnt; ++i) {
		if (caches[i].level == cache_level) {
			cache->cache_lvl = cache_level;
			cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
		}

		cpu_cache_level__free(&caches[i]);
	}

free_caches:
	/*
	 * Free all the allocated cpu_cache_level data.
	 */
	while (i < caches_cnt)
		cpu_cache_level__free(&caches[i++]);

	return ret;
}
1377 */ 1378 static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data) 1379 { 1380 int ret; 1381 struct aggr_cpu_id id; 1382 struct perf_cache cache; 1383 1384 id = aggr_cpu_id__die(cpu, data); 1385 if (aggr_cpu_id__is_empty(&id)) 1386 return id; 1387 1388 ret = cpu__get_cache_details(cpu, &cache); 1389 if (ret) 1390 return id; 1391 1392 id.cache_lvl = cache.cache_lvl; 1393 id.cache = cache.cache; 1394 return id; 1395 } 1396 1397 static const char *const aggr_mode__string[] = { 1398 [AGGR_CORE] = "core", 1399 [AGGR_CACHE] = "cache", 1400 [AGGR_CLUSTER] = "cluster", 1401 [AGGR_DIE] = "die", 1402 [AGGR_GLOBAL] = "global", 1403 [AGGR_NODE] = "node", 1404 [AGGR_NONE] = "none", 1405 [AGGR_SOCKET] = "socket", 1406 [AGGR_THREAD] = "thread", 1407 [AGGR_UNSET] = "unset", 1408 }; 1409 1410 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1411 struct perf_cpu cpu) 1412 { 1413 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1414 } 1415 1416 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1417 struct perf_cpu cpu) 1418 { 1419 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1420 } 1421 1422 static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused, 1423 struct perf_cpu cpu) 1424 { 1425 return aggr_cpu_id__cache(cpu, /*data=*/NULL); 1426 } 1427 1428 static struct aggr_cpu_id perf_stat__get_cluster(struct perf_stat_config *config __maybe_unused, 1429 struct perf_cpu cpu) 1430 { 1431 return aggr_cpu_id__cluster(cpu, /*data=*/NULL); 1432 } 1433 1434 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1435 struct perf_cpu cpu) 1436 { 1437 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1438 } 1439 1440 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1441 struct perf_cpu cpu) 1442 { 1443 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1444 } 1445 1446 static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused, 1447 struct perf_cpu cpu) 1448 { 1449 return aggr_cpu_id__global(cpu, /*data=*/NULL); 1450 } 1451 1452 static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused, 1453 struct perf_cpu cpu) 1454 { 1455 return aggr_cpu_id__cpu(cpu, /*data=*/NULL); 1456 } 1457 1458 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1459 aggr_get_id_t get_id, struct perf_cpu cpu) 1460 { 1461 struct aggr_cpu_id id; 1462 1463 /* per-process mode - should use global aggr mode */ 1464 if (cpu.cpu == -1 || cpu.cpu >= config->cpus_aggr_map->nr) 1465 return get_id(config, cpu); 1466 1467 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1468 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1469 1470 id = config->cpus_aggr_map->map[cpu.cpu]; 1471 return id; 1472 } 1473 1474 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1475 struct perf_cpu cpu) 1476 { 1477 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1478 } 1479 1480 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1481 struct perf_cpu cpu) 1482 { 1483 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1484 } 1485 1486 static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config, 1487 struct perf_cpu cpu) 1488 { 1489 return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu); 1490 } 1491 1492 
static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config,
							struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu);
}

static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config,
							 struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return aggr_cpu_id__socket;
	case AGGR_DIE:
		return aggr_cpu_id__die;
	case AGGR_CLUSTER:
		return aggr_cpu_id__cluster;
	case AGGR_CACHE:
		return aggr_cpu_id__cache;
	case AGGR_CORE:
		return aggr_cpu_id__core;
	case AGGR_NODE:
		return aggr_cpu_id__node;
	case AGGR_NONE:
		return aggr_cpu_id__cpu;
	case AGGR_GLOBAL:
		return aggr_cpu_id__global;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_cached;
	case AGGR_DIE:
		return perf_stat__get_die_cached;
	case AGGR_CLUSTER:
		return perf_stat__get_cluster_cached;
	case AGGR_CACHE:
		return perf_stat__get_cache_id_cached;
	case AGGR_CORE:
		return perf_stat__get_core_cached;
	case AGGR_NODE:
		return perf_stat__get_node_cached;
	case AGGR_NONE:
		return perf_stat__get_cpu_cached;
	case AGGR_GLOBAL:
		return perf_stat__get_global_cached;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}
static int perf_stat_init_aggr_mode(void)
{
	int nr;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

	if (get_id) {
		bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
							 get_id, /*data=*/NULL, needs_sort);
		if (!stat_config.aggr_map) {
			pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
			return -1;
		}
		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
	}

	if (stat_config.aggr_mode == AGGR_THREAD) {
		nr = perf_thread_map__nr(evsel_list->core.threads);
		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	/*
	 * The evsel_list->cpus is the base we operate on; the highest CPU
	 * number determines the size of the aggregation translation cpumap.
	 */
	nr = perf_cpu_map__max(evsel_list->core.all_cpus).cpu + 1;
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}

static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
{
	free(map);
}

static void perf_stat__exit_aggr_mode(void)
{
	cpu_aggr_map__delete(stat_config.aggr_map);
	cpu_aggr_map__delete(stat_config.cpus_aggr_map);
	stat_config.aggr_map = NULL;
	stat_config.cpus_aggr_map = NULL;
}

static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1)
		id.socket = env->cpu[cpu.cpu].socket_id;

	return id;
}

static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * die_id is relative to socket, so start
		 * with the socket ID and then add die to
		 * make a unique ID.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
	}

	return id;
}
1680 */ 1681 if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level) 1682 continue; 1683 1684 cpu_map = perf_cpu_map__new(caches[i].map); 1685 map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); 1686 perf_cpu_map__put(cpu_map); 1687 1688 if (map_contains_cpu != -1) { 1689 id->cache_lvl = caches[i].level; 1690 id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); 1691 return; 1692 } 1693 } 1694 } 1695 1696 static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, 1697 void *data) 1698 { 1699 struct perf_env *env = data; 1700 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1701 1702 if (cpu.cpu != -1) { 1703 u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level; 1704 1705 id.socket = env->cpu[cpu.cpu].socket_id; 1706 id.die = env->cpu[cpu.cpu].die_id; 1707 perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id); 1708 } 1709 1710 return id; 1711 } 1712 1713 static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu, 1714 void *data) 1715 { 1716 struct perf_env *env = data; 1717 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1718 1719 if (cpu.cpu != -1) { 1720 id.socket = env->cpu[cpu.cpu].socket_id; 1721 id.die = env->cpu[cpu.cpu].die_id; 1722 id.cluster = env->cpu[cpu.cpu].cluster_id; 1723 } 1724 1725 return id; 1726 } 1727 1728 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1729 { 1730 struct perf_env *env = data; 1731 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1732 1733 if (cpu.cpu != -1) { 1734 /* 1735 * core_id is relative to socket, die and cluster, we need a 1736 * global id. So we set socket, die id, cluster id and core id. 1737 */ 1738 id.socket = env->cpu[cpu.cpu].socket_id; 1739 id.die = env->cpu[cpu.cpu].die_id; 1740 id.cluster = env->cpu[cpu.cpu].cluster_id; 1741 id.core = env->cpu[cpu.cpu].core_id; 1742 } 1743 1744 return id; 1745 } 1746 1747 static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data) 1748 { 1749 struct perf_env *env = data; 1750 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1751 1752 if (cpu.cpu != -1) { 1753 /* 1754 * core_id is relative to socket and die, 1755 * we need a global id. 
static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die, so we need a global
		 * id: set socket, die id and core id.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
		id.cpu = cpu;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = perf_env__numa_node(data, cpu);
	return id;
}

static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
							   void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* Everything always aggregates to CPU 0. */
	id.cpu = (struct perf_cpu){ .cpu = 0 };
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_socket_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_die_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused,
						      struct perf_cpu cpu)
{
	return perf_env__get_cluster_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
						    struct perf_cpu cpu)
{
	return perf_env__get_cache_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_core_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_cpu_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_node_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_global_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}

static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_env__get_socket_aggr_by_cpu;
	case AGGR_DIE:
		return perf_env__get_die_aggr_by_cpu;
	case AGGR_CLUSTER:
		return perf_env__get_cluster_aggr_by_cpu;
	case AGGR_CACHE:
		return perf_env__get_cache_aggr_by_cpu;
	case AGGR_CORE:
		return perf_env__get_core_aggr_by_cpu;
	case AGGR_NODE:
		return perf_env__get_node_aggr_by_cpu;
	case AGGR_GLOBAL:
		return perf_env__get_global_aggr_by_cpu;
	case AGGR_NONE:
		return perf_env__get_cpu_aggr_by_cpu;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}
static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_file;
	case AGGR_DIE:
		return perf_stat__get_die_file;
	case AGGR_CLUSTER:
		return perf_stat__get_cluster_file;
	case AGGR_CACHE:
		return perf_stat__get_cache_file;
	case AGGR_CORE:
		return perf_stat__get_core_file;
	case AGGR_NODE:
		return perf_stat__get_node_file;
	case AGGR_GLOBAL:
		return perf_stat__get_global_file;
	case AGGR_NONE:
		return perf_stat__get_cpu_file;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = perf_session__env(st->session);
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

	if (stat_config.aggr_mode == AGGR_THREAD) {
		int nr = perf_thread_map__nr(evsel_list->core.threads);

		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	if (!get_id)
		return 0;

	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
						 get_id, env, needs_sort);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
	return 0;
}

static int default_evlist_evsel_cmp(void *priv __maybe_unused,
				    const struct list_head *l,
				    const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);

	if (evsel__leader(lhs) == evsel__leader(rhs)) {
		/* Within the same group, respect the original order. */
		return lhs_core->idx - rhs_core->idx;
	}

	/* Sort default metricgroup evsels first; among equals, default show events come first. */
	if (lhs->default_metricgroup != rhs->default_metricgroup)
		return lhs->default_metricgroup ? -1 : 1;

	if (lhs->default_show_events != rhs->default_show_events)
		return lhs->default_show_events ? -1 : 1;

	/* Sort by PMU type (prefers legacy types first). */
	if (lhs->pmu != rhs->pmu)
		return lhs->pmu->type - rhs->pmu->type;

	/* Sort by name. */
	return strcmp(evsel__name((struct evsel *)lhs), evsel__name((struct evsel *)rhs));
}
/*
 * Add default events, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_events(void)
{
	const char *pmu = parse_events_option_args.pmu_filter ?: "all";
	struct parse_events_error err;
	struct evlist *evlist = evlist__new();
	struct evsel *evsel;
	int ret = 0;

	if (!evlist)
		return -ENOMEM;

	parse_events_error__init(&err);

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		goto out;

	if (transaction_run) {
		/*
		 * Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture, test for such a metric name.
		 */
		if (!metricgroup__has_metric_or_groups(pmu, "transaction")) {
			pr_err("Missing transaction metrics\n");
			ret = -1;
			goto out;
		}
		ret = metricgroup__parse_groups(evlist, pmu, "transaction",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						stat_config.hardware_aware_grouping);
		goto out;
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			pr_err("freeze_on_smi is not supported.\n");
			ret = -1;
			goto out;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				pr_err("Failed to set freeze_on_smi.\n");
				ret = -1;
				goto out;
			}
			smi_reset = true;
		}

		if (!metricgroup__has_metric_or_groups(pmu, "smi")) {
			pr_err("Missing smi metrics\n");
			ret = -1;
			goto out;
		}

		if (!force_metric_only)
			stat_config.metric_only = true;

		ret = metricgroup__parse_groups(evlist, pmu, "smi",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						stat_config.hardware_aware_grouping);
		goto out;
	}

	if (topdown_run) {
		unsigned int max_level = metricgroups__topdown_max_level();
		char str[] = "TopdownL1";

		if (!force_metric_only)
			stat_config.metric_only = true;

		if (!max_level) {
			pr_err("Topdown requested but the topdown metric groups aren't present.\n"
			       "(See perf list; the metric groups have names like TopdownL1)\n");
			ret = -1;
			goto out;
		}
		if (stat_config.topdown_level > max_level) {
			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
			ret = -1;
			goto out;
		} else if (!stat_config.topdown_level) {
			stat_config.topdown_level = 1;
		}
		if (!stat_config.interval && !stat_config.metric_only) {
			fprintf(stat_config.output,
				"Topdown accuracy may decrease when measuring long periods.\n"
				"Please print the result regularly, e.g. -I1000\n");
		}
		str[8] = stat_config.topdown_level + '0';
		if (metricgroup__parse_groups(evlist,
					      pmu, str,
					      /*metric_no_group=*/false,
					      /*metric_no_merge=*/false,
					      /*metric_no_threshold=*/true,
					      stat_config.user_requested_cpu_list,
					      stat_config.system_wide,
					      stat_config.hardware_aware_grouping) < 0) {
			ret = -1;
			goto out;
		}
	}

	if (!stat_config.topdown_level)
		stat_config.topdown_level = 1;
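	/*
	 * The Default..Default4 metric groups below map onto the -d levels:
	 * with no -d only "Default" is parsed, -d adds "Default2", -d -d adds
	 * "Default3", and -d -d -d adds "Default4" (the "(int)i > detailed_run"
	 * check).
	 */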
		 */
		const char *default_metricgroup_names[] = {
			"Default", "Default2", "Default3", "Default4",
		};

		for (size_t i = 0; i < ARRAY_SIZE(default_metricgroup_names); i++) {
			struct evlist *metric_evlist;

			if (!metricgroup__has_metric_or_groups(pmu, default_metricgroup_names[i]))
				continue;

			if ((int)i > detailed_run)
				break;

			metric_evlist = evlist__new();
			if (!metric_evlist) {
				ret = -ENOMEM;
				break;
			}
			if (metricgroup__parse_groups(metric_evlist, pmu, default_metricgroup_names[i],
						      /*metric_no_group=*/false,
						      /*metric_no_merge=*/false,
						      /*metric_no_threshold=*/true,
						      stat_config.user_requested_cpu_list,
						      stat_config.system_wide,
						      stat_config.hardware_aware_grouping) < 0) {
				evlist__delete(metric_evlist);
				ret = -1;
				break;
			}

			evlist__for_each_entry(metric_evlist, evsel)
				evsel->default_metricgroup = true;

			evlist__splice_list_tail(evlist, &metric_evlist->core.entries);
			metricgroup__copy_metric_events(evlist, /*cgrp=*/NULL,
							&evlist->metric_events,
							&metric_evlist->metric_events);
			evlist__delete(metric_evlist);
		}
		list_sort(/*priv=*/NULL, &evlist->core.entries, default_evlist_evsel_cmp);
	}
out:
	if (!ret) {
		evlist__for_each_entry(evlist, evsel) {
			/*
			 * Make at least one event non-skippable so fatal errors are visible.
			 * 'cycles' always used to be default and non-skippable, so use that.
			 */
			if (!evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
				evsel->skippable = true;
		}
	}
	parse_events_error__exit(&err);
	evlist__splice_list_tail(evsel_list, &evlist->core.entries);
	metricgroup__copy_metric_events(evsel_list, /*cgrp=*/NULL,
					&evsel_list->metric_events,
					&evlist->metric_events);
	evlist__delete(evlist);
	return ret;
}

static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};

static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}

static int __cmd_record(const struct option stat_options[], struct opt_aggr_mode *opt_mode,
			int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	stat_config.aggr_mode = opt_aggr_mode_to_aggr_mode(opt_mode);

	if (output_name)
		data->path = output_name;

	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, NULL);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
	}

	init_features(session);

	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}
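
/*
 * Replay one PERF_RECORD_STAT_ROUND event from a recorded session: fold
 * in the counters read so far and print them as a live run would have.
 * Roughly (an illustrative shell session, not captured output):
 *
 *   $ perf stat record -I 1000 -- sleep 3
 *   $ perf stat report
 */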
static int
process_stat_round_event(const struct perf_tool *tool __maybe_unused,
			 struct perf_session *session,
			 union perf_event *event)
{
	struct perf_record_stat_round *stat_round = &event->stat_round;
	struct timespec tsh, *ts = NULL;
	struct perf_env *env = perf_session__env(session);
	const char **argv = env->cmdline_argv;
	int argc = env->nr_cmdline;

	process_counters();

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(stat_config.walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}

static
int process_stat_config_event(const struct perf_tool *tool,
			      struct perf_session *session,
			      union perf_event *event)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (perf_cpu_map__is_empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
	} else if (st->aggr_mode != AGGR_UNSET) {
		stat_config.aggr_mode = st->aggr_mode;
	}

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	if (stat_config.aggr_map) {
		int nr_aggr = stat_config.aggr_map->nr;

		if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
			pr_err("cannot allocate aggr counts\n");
			return -1;
		}
	}
	return 0;
}

static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);

	if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}

static
int process_thread_map_event(const struct perf_tool *tool,
			     struct perf_session *session __maybe_unused,
			     union perf_event *event)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}

static
int process_cpu_map_event(const struct perf_tool *tool,
			  struct perf_session *session __maybe_unused,
			  union perf_event *event)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct perf_cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}

static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};

static struct perf_stat perf_stat = {
	.aggr_mode = AGGR_UNSET,
	.aggr_level = 0,
};

static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
		OPT_STRING('i', "input", &input_name, "file", "input file name"),
		OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
			     "aggregate counts per processor socket", AGGR_SOCKET),
		OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
			     "aggregate counts per processor die", AGGR_DIE),
		OPT_SET_UINT(0, "per-cluster", &perf_stat.aggr_mode,
			     "aggregate counts per processor cluster", AGGR_CLUSTER),
		OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
				    "cache level",
				    "aggregate count at this cache level (Default: LLC)",
				    parse_cache_level),
		OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
			     "aggregate counts per physical processor core", AGGR_CORE),
		OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
			     "aggregate counts per numa node", AGGR_NODE),
		OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
			     "disable CPU count aggregation", AGGR_NONE),
		OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.path = input_name;
	perf_stat.data.mode = PERF_DATA_MODE_READ;

	perf_tool__init(&perf_stat.tool, /*ordered_events=*/false);
	perf_stat.tool.attr = perf_event__process_attr;
	perf_stat.tool.event_update = perf_event__process_event_update;
	perf_stat.tool.thread_map = process_thread_map_event;
	perf_stat.tool.cpu_map = process_cpu_map_event;
	perf_stat.tool.stat_config = process_stat_config_event;
	perf_stat.tool.stat = perf_event__process_stat_event;
	perf_stat.tool.stat_round = process_stat_round_event;

	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	perf_stat.session = session;
	stat_config.output = stderr;
	evlist__delete(evsel_list);
	evsel_list = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}

static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of the following
	 * conditions is met:
	 *
	 * - there's no workload specified
	 * - there is a workload specified but all requested
	 *   events are system-wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->core.requires_cpu &&
			    !evsel__name_is(counter, "duration_time")) {
				return;
			}
		}

		if (evsel_list->core.nr_entries)
			target.system_wide = true;
	}
}

#ifdef HAVE_ARCH_X86_64_SUPPORT
static int parse_tpebs_mode(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	enum tpebs_mode *mode = opt->value;

	if (!strcasecmp("mean", str)) {
		*mode = TPEBS_MODE__MEAN;
		return 0;
	}
	if (!strcasecmp("min", str)) {
		*mode = TPEBS_MODE__MIN;
		return 0;
	}
	if (!strcasecmp("max", str)) {
		*mode = TPEBS_MODE__MAX;
		return 0;
	}
	if (!strcasecmp("last", str)) {
		*mode = TPEBS_MODE__LAST;
		return 0;
	}
	return -1;
}
#endif // HAVE_ARCH_X86_64_SUPPORT

int cmd_stat(int argc, const char **argv)
{
	struct opt_aggr_mode opt_mode = {};
	struct option stat_options[] = {
		OPT_BOOLEAN('T', "transaction", &transaction_run,
			    "hardware transaction statistics"),
		OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
			     "event selector. use 'perf list' to list available events",
			     parse_events_option),
		OPT_CALLBACK(0, "filter", &evsel_list, "filter",
			     "event filter", parse_filter),
		OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
			    "child tasks do not inherit counters"),
		OPT_STRING('p', "pid", &target.pid, "pid",
			   "stat events on existing process id"),
		OPT_STRING('t', "tid", &target.tid, "tid",
			   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
		OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
			   "stat events on existing bpf program id"),
		OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
			    "use bpf program to count events"),
		OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
			   "path to perf_event_attr map"),
#endif
		OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
			    "system-wide collection from all CPUs"),
		OPT_BOOLEAN(0, "scale", &stat_config.scale,
			    "Use --no-scale to disable counter scaling for multiplexing"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show counter open errors, etc)"),
		OPT_INTEGER('r', "repeat", &stat_config.run_count,
			    "repeat command and print average + stddev (max: 100, forever: 0)"),
		OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
			    "display details about each run (only with -r option)"),
		OPT_BOOLEAN('n', "null", &stat_config.null_run,
			    "null run - don't start any counters"),
		OPT_INCR('d', "detailed", &detailed_run,
			 "detailed run - start a lot of events"),
		OPT_BOOLEAN('S', "sync", &sync_run,
			    "call sync() before starting a run"),
		OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
				   "print large numbers with thousands' separators",
				   stat__set_big_num),
		OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
			   "list of cpus to monitor in system-wide"),
		OPT_BOOLEAN('A', "no-aggr", &opt_mode.no_aggr,
			    "disable aggregation across CPUs or PMUs"),
		OPT_BOOLEAN(0, "no-merge", &opt_mode.no_aggr,
			    "disable aggregation, the same as -A or --no-aggr"),
		OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
			    "Merge identical named hybrid events"),
		OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
			   "print counts with custom separator"),
		OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
			    "print counts in JSON format"),
		OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
			     "monitor event in cgroup name only", parse_stat_cgroups),
		OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
			   "expand events for each cgroup"),
		OPT_STRING('o', "output", &output_name, "file", "output file name"),
		OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
		OPT_INTEGER(0, "log-fd", &output_fd,
			    "log output to fd, instead of stderr"),
		OPT_STRING(0, "pre", &pre_cmd, "command",
			   "command to run prior to the measured command"),
		OPT_STRING(0, "post", &post_cmd, "command",
			   "command to run after the measured command"),
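		/*
		 * Interval printing; e.g. (an illustrative invocation, not
		 * sample output) 'perf stat -I 1000 --interval-count 5 -- cmd'
		 * prints five one-second snapshots instead of one summary.
		 */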
		OPT_UINTEGER('I', "interval-print", &stat_config.interval,
			     "print counts at regular interval in ms "
			     "(overhead is possible for values <= 100ms)"),
		OPT_INTEGER(0, "interval-count", &stat_config.times,
			    "print counts for fixed number of times"),
		OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
			    "clear screen in between new interval"),
		OPT_UINTEGER(0, "timeout", &stat_config.timeout,
			     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
		OPT_BOOLEAN(0, "per-socket", &opt_mode.socket,
			    "aggregate counts per processor socket"),
		OPT_BOOLEAN(0, "per-die", &opt_mode.die, "aggregate counts per processor die"),
		OPT_BOOLEAN(0, "per-cluster", &opt_mode.cluster,
			    "aggregate counts per processor cluster"),
		OPT_CALLBACK_OPTARG(0, "per-cache", &opt_mode, &stat_config.aggr_level,
				    "cache level", "aggregate count at this cache level (Default: LLC)",
				    parse_cache_level),
		OPT_BOOLEAN(0, "per-core", &opt_mode.core,
			    "aggregate counts per physical processor core"),
		OPT_BOOLEAN(0, "per-thread", &opt_mode.thread, "aggregate counts per thread"),
		OPT_BOOLEAN(0, "per-node", &opt_mode.node, "aggregate counts per numa node"),
		OPT_INTEGER('D', "delay", &target.initial_delay,
			    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
		OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
				   "Only print computed metrics. No raw values", enable_metric_only),
		OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
			    "don't group metric events, impacts multiplexing"),
		OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
			    "don't try to share events between metrics in a group"),
		OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
			    "disable adding events for the metric threshold calculation"),
		OPT_BOOLEAN(0, "topdown", &topdown_run,
			    "measure top-down statistics"),
#ifdef HAVE_ARCH_X86_64_SUPPORT
		OPT_BOOLEAN(0, "record-tpebs", &tpebs_recording,
			    "enable recording for tpebs when retire_latency required"),
		OPT_CALLBACK(0, "tpebs-mode", &tpebs_mode, "tpebs-mode",
			     "Mode of TPEBS recording: mean, min, max or last",
			     parse_tpebs_mode),
#endif
		OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
			     "Set the metrics level for the top-down statistics (0: max level)"),
		OPT_BOOLEAN(0, "smi-cost", &smi_cost,
			    "measure SMI cost"),
		OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
			     "monitor specified metrics or metric groups (separated by ,)",
			     append_metric_groups),
		OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
				 "Configure all used events to run in kernel space.",
				 PARSE_OPT_EXCLUSIVE),
		OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
				 "Configure all used events to run in user space.",
				 PARSE_OPT_EXCLUSIVE),
		OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
			    "Use with 'percore' event qualifier to show the event "
			    "counts of one hardware thread by summing up the counts "
			    "of all hardware threads of the same physical core"),
		OPT_BOOLEAN(0, "summary", &stat_config.summary,
			    "print summary for interval mode"),
		OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
			    "don't print 'summary' for CSV summary output"),
		OPT_BOOLEAN(0, "quiet", &quiet,
			    "don't print any output, messages or warnings (useful with record)"),
		OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
			     "Only enable events on CPUs of this type "
			     "on hybrid platforms (e.g. core or atom)",
			     parse_cputype),
#ifdef HAVE_LIBPFM
		OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
			     "libpfm4 event selector. use 'perf list' to list available events",
			     parse_libpfm_events_option),
#endif
		OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
			     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
			     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
			     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
			     parse_control_option),
		OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
				    "measure I/O performance metrics provided by arch/platform",
				    iostat_parse),
		OPT_END()
	};
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];
	struct evsel *counter;

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	stat_config.aggr_mode = opt_aggr_mode_to_aggr_mode(&opt_mode);

	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(stat_options, &opt_mode, argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For the record command, -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.csv_output || (stat_config.metric_only && stat_config.json_output)) {
		/*
		 * Current CSV and metric-only JSON output doesn't display the
		 * metric threshold so don't compute it.
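		 * (As a side effect this also avoids opening the extra events
		 * a threshold calculation would add, cf. the
		 * --metric-no-threshold option above; an illustrative
		 * consequence, not a guarantee.)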
		 */
		stat_config.metric_no_threshold = true;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output && !quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		if (!stat_config.json_output) {
			clock_gettime(CLOCK_REALTIME, &tm);
			fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
		}
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	if (stat_config.interval_clear && !isatty(fileno(output))) {
		fprintf(stderr, "--interval-clear does not work when output is not a terminal\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		parse_options_usage(NULL, stat_options, "interval-clear", 0);
		return -1;
	}

	stat_config.output = output;

	/*
	 * Let the spreadsheet do the pretty-printing.
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	target.inherit = !stat_config.no_inherit;
	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single run and when the
	 * tracee is a workload forked by perf itself (no existing task or
	 * CPU target was specified).
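	 * (Illustrative: 'perf stat -- sleep 1' can report ru_utime/ru_stime
	 * for the forked child, while attaching to an existing pid cannot.)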
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
	    !target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via the -p, -t or "
				"-a options, or with --per-thread alone.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr and cgroup are for system-wide only;
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode.
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes are only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose > 0)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	stat_config.system_wide = target.system_wide;
	if (target.cpu_list) {
		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
		if (!stat_config.user_requested_cpu_list) {
			status = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Metric parsing needs to be delayed as metrics may optimize events
	 * knowing the target is system-wide.
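	 * (e.g. whether an event can be assumed to count on every CPU may
	 * change which events a metric picks; an illustrative consequence,
	 * not a description of the exact heuristics.)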
	 */
	if (metrics) {
		const char *pmu = parse_events_option_args.pmu_filter ?: "all";
		int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
						    stat_config.metric_no_group,
						    stat_config.metric_no_merge,
						    stat_config.metric_no_threshold,
						    stat_config.user_requested_cpu_list,
						    stat_config.system_wide,
						    stat_config.hardware_aware_grouping);

		zfree(&metrics);
		if (ret) {
			status = ret;
			goto out;
		}
	}

	if (add_default_events())
		goto out;

	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}
#ifdef HAVE_BPF_SKEL
	if (target.use_bpf && nr_cgroups &&
	    (evsel_list->core.nr_entries / nr_cgroups) > BPERF_CGROUP__MAX_EVENTS) {
		pr_warning("Disabling BPF counters due to more events (%d) than the max (%d)\n",
			   evsel_list->core.nr_entries / nr_cgroups, BPERF_CGROUP__MAX_EVENTS);
		target.use_bpf = false;
	}
#endif // HAVE_BPF_SKEL
	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);

	evlist__for_each_entry(evsel_list, counter) {
		/*
		 * Setup BPF counters to require CPUs as any(-1) isn't
		 * supported. evlist__create_maps below will propagate this
		 * information to the evsels. Note, evsel__is_bperf isn't yet
		 * set up, and this change must happen early, so directly use
		 * the bpf_counter variable and target information.
		 */
		if ((counter->bpf_counter || target.use_bpf) && !target__has_cpu(&target))
			counter->core.requires_cpu = true;
	}

	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads to monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we can print them in the output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (perf_stat_init_aggr_mode())
		goto out;

	if (evlist__alloc_stats(&stat_config, evsel_list, interval))
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT, skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Enable ignoring missing threads when -p option is defined. */
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		if (status == -1)
			break;

		if (forever && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary)) {
		if (stat_config.run_count > 1)
			evlist__copy_res_stats(&stat_config, evsel_list);
		print_counters(NULL, argc, argv);
	}

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains. -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(stat_config.walltime_nsecs_stats->max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	if (stat_config.iostat_run)
		iostat_release(evsel_list);

	zfree(&stat_config.walltime_run);
	zfree(&stat_config.user_requested_cpu_list);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}
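
/*
 * Illustrative record/report round trip (a hypothetical shell session,
 * not captured output):
 *
 *   $ perf stat record -o stat.data -- ./hackbench 10
 *   $ perf stat report -i stat.data
 *
 * report replays the saved stat events through the tool callbacks wired
 * up in __cmd_report() above.
 */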