1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "util/cgroup.h" 45 #include <subcmd/parse-options.h> 46 #include "util/parse-events.h" 47 #include "util/pmus.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evsel.h" 52 #include "util/debug.h" 53 #include "util/color.h" 54 #include "util/stat.h" 55 #include "util/header.h" 56 #include "util/cpumap.h" 57 #include "util/thread_map.h" 58 #include "util/counts.h" 59 #include "util/topdown.h" 60 #include "util/session.h" 61 #include "util/tool.h" 62 #include "util/string2.h" 63 #include "util/metricgroup.h" 64 #include "util/synthetic-events.h" 65 #include "util/target.h" 66 #include "util/time-utils.h" 67 #include "util/top.h" 68 #include "util/affinity.h" 69 #include "util/pfm.h" 70 #include "util/bpf_counter.h" 71 #include "util/iostat.h" 72 #include "util/util.h" 73 #include "asm/bug.h" 74 75 #include <linux/time64.h> 76 #include <linux/zalloc.h> 77 #include <api/fs/fs.h> 78 #include <errno.h> 79 #include <signal.h> 80 #include <stdlib.h> 81 #include <sys/prctl.h> 82 #include <inttypes.h> 83 #include <locale.h> 84 #include <math.h> 85 #include <sys/types.h> 86 #include <sys/stat.h> 87 #include <sys/wait.h> 88 #include <unistd.h> 89 #include <sys/time.h> 90 #include <sys/resource.h> 91 #include <linux/err.h> 92 93 #include <linux/ctype.h> 94 #include <perf/evlist.h> 95 #include <internal/threadmap.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 static struct evlist *evsel_list; 103 static struct parse_events_option_args parse_events_option_args = { 104 .evlistp = &evsel_list, 105 }; 106 107 static bool all_counters_use_bpf = true; 108 109 static struct target target = { 110 .uid = UINT_MAX, 111 }; 112 113 #define METRIC_ONLY_LEN 20 114 115 static volatile sig_atomic_t child_pid = -1; 116 static int detailed_run = 0; 117 static bool transaction_run; 118 static bool topdown_run = false; 119 static bool smi_cost = false; 120 static bool smi_reset = false; 121 static int big_num_opt = -1; 122 static const char *pre_cmd = NULL; 123 static const char *post_cmd = NULL; 124 static bool sync_run = false; 125 static bool forever = false; 126 static bool force_metric_only = false; 127 static struct timespec ref_time; 128 static bool append_file; 129 static bool interval_count; 130 static const char *output_name; 131 static int output_fd; 132 static char *metrics; 133 134 struct perf_stat { 135 bool record; 136 struct perf_data data; 137 struct perf_session *session; 138 u64 bytes_written; 139 struct perf_tool tool; 140 bool maps_allocated; 141 struct perf_cpu_map *cpus; 142 struct perf_thread_map *threads; 143 enum aggr_mode aggr_mode; 144 u32 aggr_level; 145 }; 146 147 static struct perf_stat perf_stat; 148 #define STAT_RECORD perf_stat.record 149 150 static volatile sig_atomic_t done = 0; 151 152 static struct perf_stat_config stat_config = { 153 .aggr_mode = AGGR_GLOBAL, 154 .aggr_level = MAX_CACHE_LVL + 1, 155 .scale = true, 156 .unit_width = 4, /* strlen("unit") */ 157 .run_count = 1, 158 .metric_only_len = METRIC_ONLY_LEN, 159 .walltime_nsecs_stats = &walltime_nsecs_stats, 160 .ru_stats = &ru_stats, 161 .big_num = true, 162 .ctl_fd = -1, 163 .ctl_fd_ack = -1, 164 .iostat_run = false, 165 }; 166 167 static void evlist__check_cpu_maps(struct evlist *evlist) 168 { 169 struct evsel *evsel, *warned_leader = NULL; 170 171 evlist__for_each_entry(evlist, evsel) { 172 struct evsel *leader = evsel__leader(evsel); 173 174 /* Check that leader matches cpus with each member. */ 175 if (leader == evsel) 176 continue; 177 if (perf_cpu_map__equal(leader->core.cpus, evsel->core.cpus)) 178 continue; 179 180 /* If there's mismatch disable the group and warn user. */ 181 if (warned_leader != leader) { 182 char buf[200]; 183 184 pr_warning("WARNING: grouped events cpus do not match.\n" 185 "Events with CPUs not matching the leader will " 186 "be removed from the group.\n"); 187 evsel__group_desc(leader, buf, sizeof(buf)); 188 pr_warning(" %s\n", buf); 189 warned_leader = leader; 190 } 191 if (verbose > 0) { 192 char buf[200]; 193 194 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 195 pr_warning(" %s: %s\n", leader->name, buf); 196 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 197 pr_warning(" %s: %s\n", evsel->name, buf); 198 } 199 200 evsel__remove_from_group(evsel, leader); 201 } 202 } 203 204 static inline void diff_timespec(struct timespec *r, struct timespec *a, 205 struct timespec *b) 206 { 207 r->tv_sec = a->tv_sec - b->tv_sec; 208 if (a->tv_nsec < b->tv_nsec) { 209 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 210 r->tv_sec--; 211 } else { 212 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 213 } 214 } 215 216 static void perf_stat__reset_stats(void) 217 { 218 evlist__reset_stats(evsel_list); 219 perf_stat__reset_shadow_stats(); 220 } 221 222 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 223 union perf_event *event, 224 struct perf_sample *sample __maybe_unused, 225 struct machine *machine __maybe_unused) 226 { 227 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 228 pr_err("failed to write perf data, error: %m\n"); 229 return -1; 230 } 231 232 perf_stat.bytes_written += event->header.size; 233 return 0; 234 } 235 236 static int write_stat_round_event(u64 tm, u64 type) 237 { 238 return perf_event__synthesize_stat_round(NULL, tm, type, 239 process_synthesized_event, 240 NULL); 241 } 242 243 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 244 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 245 246 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 247 248 static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, 249 struct perf_counts_values *count) 250 { 251 struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); 252 struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); 253 254 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 255 process_synthesized_event, NULL); 256 } 257 258 static int read_single_counter(struct evsel *counter, int cpu_map_idx, 259 int thread, struct timespec *rs) 260 { 261 switch(counter->tool_event) { 262 case PERF_TOOL_DURATION_TIME: { 263 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 264 struct perf_counts_values *count = 265 perf_counts(counter->counts, cpu_map_idx, thread); 266 count->ena = count->run = val; 267 count->val = val; 268 return 0; 269 } 270 case PERF_TOOL_USER_TIME: 271 case PERF_TOOL_SYSTEM_TIME: { 272 u64 val; 273 struct perf_counts_values *count = 274 perf_counts(counter->counts, cpu_map_idx, thread); 275 if (counter->tool_event == PERF_TOOL_USER_TIME) 276 val = ru_stats.ru_utime_usec_stat.mean; 277 else 278 val = ru_stats.ru_stime_usec_stat.mean; 279 count->ena = count->run = val; 280 count->val = val; 281 return 0; 282 } 283 default: 284 case PERF_TOOL_NONE: 285 return evsel__read_counter(counter, cpu_map_idx, thread); 286 case PERF_TOOL_MAX: 287 /* This should never be reached */ 288 return 0; 289 } 290 } 291 292 /* 293 * Read out the results of a single counter: 294 * do not aggregate counts across CPUs in system-wide mode 295 */ 296 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) 297 { 298 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 299 int thread; 300 301 if (!counter->supported) 302 return -ENOENT; 303 304 for (thread = 0; thread < nthreads; thread++) { 305 struct perf_counts_values *count; 306 307 count = perf_counts(counter->counts, cpu_map_idx, thread); 308 309 /* 310 * The leader's group read loads data into its group members 311 * (via evsel__read_counter()) and sets their count->loaded. 312 */ 313 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && 314 read_single_counter(counter, cpu_map_idx, thread, rs)) { 315 counter->counts->scaled = -1; 316 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; 317 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; 318 return -1; 319 } 320 321 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); 322 323 if (STAT_RECORD) { 324 if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { 325 pr_err("failed to write stat event\n"); 326 return -1; 327 } 328 } 329 330 if (verbose > 1) { 331 fprintf(stat_config.output, 332 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 333 evsel__name(counter), 334 perf_cpu_map__cpu(evsel__cpus(counter), 335 cpu_map_idx).cpu, 336 count->val, count->ena, count->run); 337 } 338 } 339 340 return 0; 341 } 342 343 static int read_affinity_counters(struct timespec *rs) 344 { 345 struct evlist_cpu_iterator evlist_cpu_itr; 346 struct affinity saved_affinity, *affinity; 347 348 if (all_counters_use_bpf) 349 return 0; 350 351 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 352 affinity = NULL; 353 else if (affinity__setup(&saved_affinity) < 0) 354 return -1; 355 else 356 affinity = &saved_affinity; 357 358 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 359 struct evsel *counter = evlist_cpu_itr.evsel; 360 361 if (evsel__is_bpf(counter)) 362 continue; 363 364 if (!counter->err) { 365 counter->err = read_counter_cpu(counter, rs, 366 evlist_cpu_itr.cpu_map_idx); 367 } 368 } 369 if (affinity) 370 affinity__cleanup(&saved_affinity); 371 372 return 0; 373 } 374 375 static int read_bpf_map_counters(void) 376 { 377 struct evsel *counter; 378 int err; 379 380 evlist__for_each_entry(evsel_list, counter) { 381 if (!evsel__is_bpf(counter)) 382 continue; 383 384 err = bpf_counter__read(counter); 385 if (err) 386 return err; 387 } 388 return 0; 389 } 390 391 static int read_counters(struct timespec *rs) 392 { 393 if (!stat_config.stop_read_counter) { 394 if (read_bpf_map_counters() || 395 read_affinity_counters(rs)) 396 return -1; 397 } 398 return 0; 399 } 400 401 static void process_counters(void) 402 { 403 struct evsel *counter; 404 405 evlist__for_each_entry(evsel_list, counter) { 406 if (counter->err) 407 pr_debug("failed to read counter %s\n", counter->name); 408 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 409 pr_warning("failed to process counter %s\n", counter->name); 410 counter->err = 0; 411 } 412 413 perf_stat_merge_counters(&stat_config, evsel_list); 414 perf_stat_process_percore(&stat_config, evsel_list); 415 } 416 417 static void process_interval(void) 418 { 419 struct timespec ts, rs; 420 421 clock_gettime(CLOCK_MONOTONIC, &ts); 422 diff_timespec(&rs, &ts, &ref_time); 423 424 evlist__reset_aggr_stats(evsel_list); 425 426 if (read_counters(&rs) == 0) 427 process_counters(); 428 429 if (STAT_RECORD) { 430 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 431 pr_err("failed to write stat round event\n"); 432 } 433 434 init_stats(&walltime_nsecs_stats); 435 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 436 print_counters(&rs, 0, NULL); 437 } 438 439 static bool handle_interval(unsigned int interval, int *times) 440 { 441 if (interval) { 442 process_interval(); 443 if (interval_count && !(--(*times))) 444 return true; 445 } 446 return false; 447 } 448 449 static int enable_counters(void) 450 { 451 struct evsel *evsel; 452 int err; 453 454 evlist__for_each_entry(evsel_list, evsel) { 455 if (!evsel__is_bpf(evsel)) 456 continue; 457 458 err = bpf_counter__enable(evsel); 459 if (err) 460 return err; 461 } 462 463 if (!target__enable_on_exec(&target)) { 464 if (!all_counters_use_bpf) 465 evlist__enable(evsel_list); 466 } 467 return 0; 468 } 469 470 static void disable_counters(void) 471 { 472 struct evsel *counter; 473 474 /* 475 * If we don't have tracee (attaching to task or cpu), counters may 476 * still be running. To get accurate group ratios, we must stop groups 477 * from counting before reading their constituent counters. 478 */ 479 if (!target__none(&target)) { 480 evlist__for_each_entry(evsel_list, counter) 481 bpf_counter__disable(counter); 482 if (!all_counters_use_bpf) 483 evlist__disable(evsel_list); 484 } 485 } 486 487 static volatile sig_atomic_t workload_exec_errno; 488 489 /* 490 * evlist__prepare_workload will send a SIGUSR1 491 * if the fork fails, since we asked by setting its 492 * want_signal to true. 493 */ 494 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 495 void *ucontext __maybe_unused) 496 { 497 workload_exec_errno = info->si_value.sival_int; 498 } 499 500 static bool evsel__should_store_id(struct evsel *counter) 501 { 502 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 503 } 504 505 static bool is_target_alive(struct target *_target, 506 struct perf_thread_map *threads) 507 { 508 struct stat st; 509 int i; 510 511 if (!target__has_task(_target)) 512 return true; 513 514 for (i = 0; i < threads->nr; i++) { 515 char path[PATH_MAX]; 516 517 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 518 threads->map[i].pid); 519 520 if (!stat(path, &st)) 521 return true; 522 } 523 524 return false; 525 } 526 527 static void process_evlist(struct evlist *evlist, unsigned int interval) 528 { 529 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 530 531 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 532 switch (cmd) { 533 case EVLIST_CTL_CMD_ENABLE: 534 fallthrough; 535 case EVLIST_CTL_CMD_DISABLE: 536 if (interval) 537 process_interval(); 538 break; 539 case EVLIST_CTL_CMD_SNAPSHOT: 540 case EVLIST_CTL_CMD_ACK: 541 case EVLIST_CTL_CMD_UNSUPPORTED: 542 case EVLIST_CTL_CMD_EVLIST: 543 case EVLIST_CTL_CMD_STOP: 544 case EVLIST_CTL_CMD_PING: 545 default: 546 break; 547 } 548 } 549 } 550 551 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 552 int *time_to_sleep) 553 { 554 int tts = *time_to_sleep; 555 struct timespec time_diff; 556 557 diff_timespec(&time_diff, time_stop, time_start); 558 559 tts -= time_diff.tv_sec * MSEC_PER_SEC + 560 time_diff.tv_nsec / NSEC_PER_MSEC; 561 562 if (tts < 0) 563 tts = 0; 564 565 *time_to_sleep = tts; 566 } 567 568 static int dispatch_events(bool forks, int timeout, int interval, int *times) 569 { 570 int child_exited = 0, status = 0; 571 int time_to_sleep, sleep_time; 572 struct timespec time_start, time_stop; 573 574 if (interval) 575 sleep_time = interval; 576 else if (timeout) 577 sleep_time = timeout; 578 else 579 sleep_time = 1000; 580 581 time_to_sleep = sleep_time; 582 583 while (!done) { 584 if (forks) 585 child_exited = waitpid(child_pid, &status, WNOHANG); 586 else 587 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 588 589 if (child_exited) 590 break; 591 592 clock_gettime(CLOCK_MONOTONIC, &time_start); 593 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 594 if (timeout || handle_interval(interval, times)) 595 break; 596 time_to_sleep = sleep_time; 597 } else { /* fd revent */ 598 process_evlist(evsel_list, interval); 599 clock_gettime(CLOCK_MONOTONIC, &time_stop); 600 compute_tts(&time_start, &time_stop, &time_to_sleep); 601 } 602 } 603 604 return status; 605 } 606 607 enum counter_recovery { 608 COUNTER_SKIP, 609 COUNTER_RETRY, 610 COUNTER_FATAL, 611 }; 612 613 static enum counter_recovery stat_handle_error(struct evsel *counter) 614 { 615 char msg[BUFSIZ]; 616 /* 617 * PPC returns ENXIO for HW counters until 2.6.37 618 * (behavior changed with commit b0a873e). 619 */ 620 if (errno == EINVAL || errno == ENOSYS || 621 errno == ENOENT || errno == EOPNOTSUPP || 622 errno == ENXIO) { 623 if (verbose > 0) 624 ui__warning("%s event is not supported by the kernel.\n", 625 evsel__name(counter)); 626 counter->supported = false; 627 /* 628 * errored is a sticky flag that means one of the counter's 629 * cpu event had a problem and needs to be reexamined. 630 */ 631 counter->errored = true; 632 633 if ((evsel__leader(counter) != counter) || 634 !(counter->core.leader->nr_members > 1)) 635 return COUNTER_SKIP; 636 } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) { 637 if (verbose > 0) 638 ui__warning("%s\n", msg); 639 return COUNTER_RETRY; 640 } else if (target__has_per_thread(&target) && 641 evsel_list->core.threads && 642 evsel_list->core.threads->err_thread != -1) { 643 /* 644 * For global --per-thread case, skip current 645 * error thread. 646 */ 647 if (!thread_map__remove(evsel_list->core.threads, 648 evsel_list->core.threads->err_thread)) { 649 evsel_list->core.threads->err_thread = -1; 650 return COUNTER_RETRY; 651 } 652 } else if (counter->skippable) { 653 if (verbose > 0) 654 ui__warning("skipping event %s that kernel failed to open .\n", 655 evsel__name(counter)); 656 counter->supported = false; 657 counter->errored = true; 658 return COUNTER_SKIP; 659 } 660 661 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 662 ui__error("%s\n", msg); 663 664 if (child_pid != -1) 665 kill(child_pid, SIGTERM); 666 return COUNTER_FATAL; 667 } 668 669 static int __run_perf_stat(int argc, const char **argv, int run_idx) 670 { 671 int interval = stat_config.interval; 672 int times = stat_config.times; 673 int timeout = stat_config.timeout; 674 char msg[BUFSIZ]; 675 unsigned long long t0, t1; 676 struct evsel *counter; 677 size_t l; 678 int status = 0; 679 const bool forks = (argc > 0); 680 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 681 struct evlist_cpu_iterator evlist_cpu_itr; 682 struct affinity saved_affinity, *affinity = NULL; 683 int err; 684 bool second_pass = false; 685 686 if (forks) { 687 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 688 perror("failed to prepare workload"); 689 return -1; 690 } 691 child_pid = evsel_list->workload.pid; 692 } 693 694 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { 695 if (affinity__setup(&saved_affinity) < 0) 696 return -1; 697 affinity = &saved_affinity; 698 } 699 700 evlist__for_each_entry(evsel_list, counter) { 701 counter->reset_group = false; 702 if (bpf_counter__load(counter, &target)) 703 return -1; 704 if (!(evsel__is_bperf(counter))) 705 all_counters_use_bpf = false; 706 } 707 708 evlist__reset_aggr_stats(evsel_list); 709 710 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 711 counter = evlist_cpu_itr.evsel; 712 713 /* 714 * bperf calls evsel__open_per_cpu() in bperf__load(), so 715 * no need to call it again here. 716 */ 717 if (target.use_bpf) 718 break; 719 720 if (counter->reset_group || counter->errored) 721 continue; 722 if (evsel__is_bperf(counter)) 723 continue; 724 try_again: 725 if (create_perf_stat_counter(counter, &stat_config, &target, 726 evlist_cpu_itr.cpu_map_idx) < 0) { 727 728 /* 729 * Weak group failed. We cannot just undo this here 730 * because earlier CPUs might be in group mode, and the kernel 731 * doesn't support mixing group and non group reads. Defer 732 * it to later. 733 * Don't close here because we're in the wrong affinity. 734 */ 735 if ((errno == EINVAL || errno == EBADF) && 736 evsel__leader(counter) != counter && 737 counter->weak_group) { 738 evlist__reset_weak_group(evsel_list, counter, false); 739 assert(counter->reset_group); 740 second_pass = true; 741 continue; 742 } 743 744 switch (stat_handle_error(counter)) { 745 case COUNTER_FATAL: 746 return -1; 747 case COUNTER_RETRY: 748 goto try_again; 749 case COUNTER_SKIP: 750 continue; 751 default: 752 break; 753 } 754 755 } 756 counter->supported = true; 757 } 758 759 if (second_pass) { 760 /* 761 * Now redo all the weak group after closing them, 762 * and also close errored counters. 763 */ 764 765 /* First close errored or weak retry */ 766 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 767 counter = evlist_cpu_itr.evsel; 768 769 if (!counter->reset_group && !counter->errored) 770 continue; 771 772 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 773 } 774 /* Now reopen weak */ 775 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 776 counter = evlist_cpu_itr.evsel; 777 778 if (!counter->reset_group) 779 continue; 780 try_again_reset: 781 pr_debug2("reopening weak %s\n", evsel__name(counter)); 782 if (create_perf_stat_counter(counter, &stat_config, &target, 783 evlist_cpu_itr.cpu_map_idx) < 0) { 784 785 switch (stat_handle_error(counter)) { 786 case COUNTER_FATAL: 787 return -1; 788 case COUNTER_RETRY: 789 goto try_again_reset; 790 case COUNTER_SKIP: 791 continue; 792 default: 793 break; 794 } 795 } 796 counter->supported = true; 797 } 798 } 799 affinity__cleanup(affinity); 800 801 evlist__for_each_entry(evsel_list, counter) { 802 if (!counter->supported) { 803 perf_evsel__free_fd(&counter->core); 804 continue; 805 } 806 807 l = strlen(counter->unit); 808 if (l > stat_config.unit_width) 809 stat_config.unit_width = l; 810 811 if (evsel__should_store_id(counter) && 812 evsel__store_ids(counter, evsel_list)) 813 return -1; 814 } 815 816 if (evlist__apply_filters(evsel_list, &counter)) { 817 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 818 counter->filter, evsel__name(counter), errno, 819 str_error_r(errno, msg, sizeof(msg))); 820 return -1; 821 } 822 823 if (STAT_RECORD) { 824 int fd = perf_data__fd(&perf_stat.data); 825 826 if (is_pipe) { 827 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 828 } else { 829 err = perf_session__write_header(perf_stat.session, evsel_list, 830 fd, false); 831 } 832 833 if (err < 0) 834 return err; 835 836 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 837 process_synthesized_event, is_pipe); 838 if (err < 0) 839 return err; 840 } 841 842 if (target.initial_delay) { 843 pr_info(EVLIST_DISABLED_MSG); 844 } else { 845 err = enable_counters(); 846 if (err) 847 return -1; 848 } 849 850 /* Exec the command, if any */ 851 if (forks) 852 evlist__start_workload(evsel_list); 853 854 if (target.initial_delay > 0) { 855 usleep(target.initial_delay * USEC_PER_MSEC); 856 err = enable_counters(); 857 if (err) 858 return -1; 859 860 pr_info(EVLIST_ENABLED_MSG); 861 } 862 863 t0 = rdclock(); 864 clock_gettime(CLOCK_MONOTONIC, &ref_time); 865 866 if (forks) { 867 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 868 status = dispatch_events(forks, timeout, interval, ×); 869 if (child_pid != -1) { 870 if (timeout) 871 kill(child_pid, SIGTERM); 872 wait4(child_pid, &status, 0, &stat_config.ru_data); 873 } 874 875 if (workload_exec_errno) { 876 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 877 pr_err("Workload failed: %s\n", emsg); 878 return -1; 879 } 880 881 if (WIFSIGNALED(status)) 882 psignal(WTERMSIG(status), argv[0]); 883 } else { 884 status = dispatch_events(forks, timeout, interval, ×); 885 } 886 887 disable_counters(); 888 889 t1 = rdclock(); 890 891 if (stat_config.walltime_run_table) 892 stat_config.walltime_run[run_idx] = t1 - t0; 893 894 if (interval && stat_config.summary) { 895 stat_config.interval = 0; 896 stat_config.stop_read_counter = true; 897 init_stats(&walltime_nsecs_stats); 898 update_stats(&walltime_nsecs_stats, t1 - t0); 899 900 evlist__copy_prev_raw_counts(evsel_list); 901 evlist__reset_prev_raw_counts(evsel_list); 902 evlist__reset_aggr_stats(evsel_list); 903 } else { 904 update_stats(&walltime_nsecs_stats, t1 - t0); 905 update_rusage_stats(&ru_stats, &stat_config.ru_data); 906 } 907 908 /* 909 * Closing a group leader splits the group, and as we only disable 910 * group leaders, results in remaining events becoming enabled. To 911 * avoid arbitrary skew, we must read all counters before closing any 912 * group leaders. 913 */ 914 if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0) 915 process_counters(); 916 917 /* 918 * We need to keep evsel_list alive, because it's processed 919 * later the evsel_list will be closed after. 920 */ 921 if (!STAT_RECORD) 922 evlist__close(evsel_list); 923 924 return WEXITSTATUS(status); 925 } 926 927 static int run_perf_stat(int argc, const char **argv, int run_idx) 928 { 929 int ret; 930 931 if (pre_cmd) { 932 ret = system(pre_cmd); 933 if (ret) 934 return ret; 935 } 936 937 if (sync_run) 938 sync(); 939 940 ret = __run_perf_stat(argc, argv, run_idx); 941 if (ret) 942 return ret; 943 944 if (post_cmd) { 945 ret = system(post_cmd); 946 if (ret) 947 return ret; 948 } 949 950 return ret; 951 } 952 953 static void print_counters(struct timespec *ts, int argc, const char **argv) 954 { 955 /* Do not print anything if we record to the pipe. */ 956 if (STAT_RECORD && perf_stat.data.is_pipe) 957 return; 958 if (quiet) 959 return; 960 961 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 962 } 963 964 static volatile sig_atomic_t signr = -1; 965 966 static void skip_signal(int signo) 967 { 968 if ((child_pid == -1) || stat_config.interval) 969 done = 1; 970 971 signr = signo; 972 /* 973 * render child_pid harmless 974 * won't send SIGTERM to a random 975 * process in case of race condition 976 * and fast PID recycling 977 */ 978 child_pid = -1; 979 } 980 981 static void sig_atexit(void) 982 { 983 sigset_t set, oset; 984 985 /* 986 * avoid race condition with SIGCHLD handler 987 * in skip_signal() which is modifying child_pid 988 * goal is to avoid send SIGTERM to a random 989 * process 990 */ 991 sigemptyset(&set); 992 sigaddset(&set, SIGCHLD); 993 sigprocmask(SIG_BLOCK, &set, &oset); 994 995 if (child_pid != -1) 996 kill(child_pid, SIGTERM); 997 998 sigprocmask(SIG_SETMASK, &oset, NULL); 999 1000 if (signr == -1) 1001 return; 1002 1003 signal(signr, SIG_DFL); 1004 kill(getpid(), signr); 1005 } 1006 1007 void perf_stat__set_big_num(int set) 1008 { 1009 stat_config.big_num = (set != 0); 1010 } 1011 1012 void perf_stat__set_no_csv_summary(int set) 1013 { 1014 stat_config.no_csv_summary = (set != 0); 1015 } 1016 1017 static int stat__set_big_num(const struct option *opt __maybe_unused, 1018 const char *s __maybe_unused, int unset) 1019 { 1020 big_num_opt = unset ? 0 : 1; 1021 perf_stat__set_big_num(!unset); 1022 return 0; 1023 } 1024 1025 static int enable_metric_only(const struct option *opt __maybe_unused, 1026 const char *s __maybe_unused, int unset) 1027 { 1028 force_metric_only = true; 1029 stat_config.metric_only = !unset; 1030 return 0; 1031 } 1032 1033 static int append_metric_groups(const struct option *opt __maybe_unused, 1034 const char *str, 1035 int unset __maybe_unused) 1036 { 1037 if (metrics) { 1038 char *tmp; 1039 1040 if (asprintf(&tmp, "%s,%s", metrics, str) < 0) 1041 return -ENOMEM; 1042 free(metrics); 1043 metrics = tmp; 1044 } else { 1045 metrics = strdup(str); 1046 if (!metrics) 1047 return -ENOMEM; 1048 } 1049 return 0; 1050 } 1051 1052 static int parse_control_option(const struct option *opt, 1053 const char *str, 1054 int unset __maybe_unused) 1055 { 1056 struct perf_stat_config *config = opt->value; 1057 1058 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1059 } 1060 1061 static int parse_stat_cgroups(const struct option *opt, 1062 const char *str, int unset) 1063 { 1064 if (stat_config.cgroup_list) { 1065 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1066 return -1; 1067 } 1068 1069 return parse_cgroups(opt, str, unset); 1070 } 1071 1072 static int parse_cputype(const struct option *opt, 1073 const char *str, 1074 int unset __maybe_unused) 1075 { 1076 const struct perf_pmu *pmu; 1077 struct evlist *evlist = *(struct evlist **)opt->value; 1078 1079 if (!list_empty(&evlist->core.entries)) { 1080 fprintf(stderr, "Must define cputype before events/metrics\n"); 1081 return -1; 1082 } 1083 1084 pmu = perf_pmus__pmu_for_pmu_filter(str); 1085 if (!pmu) { 1086 fprintf(stderr, "--cputype %s is not supported!\n", str); 1087 return -1; 1088 } 1089 parse_events_option_args.pmu_filter = pmu->name; 1090 1091 return 0; 1092 } 1093 1094 static int parse_cache_level(const struct option *opt, 1095 const char *str, 1096 int unset __maybe_unused) 1097 { 1098 int level; 1099 u32 *aggr_mode = (u32 *)opt->value; 1100 u32 *aggr_level = (u32 *)opt->data; 1101 1102 /* 1103 * If no string is specified, aggregate based on the topology of 1104 * Last Level Cache (LLC). Since the LLC level can change from 1105 * architecture to architecture, set level greater than 1106 * MAX_CACHE_LVL which will be interpreted as LLC. 1107 */ 1108 if (str == NULL) { 1109 level = MAX_CACHE_LVL + 1; 1110 goto out; 1111 } 1112 1113 /* 1114 * The format to specify cache level is LX or lX where X is the 1115 * cache level. 1116 */ 1117 if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) { 1118 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", 1119 MAX_CACHE_LVL, 1120 MAX_CACHE_LVL); 1121 return -EINVAL; 1122 } 1123 1124 level = atoi(&str[1]); 1125 if (level < 1) { 1126 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", 1127 MAX_CACHE_LVL, 1128 MAX_CACHE_LVL); 1129 return -EINVAL; 1130 } 1131 1132 if (level > MAX_CACHE_LVL) { 1133 pr_err("perf only supports max cache level of %d.\n" 1134 "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL); 1135 return -EINVAL; 1136 } 1137 out: 1138 *aggr_mode = AGGR_CACHE; 1139 *aggr_level = level; 1140 return 0; 1141 } 1142 1143 static struct option stat_options[] = { 1144 OPT_BOOLEAN('T', "transaction", &transaction_run, 1145 "hardware transaction statistics"), 1146 OPT_CALLBACK('e', "event", &parse_events_option_args, "event", 1147 "event selector. use 'perf list' to list available events", 1148 parse_events_option), 1149 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1150 "event filter", parse_filter), 1151 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1152 "child tasks do not inherit counters"), 1153 OPT_STRING('p', "pid", &target.pid, "pid", 1154 "stat events on existing process id"), 1155 OPT_STRING('t', "tid", &target.tid, "tid", 1156 "stat events on existing thread id"), 1157 #ifdef HAVE_BPF_SKEL 1158 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1159 "stat events on existing bpf program id"), 1160 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1161 "use bpf program to count events"), 1162 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1163 "path to perf_event_attr map"), 1164 #endif 1165 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1166 "system-wide collection from all CPUs"), 1167 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1168 "Use --no-scale to disable counter scaling for multiplexing"), 1169 OPT_INCR('v', "verbose", &verbose, 1170 "be more verbose (show counter open errors, etc)"), 1171 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1172 "repeat command and print average + stddev (max: 100, forever: 0)"), 1173 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1174 "display details about each run (only with -r option)"), 1175 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1176 "null run - dont start any counters"), 1177 OPT_INCR('d', "detailed", &detailed_run, 1178 "detailed run - start a lot of events"), 1179 OPT_BOOLEAN('S', "sync", &sync_run, 1180 "call sync() before starting a run"), 1181 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1182 "print large numbers with thousands\' separators", 1183 stat__set_big_num), 1184 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1185 "list of cpus to monitor in system-wide"), 1186 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1187 "disable aggregation across CPUs or PMUs", AGGR_NONE), 1188 OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode, 1189 "disable aggregation the same as -A or -no-aggr", AGGR_NONE), 1190 OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge, 1191 "Merge identical named hybrid events"), 1192 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1193 "print counts with custom separator"), 1194 OPT_BOOLEAN('j', "json-output", &stat_config.json_output, 1195 "print counts in JSON format"), 1196 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1197 "monitor event in cgroup name only", parse_stat_cgroups), 1198 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1199 "expand events for each cgroup"), 1200 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1201 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1202 OPT_INTEGER(0, "log-fd", &output_fd, 1203 "log output to fd, instead of stderr"), 1204 OPT_STRING(0, "pre", &pre_cmd, "command", 1205 "command to run prior to the measured command"), 1206 OPT_STRING(0, "post", &post_cmd, "command", 1207 "command to run after to the measured command"), 1208 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1209 "print counts at regular interval in ms " 1210 "(overhead is possible for values <= 100ms)"), 1211 OPT_INTEGER(0, "interval-count", &stat_config.times, 1212 "print counts for fixed number of times"), 1213 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1214 "clear screen in between new interval"), 1215 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1216 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1217 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1218 "aggregate counts per processor socket", AGGR_SOCKET), 1219 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1220 "aggregate counts per processor die", AGGR_DIE), 1221 OPT_SET_UINT(0, "per-cluster", &stat_config.aggr_mode, 1222 "aggregate counts per processor cluster", AGGR_CLUSTER), 1223 OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level, 1224 "cache level", "aggregate count at this cache level (Default: LLC)", 1225 parse_cache_level), 1226 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1227 "aggregate counts per physical processor core", AGGR_CORE), 1228 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1229 "aggregate counts per thread", AGGR_THREAD), 1230 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1231 "aggregate counts per numa node", AGGR_NODE), 1232 OPT_INTEGER('D', "delay", &target.initial_delay, 1233 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1234 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1235 "Only print computed metrics. No raw values", enable_metric_only), 1236 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1237 "don't group metric events, impacts multiplexing"), 1238 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1239 "don't try to share events between metrics in a group"), 1240 OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold, 1241 "disable adding events for the metric threshold calculation"), 1242 OPT_BOOLEAN(0, "topdown", &topdown_run, 1243 "measure top-down statistics"), 1244 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1245 "Set the metrics level for the top-down statistics (0: max level)"), 1246 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1247 "measure SMI cost"), 1248 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1249 "monitor specified metrics or metric groups (separated by ,)", 1250 append_metric_groups), 1251 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1252 "Configure all used events to run in kernel space.", 1253 PARSE_OPT_EXCLUSIVE), 1254 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1255 "Configure all used events to run in user space.", 1256 PARSE_OPT_EXCLUSIVE), 1257 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1258 "Use with 'percore' event qualifier to show the event " 1259 "counts of one hardware thread by sum up total hardware " 1260 "threads of same physical core"), 1261 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1262 "print summary for interval mode"), 1263 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1264 "don't print 'summary' for CSV summary output"), 1265 OPT_BOOLEAN(0, "quiet", &quiet, 1266 "don't print any output, messages or warnings (useful with record)"), 1267 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", 1268 "Only enable events on applying cpu with this type " 1269 "for hybrid platform (e.g. core or atom)", 1270 parse_cputype), 1271 #ifdef HAVE_LIBPFM 1272 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1273 "libpfm4 event selector. use 'perf list' to list available events", 1274 parse_libpfm_events_option), 1275 #endif 1276 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1277 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1278 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1279 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1280 parse_control_option), 1281 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1282 "measure I/O performance metrics provided by arch/platform", 1283 iostat_parse), 1284 OPT_END() 1285 }; 1286 1287 /** 1288 * Calculate the cache instance ID from the map in 1289 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list 1290 * Cache instance ID is the first CPU reported in the shared_cpu_list file. 1291 */ 1292 static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map) 1293 { 1294 int id; 1295 struct perf_cpu_map *cpu_map = perf_cpu_map__new(map); 1296 1297 /* 1298 * If the map contains no CPU, consider the current CPU to 1299 * be the first online CPU in the cache domain else use the 1300 * first online CPU of the cache domain as the ID. 1301 */ 1302 id = perf_cpu_map__min(cpu_map).cpu; 1303 if (id == -1) 1304 id = cpu.cpu; 1305 1306 /* Free the perf_cpu_map used to find the cache ID */ 1307 perf_cpu_map__put(cpu_map); 1308 1309 return id; 1310 } 1311 1312 /** 1313 * cpu__get_cache_id - Returns 0 if successful in populating the 1314 * cache level and cache id. Cache level is read from 1315 * /sys/devices/system/cpu/cpuX/cache/indexY/level where as cache instance ID 1316 * is the first CPU reported by 1317 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list 1318 */ 1319 static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache) 1320 { 1321 int ret = 0; 1322 u32 cache_level = stat_config.aggr_level; 1323 struct cpu_cache_level caches[MAX_CACHE_LVL]; 1324 u32 i = 0, caches_cnt = 0; 1325 1326 cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; 1327 cache->cache = -1; 1328 1329 ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt); 1330 if (ret) { 1331 /* 1332 * If caches_cnt is not 0, cpu_cache_level data 1333 * was allocated when building the topology. 1334 * Free the allocated data before returning. 1335 */ 1336 if (caches_cnt) 1337 goto free_caches; 1338 1339 return ret; 1340 } 1341 1342 if (!caches_cnt) 1343 return -1; 1344 1345 /* 1346 * Save the data for the highest level if no 1347 * level was specified by the user. 1348 */ 1349 if (cache_level > MAX_CACHE_LVL) { 1350 int max_level_index = 0; 1351 1352 for (i = 1; i < caches_cnt; ++i) { 1353 if (caches[i].level > caches[max_level_index].level) 1354 max_level_index = i; 1355 } 1356 1357 cache->cache_lvl = caches[max_level_index].level; 1358 cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map); 1359 1360 /* Reset i to 0 to free entire caches[] */ 1361 i = 0; 1362 goto free_caches; 1363 } 1364 1365 for (i = 0; i < caches_cnt; ++i) { 1366 if (caches[i].level == cache_level) { 1367 cache->cache_lvl = cache_level; 1368 cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); 1369 } 1370 1371 cpu_cache_level__free(&caches[i]); 1372 } 1373 1374 free_caches: 1375 /* 1376 * Free all the allocated cpu_cache_level data. 1377 */ 1378 while (i < caches_cnt) 1379 cpu_cache_level__free(&caches[i++]); 1380 1381 return ret; 1382 } 1383 1384 /** 1385 * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instache ID, cache 1386 * level, die and socket populated with the cache instache ID, cache level, 1387 * die and socket for cpu. The function signature is compatible with 1388 * aggr_cpu_id_get_t. 1389 */ 1390 static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data) 1391 { 1392 int ret; 1393 struct aggr_cpu_id id; 1394 struct perf_cache cache; 1395 1396 id = aggr_cpu_id__die(cpu, data); 1397 if (aggr_cpu_id__is_empty(&id)) 1398 return id; 1399 1400 ret = cpu__get_cache_details(cpu, &cache); 1401 if (ret) 1402 return id; 1403 1404 id.cache_lvl = cache.cache_lvl; 1405 id.cache = cache.cache; 1406 return id; 1407 } 1408 1409 static const char *const aggr_mode__string[] = { 1410 [AGGR_CORE] = "core", 1411 [AGGR_CACHE] = "cache", 1412 [AGGR_CLUSTER] = "cluster", 1413 [AGGR_DIE] = "die", 1414 [AGGR_GLOBAL] = "global", 1415 [AGGR_NODE] = "node", 1416 [AGGR_NONE] = "none", 1417 [AGGR_SOCKET] = "socket", 1418 [AGGR_THREAD] = "thread", 1419 [AGGR_UNSET] = "unset", 1420 }; 1421 1422 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1423 struct perf_cpu cpu) 1424 { 1425 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1426 } 1427 1428 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1429 struct perf_cpu cpu) 1430 { 1431 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1432 } 1433 1434 static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused, 1435 struct perf_cpu cpu) 1436 { 1437 return aggr_cpu_id__cache(cpu, /*data=*/NULL); 1438 } 1439 1440 static struct aggr_cpu_id perf_stat__get_cluster(struct perf_stat_config *config __maybe_unused, 1441 struct perf_cpu cpu) 1442 { 1443 return aggr_cpu_id__cluster(cpu, /*data=*/NULL); 1444 } 1445 1446 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1447 struct perf_cpu cpu) 1448 { 1449 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1450 } 1451 1452 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1453 struct perf_cpu cpu) 1454 { 1455 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1456 } 1457 1458 static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused, 1459 struct perf_cpu cpu) 1460 { 1461 return aggr_cpu_id__global(cpu, /*data=*/NULL); 1462 } 1463 1464 static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused, 1465 struct perf_cpu cpu) 1466 { 1467 return aggr_cpu_id__cpu(cpu, /*data=*/NULL); 1468 } 1469 1470 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1471 aggr_get_id_t get_id, struct perf_cpu cpu) 1472 { 1473 struct aggr_cpu_id id; 1474 1475 /* per-process mode - should use global aggr mode */ 1476 if (cpu.cpu == -1) 1477 return get_id(config, cpu); 1478 1479 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1480 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1481 1482 id = config->cpus_aggr_map->map[cpu.cpu]; 1483 return id; 1484 } 1485 1486 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1487 struct perf_cpu cpu) 1488 { 1489 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1490 } 1491 1492 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1493 struct perf_cpu cpu) 1494 { 1495 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1496 } 1497 1498 static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config, 1499 struct perf_cpu cpu) 1500 { 1501 return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu); 1502 } 1503 1504 static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config, 1505 struct perf_cpu cpu) 1506 { 1507 return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu); 1508 } 1509 1510 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1511 struct perf_cpu cpu) 1512 { 1513 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1514 } 1515 1516 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1517 struct perf_cpu cpu) 1518 { 1519 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1520 } 1521 1522 static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config, 1523 struct perf_cpu cpu) 1524 { 1525 return perf_stat__get_aggr(config, perf_stat__get_global, cpu); 1526 } 1527 1528 static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config, 1529 struct perf_cpu cpu) 1530 { 1531 return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu); 1532 } 1533 1534 static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) 1535 { 1536 switch (aggr_mode) { 1537 case AGGR_SOCKET: 1538 return aggr_cpu_id__socket; 1539 case AGGR_DIE: 1540 return aggr_cpu_id__die; 1541 case AGGR_CLUSTER: 1542 return aggr_cpu_id__cluster; 1543 case AGGR_CACHE: 1544 return aggr_cpu_id__cache; 1545 case AGGR_CORE: 1546 return aggr_cpu_id__core; 1547 case AGGR_NODE: 1548 return aggr_cpu_id__node; 1549 case AGGR_NONE: 1550 return aggr_cpu_id__cpu; 1551 case AGGR_GLOBAL: 1552 return aggr_cpu_id__global; 1553 case AGGR_THREAD: 1554 case AGGR_UNSET: 1555 case AGGR_MAX: 1556 default: 1557 return NULL; 1558 } 1559 } 1560 1561 static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) 1562 { 1563 switch (aggr_mode) { 1564 case AGGR_SOCKET: 1565 return perf_stat__get_socket_cached; 1566 case AGGR_DIE: 1567 return perf_stat__get_die_cached; 1568 case AGGR_CLUSTER: 1569 return perf_stat__get_cluster_cached; 1570 case AGGR_CACHE: 1571 return perf_stat__get_cache_id_cached; 1572 case AGGR_CORE: 1573 return perf_stat__get_core_cached; 1574 case AGGR_NODE: 1575 return perf_stat__get_node_cached; 1576 case AGGR_NONE: 1577 return perf_stat__get_cpu_cached; 1578 case AGGR_GLOBAL: 1579 return perf_stat__get_global_cached; 1580 case AGGR_THREAD: 1581 case AGGR_UNSET: 1582 case AGGR_MAX: 1583 default: 1584 return NULL; 1585 } 1586 } 1587 1588 static int perf_stat_init_aggr_mode(void) 1589 { 1590 int nr; 1591 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); 1592 1593 if (get_id) { 1594 bool needs_sort = stat_config.aggr_mode != AGGR_NONE; 1595 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, 1596 get_id, /*data=*/NULL, needs_sort); 1597 if (!stat_config.aggr_map) { 1598 pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]); 1599 return -1; 1600 } 1601 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); 1602 } 1603 1604 if (stat_config.aggr_mode == AGGR_THREAD) { 1605 nr = perf_thread_map__nr(evsel_list->core.threads); 1606 stat_config.aggr_map = cpu_aggr_map__empty_new(nr); 1607 if (stat_config.aggr_map == NULL) 1608 return -ENOMEM; 1609 1610 for (int s = 0; s < nr; s++) { 1611 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1612 1613 id.thread_idx = s; 1614 stat_config.aggr_map->map[s] = id; 1615 } 1616 return 0; 1617 } 1618 1619 /* 1620 * The evsel_list->cpus is the base we operate on, 1621 * taking the highest cpu number to be the size of 1622 * the aggregation translate cpumap. 1623 */ 1624 if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) 1625 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; 1626 else 1627 nr = 0; 1628 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1629 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1630 } 1631 1632 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1633 { 1634 if (map) { 1635 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1636 "cpu_aggr_map refcnt unbalanced\n"); 1637 free(map); 1638 } 1639 } 1640 1641 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1642 { 1643 if (map && refcount_dec_and_test(&map->refcnt)) 1644 cpu_aggr_map__delete(map); 1645 } 1646 1647 static void perf_stat__exit_aggr_mode(void) 1648 { 1649 cpu_aggr_map__put(stat_config.aggr_map); 1650 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1651 stat_config.aggr_map = NULL; 1652 stat_config.cpus_aggr_map = NULL; 1653 } 1654 1655 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) 1656 { 1657 struct perf_env *env = data; 1658 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1659 1660 if (cpu.cpu != -1) 1661 id.socket = env->cpu[cpu.cpu].socket_id; 1662 1663 return id; 1664 } 1665 1666 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) 1667 { 1668 struct perf_env *env = data; 1669 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1670 1671 if (cpu.cpu != -1) { 1672 /* 1673 * die_id is relative to socket, so start 1674 * with the socket ID and then add die to 1675 * make a unique ID. 1676 */ 1677 id.socket = env->cpu[cpu.cpu].socket_id; 1678 id.die = env->cpu[cpu.cpu].die_id; 1679 } 1680 1681 return id; 1682 } 1683 1684 static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env, 1685 u32 cache_level, struct aggr_cpu_id *id) 1686 { 1687 int i; 1688 int caches_cnt = env->caches_cnt; 1689 struct cpu_cache_level *caches = env->caches; 1690 1691 id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; 1692 id->cache = -1; 1693 1694 if (!caches_cnt) 1695 return; 1696 1697 for (i = caches_cnt - 1; i > -1; --i) { 1698 struct perf_cpu_map *cpu_map; 1699 int map_contains_cpu; 1700 1701 /* 1702 * If user has not specified a level, find the fist level with 1703 * the cpu in the map. Since building the map is expensive, do 1704 * this only if levels match. 1705 */ 1706 if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level) 1707 continue; 1708 1709 cpu_map = perf_cpu_map__new(caches[i].map); 1710 map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); 1711 perf_cpu_map__put(cpu_map); 1712 1713 if (map_contains_cpu != -1) { 1714 id->cache_lvl = caches[i].level; 1715 id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); 1716 return; 1717 } 1718 } 1719 } 1720 1721 static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, 1722 void *data) 1723 { 1724 struct perf_env *env = data; 1725 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1726 1727 if (cpu.cpu != -1) { 1728 u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level; 1729 1730 id.socket = env->cpu[cpu.cpu].socket_id; 1731 id.die = env->cpu[cpu.cpu].die_id; 1732 perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id); 1733 } 1734 1735 return id; 1736 } 1737 1738 static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu, 1739 void *data) 1740 { 1741 struct perf_env *env = data; 1742 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1743 1744 if (cpu.cpu != -1) { 1745 id.socket = env->cpu[cpu.cpu].socket_id; 1746 id.die = env->cpu[cpu.cpu].die_id; 1747 id.cluster = env->cpu[cpu.cpu].cluster_id; 1748 } 1749 1750 return id; 1751 } 1752 1753 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1754 { 1755 struct perf_env *env = data; 1756 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1757 1758 if (cpu.cpu != -1) { 1759 /* 1760 * core_id is relative to socket, die and cluster, we need a 1761 * global id. So we set socket, die id, cluster id and core id. 1762 */ 1763 id.socket = env->cpu[cpu.cpu].socket_id; 1764 id.die = env->cpu[cpu.cpu].die_id; 1765 id.cluster = env->cpu[cpu.cpu].cluster_id; 1766 id.core = env->cpu[cpu.cpu].core_id; 1767 } 1768 1769 return id; 1770 } 1771 1772 static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data) 1773 { 1774 struct perf_env *env = data; 1775 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1776 1777 if (cpu.cpu != -1) { 1778 /* 1779 * core_id is relative to socket and die, 1780 * we need a global id. So we set 1781 * socket, die id and core id 1782 */ 1783 id.socket = env->cpu[cpu.cpu].socket_id; 1784 id.die = env->cpu[cpu.cpu].die_id; 1785 id.core = env->cpu[cpu.cpu].core_id; 1786 id.cpu = cpu; 1787 } 1788 1789 return id; 1790 } 1791 1792 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) 1793 { 1794 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1795 1796 id.node = perf_env__numa_node(data, cpu); 1797 return id; 1798 } 1799 1800 static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused, 1801 void *data __maybe_unused) 1802 { 1803 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1804 1805 /* it always aggregates to the cpu 0 */ 1806 id.cpu = (struct perf_cpu){ .cpu = 0 }; 1807 return id; 1808 } 1809 1810 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1811 struct perf_cpu cpu) 1812 { 1813 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1814 } 1815 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1816 struct perf_cpu cpu) 1817 { 1818 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1819 } 1820 1821 static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused, 1822 struct perf_cpu cpu) 1823 { 1824 return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1825 } 1826 1827 static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused, 1828 struct perf_cpu cpu) 1829 { 1830 return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1831 } 1832 1833 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1834 struct perf_cpu cpu) 1835 { 1836 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1837 } 1838 1839 static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused, 1840 struct perf_cpu cpu) 1841 { 1842 return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1843 } 1844 1845 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1846 struct perf_cpu cpu) 1847 { 1848 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1849 } 1850 1851 static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused, 1852 struct perf_cpu cpu) 1853 { 1854 return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1855 } 1856 1857 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) 1858 { 1859 switch (aggr_mode) { 1860 case AGGR_SOCKET: 1861 return perf_env__get_socket_aggr_by_cpu; 1862 case AGGR_DIE: 1863 return perf_env__get_die_aggr_by_cpu; 1864 case AGGR_CLUSTER: 1865 return perf_env__get_cluster_aggr_by_cpu; 1866 case AGGR_CACHE: 1867 return perf_env__get_cache_aggr_by_cpu; 1868 case AGGR_CORE: 1869 return perf_env__get_core_aggr_by_cpu; 1870 case AGGR_NODE: 1871 return perf_env__get_node_aggr_by_cpu; 1872 case AGGR_GLOBAL: 1873 return perf_env__get_global_aggr_by_cpu; 1874 case AGGR_NONE: 1875 return perf_env__get_cpu_aggr_by_cpu; 1876 case AGGR_THREAD: 1877 case AGGR_UNSET: 1878 case AGGR_MAX: 1879 default: 1880 return NULL; 1881 } 1882 } 1883 1884 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) 1885 { 1886 switch (aggr_mode) { 1887 case AGGR_SOCKET: 1888 return perf_stat__get_socket_file; 1889 case AGGR_DIE: 1890 return perf_stat__get_die_file; 1891 case AGGR_CLUSTER: 1892 return perf_stat__get_cluster_file; 1893 case AGGR_CACHE: 1894 return perf_stat__get_cache_file; 1895 case AGGR_CORE: 1896 return perf_stat__get_core_file; 1897 case AGGR_NODE: 1898 return perf_stat__get_node_file; 1899 case AGGR_GLOBAL: 1900 return perf_stat__get_global_file; 1901 case AGGR_NONE: 1902 return perf_stat__get_cpu_file; 1903 case AGGR_THREAD: 1904 case AGGR_UNSET: 1905 case AGGR_MAX: 1906 default: 1907 return NULL; 1908 } 1909 } 1910 1911 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1912 { 1913 struct perf_env *env = &st->session->header.env; 1914 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); 1915 bool needs_sort = stat_config.aggr_mode != AGGR_NONE; 1916 1917 if (stat_config.aggr_mode == AGGR_THREAD) { 1918 int nr = perf_thread_map__nr(evsel_list->core.threads); 1919 1920 stat_config.aggr_map = cpu_aggr_map__empty_new(nr); 1921 if (stat_config.aggr_map == NULL) 1922 return -ENOMEM; 1923 1924 for (int s = 0; s < nr; s++) { 1925 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1926 1927 id.thread_idx = s; 1928 stat_config.aggr_map->map[s] = id; 1929 } 1930 return 0; 1931 } 1932 1933 if (!get_id) 1934 return 0; 1935 1936 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, 1937 get_id, env, needs_sort); 1938 if (!stat_config.aggr_map) { 1939 pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]); 1940 return -1; 1941 } 1942 stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); 1943 return 0; 1944 } 1945 1946 /* 1947 * Add default attributes, if there were no attributes specified or 1948 * if -d/--detailed, -d -d or -d -d -d is used: 1949 */ 1950 static int add_default_attributes(void) 1951 { 1952 struct perf_event_attr default_attrs0[] = { 1953 1954 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1955 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1956 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1957 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1958 1959 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1960 }; 1961 struct perf_event_attr frontend_attrs[] = { 1962 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1963 }; 1964 struct perf_event_attr backend_attrs[] = { 1965 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1966 }; 1967 struct perf_event_attr default_attrs1[] = { 1968 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1969 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1970 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1971 1972 }; 1973 1974 /* 1975 * Detailed stats (-d), covering the L1 and last level data caches: 1976 */ 1977 struct perf_event_attr detailed_attrs[] = { 1978 1979 { .type = PERF_TYPE_HW_CACHE, 1980 .config = 1981 PERF_COUNT_HW_CACHE_L1D << 0 | 1982 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1983 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1984 1985 { .type = PERF_TYPE_HW_CACHE, 1986 .config = 1987 PERF_COUNT_HW_CACHE_L1D << 0 | 1988 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1989 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1990 1991 { .type = PERF_TYPE_HW_CACHE, 1992 .config = 1993 PERF_COUNT_HW_CACHE_LL << 0 | 1994 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1995 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1996 1997 { .type = PERF_TYPE_HW_CACHE, 1998 .config = 1999 PERF_COUNT_HW_CACHE_LL << 0 | 2000 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2001 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 2002 }; 2003 2004 /* 2005 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 2006 */ 2007 struct perf_event_attr very_detailed_attrs[] = { 2008 2009 { .type = PERF_TYPE_HW_CACHE, 2010 .config = 2011 PERF_COUNT_HW_CACHE_L1I << 0 | 2012 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2013 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 2014 2015 { .type = PERF_TYPE_HW_CACHE, 2016 .config = 2017 PERF_COUNT_HW_CACHE_L1I << 0 | 2018 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2019 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 2020 2021 { .type = PERF_TYPE_HW_CACHE, 2022 .config = 2023 PERF_COUNT_HW_CACHE_DTLB << 0 | 2024 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2025 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 2026 2027 { .type = PERF_TYPE_HW_CACHE, 2028 .config = 2029 PERF_COUNT_HW_CACHE_DTLB << 0 | 2030 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2031 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 2032 2033 { .type = PERF_TYPE_HW_CACHE, 2034 .config = 2035 PERF_COUNT_HW_CACHE_ITLB << 0 | 2036 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2037 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 2038 2039 { .type = PERF_TYPE_HW_CACHE, 2040 .config = 2041 PERF_COUNT_HW_CACHE_ITLB << 0 | 2042 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 2043 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 2044 2045 }; 2046 2047 /* 2048 * Very, very detailed stats (-d -d -d), adding prefetch events: 2049 */ 2050 struct perf_event_attr very_very_detailed_attrs[] = { 2051 2052 { .type = PERF_TYPE_HW_CACHE, 2053 .config = 2054 PERF_COUNT_HW_CACHE_L1D << 0 | 2055 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 2056 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 2057 2058 { .type = PERF_TYPE_HW_CACHE, 2059 .config = 2060 PERF_COUNT_HW_CACHE_L1D << 0 | 2061 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 2062 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 2063 }; 2064 2065 struct perf_event_attr default_null_attrs[] = {}; 2066 const char *pmu = parse_events_option_args.pmu_filter ?: "all"; 2067 2068 /* Set attrs if no event is selected and !null_run: */ 2069 if (stat_config.null_run) 2070 return 0; 2071 2072 if (transaction_run) { 2073 /* Handle -T as -M transaction. Once platform specific metrics 2074 * support has been added to the json files, all architectures 2075 * will use this approach. To determine transaction support 2076 * on an architecture test for such a metric name. 2077 */ 2078 if (!metricgroup__has_metric(pmu, "transaction")) { 2079 pr_err("Missing transaction metrics\n"); 2080 return -1; 2081 } 2082 return metricgroup__parse_groups(evsel_list, pmu, "transaction", 2083 stat_config.metric_no_group, 2084 stat_config.metric_no_merge, 2085 stat_config.metric_no_threshold, 2086 stat_config.user_requested_cpu_list, 2087 stat_config.system_wide, 2088 stat_config.hardware_aware_grouping, 2089 &stat_config.metric_events); 2090 } 2091 2092 if (smi_cost) { 2093 int smi; 2094 2095 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 2096 pr_err("freeze_on_smi is not supported.\n"); 2097 return -1; 2098 } 2099 2100 if (!smi) { 2101 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 2102 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 2103 return -1; 2104 } 2105 smi_reset = true; 2106 } 2107 2108 if (!metricgroup__has_metric(pmu, "smi")) { 2109 pr_err("Missing smi metrics\n"); 2110 return -1; 2111 } 2112 2113 if (!force_metric_only) 2114 stat_config.metric_only = true; 2115 2116 return metricgroup__parse_groups(evsel_list, pmu, "smi", 2117 stat_config.metric_no_group, 2118 stat_config.metric_no_merge, 2119 stat_config.metric_no_threshold, 2120 stat_config.user_requested_cpu_list, 2121 stat_config.system_wide, 2122 stat_config.hardware_aware_grouping, 2123 &stat_config.metric_events); 2124 } 2125 2126 if (topdown_run) { 2127 unsigned int max_level = metricgroups__topdown_max_level(); 2128 char str[] = "TopdownL1"; 2129 2130 if (!force_metric_only) 2131 stat_config.metric_only = true; 2132 2133 if (!max_level) { 2134 pr_err("Topdown requested but the topdown metric groups aren't present.\n" 2135 "(See perf list the metric groups have names like TopdownL1)\n"); 2136 return -1; 2137 } 2138 if (stat_config.topdown_level > max_level) { 2139 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 2140 return -1; 2141 } else if (!stat_config.topdown_level) 2142 stat_config.topdown_level = 1; 2143 2144 if (!stat_config.interval && !stat_config.metric_only) { 2145 fprintf(stat_config.output, 2146 "Topdown accuracy may decrease when measuring long periods.\n" 2147 "Please print the result regularly, e.g. -I1000\n"); 2148 } 2149 str[8] = stat_config.topdown_level + '0'; 2150 if (metricgroup__parse_groups(evsel_list, 2151 pmu, str, 2152 /*metric_no_group=*/false, 2153 /*metric_no_merge=*/false, 2154 /*metric_no_threshold=*/true, 2155 stat_config.user_requested_cpu_list, 2156 stat_config.system_wide, 2157 stat_config.hardware_aware_grouping, 2158 &stat_config.metric_events) < 0) 2159 return -1; 2160 } 2161 2162 if (!stat_config.topdown_level) 2163 stat_config.topdown_level = 1; 2164 2165 if (!evsel_list->core.nr_entries) { 2166 /* No events so add defaults. */ 2167 if (target__has_cpu(&target)) 2168 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 2169 2170 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 2171 return -1; 2172 if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) { 2173 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 2174 return -1; 2175 } 2176 if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) { 2177 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 2178 return -1; 2179 } 2180 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 2181 return -1; 2182 /* 2183 * Add TopdownL1 metrics if they exist. To minimize 2184 * multiplexing, don't request threshold computation. 2185 */ 2186 if (metricgroup__has_metric(pmu, "Default")) { 2187 struct evlist *metric_evlist = evlist__new(); 2188 struct evsel *metric_evsel; 2189 2190 if (!metric_evlist) 2191 return -1; 2192 2193 if (metricgroup__parse_groups(metric_evlist, pmu, "Default", 2194 /*metric_no_group=*/false, 2195 /*metric_no_merge=*/false, 2196 /*metric_no_threshold=*/true, 2197 stat_config.user_requested_cpu_list, 2198 stat_config.system_wide, 2199 stat_config.hardware_aware_grouping, 2200 &stat_config.metric_events) < 0) 2201 return -1; 2202 2203 evlist__for_each_entry(metric_evlist, metric_evsel) { 2204 metric_evsel->skippable = true; 2205 metric_evsel->default_metricgroup = true; 2206 } 2207 evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries); 2208 evlist__delete(metric_evlist); 2209 } 2210 2211 /* Platform specific attrs */ 2212 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) 2213 return -1; 2214 } 2215 2216 /* Detailed events get appended to the event list: */ 2217 2218 if (detailed_run < 1) 2219 return 0; 2220 2221 /* Append detailed run extra attributes: */ 2222 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 2223 return -1; 2224 2225 if (detailed_run < 2) 2226 return 0; 2227 2228 /* Append very detailed run extra attributes: */ 2229 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 2230 return -1; 2231 2232 if (detailed_run < 3) 2233 return 0; 2234 2235 /* Append very, very detailed run extra attributes: */ 2236 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 2237 } 2238 2239 static const char * const stat_record_usage[] = { 2240 "perf stat record [<options>]", 2241 NULL, 2242 }; 2243 2244 static void init_features(struct perf_session *session) 2245 { 2246 int feat; 2247 2248 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 2249 perf_header__set_feat(&session->header, feat); 2250 2251 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 2252 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 2253 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 2254 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 2255 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 2256 } 2257 2258 static int __cmd_record(int argc, const char **argv) 2259 { 2260 struct perf_session *session; 2261 struct perf_data *data = &perf_stat.data; 2262 2263 argc = parse_options(argc, argv, stat_options, stat_record_usage, 2264 PARSE_OPT_STOP_AT_NON_OPTION); 2265 2266 if (output_name) 2267 data->path = output_name; 2268 2269 if (stat_config.run_count != 1 || forever) { 2270 pr_err("Cannot use -r option with perf stat record.\n"); 2271 return -1; 2272 } 2273 2274 session = perf_session__new(data, NULL); 2275 if (IS_ERR(session)) { 2276 pr_err("Perf session creation failed\n"); 2277 return PTR_ERR(session); 2278 } 2279 2280 init_features(session); 2281 2282 session->evlist = evsel_list; 2283 perf_stat.session = session; 2284 perf_stat.record = true; 2285 return argc; 2286 } 2287 2288 static int process_stat_round_event(struct perf_session *session, 2289 union perf_event *event) 2290 { 2291 struct perf_record_stat_round *stat_round = &event->stat_round; 2292 struct timespec tsh, *ts = NULL; 2293 const char **argv = session->header.env.cmdline_argv; 2294 int argc = session->header.env.nr_cmdline; 2295 2296 process_counters(); 2297 2298 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2299 update_stats(&walltime_nsecs_stats, stat_round->time); 2300 2301 if (stat_config.interval && stat_round->time) { 2302 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2303 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2304 ts = &tsh; 2305 } 2306 2307 print_counters(ts, argc, argv); 2308 return 0; 2309 } 2310 2311 static 2312 int process_stat_config_event(struct perf_session *session, 2313 union perf_event *event) 2314 { 2315 struct perf_tool *tool = session->tool; 2316 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2317 2318 perf_event__read_stat_config(&stat_config, &event->stat_config); 2319 2320 if (perf_cpu_map__is_empty(st->cpus)) { 2321 if (st->aggr_mode != AGGR_UNSET) 2322 pr_warning("warning: processing task data, aggregation mode not set\n"); 2323 } else if (st->aggr_mode != AGGR_UNSET) { 2324 stat_config.aggr_mode = st->aggr_mode; 2325 } 2326 2327 if (perf_stat.data.is_pipe) 2328 perf_stat_init_aggr_mode(); 2329 else 2330 perf_stat_init_aggr_mode_file(st); 2331 2332 if (stat_config.aggr_map) { 2333 int nr_aggr = stat_config.aggr_map->nr; 2334 2335 if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) { 2336 pr_err("cannot allocate aggr counts\n"); 2337 return -1; 2338 } 2339 } 2340 return 0; 2341 } 2342 2343 static int set_maps(struct perf_stat *st) 2344 { 2345 if (!st->cpus || !st->threads) 2346 return 0; 2347 2348 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2349 return -EINVAL; 2350 2351 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2352 2353 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true)) 2354 return -ENOMEM; 2355 2356 st->maps_allocated = true; 2357 return 0; 2358 } 2359 2360 static 2361 int process_thread_map_event(struct perf_session *session, 2362 union perf_event *event) 2363 { 2364 struct perf_tool *tool = session->tool; 2365 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2366 2367 if (st->threads) { 2368 pr_warning("Extra thread map event, ignoring.\n"); 2369 return 0; 2370 } 2371 2372 st->threads = thread_map__new_event(&event->thread_map); 2373 if (!st->threads) 2374 return -ENOMEM; 2375 2376 return set_maps(st); 2377 } 2378 2379 static 2380 int process_cpu_map_event(struct perf_session *session, 2381 union perf_event *event) 2382 { 2383 struct perf_tool *tool = session->tool; 2384 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2385 struct perf_cpu_map *cpus; 2386 2387 if (st->cpus) { 2388 pr_warning("Extra cpu map event, ignoring.\n"); 2389 return 0; 2390 } 2391 2392 cpus = cpu_map__new_data(&event->cpu_map.data); 2393 if (!cpus) 2394 return -ENOMEM; 2395 2396 st->cpus = cpus; 2397 return set_maps(st); 2398 } 2399 2400 static const char * const stat_report_usage[] = { 2401 "perf stat report [<options>]", 2402 NULL, 2403 }; 2404 2405 static struct perf_stat perf_stat = { 2406 .tool = { 2407 .attr = perf_event__process_attr, 2408 .event_update = perf_event__process_event_update, 2409 .thread_map = process_thread_map_event, 2410 .cpu_map = process_cpu_map_event, 2411 .stat_config = process_stat_config_event, 2412 .stat = perf_event__process_stat_event, 2413 .stat_round = process_stat_round_event, 2414 }, 2415 .aggr_mode = AGGR_UNSET, 2416 .aggr_level = 0, 2417 }; 2418 2419 static int __cmd_report(int argc, const char **argv) 2420 { 2421 struct perf_session *session; 2422 const struct option options[] = { 2423 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2424 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2425 "aggregate counts per processor socket", AGGR_SOCKET), 2426 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2427 "aggregate counts per processor die", AGGR_DIE), 2428 OPT_SET_UINT(0, "per-cluster", &perf_stat.aggr_mode, 2429 "aggregate counts perf processor cluster", AGGR_CLUSTER), 2430 OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level, 2431 "cache level", 2432 "aggregate count at this cache level (Default: LLC)", 2433 parse_cache_level), 2434 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2435 "aggregate counts per physical processor core", AGGR_CORE), 2436 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2437 "aggregate counts per numa node", AGGR_NODE), 2438 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2439 "disable CPU count aggregation", AGGR_NONE), 2440 OPT_END() 2441 }; 2442 struct stat st; 2443 int ret; 2444 2445 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2446 2447 if (!input_name || !strlen(input_name)) { 2448 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2449 input_name = "-"; 2450 else 2451 input_name = "perf.data"; 2452 } 2453 2454 perf_stat.data.path = input_name; 2455 perf_stat.data.mode = PERF_DATA_MODE_READ; 2456 2457 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2458 if (IS_ERR(session)) 2459 return PTR_ERR(session); 2460 2461 perf_stat.session = session; 2462 stat_config.output = stderr; 2463 evlist__delete(evsel_list); 2464 evsel_list = session->evlist; 2465 2466 ret = perf_session__process_events(session); 2467 if (ret) 2468 return ret; 2469 2470 perf_session__delete(session); 2471 return 0; 2472 } 2473 2474 static void setup_system_wide(int forks) 2475 { 2476 /* 2477 * Make system wide (-a) the default target if 2478 * no target was specified and one of following 2479 * conditions is met: 2480 * 2481 * - there's no workload specified 2482 * - there is workload specified but all requested 2483 * events are system wide events 2484 */ 2485 if (!target__none(&target)) 2486 return; 2487 2488 if (!forks) 2489 target.system_wide = true; 2490 else { 2491 struct evsel *counter; 2492 2493 evlist__for_each_entry(evsel_list, counter) { 2494 if (!counter->core.requires_cpu && 2495 !evsel__name_is(counter, "duration_time")) { 2496 return; 2497 } 2498 } 2499 2500 if (evsel_list->core.nr_entries) 2501 target.system_wide = true; 2502 } 2503 } 2504 2505 int cmd_stat(int argc, const char **argv) 2506 { 2507 const char * const stat_usage[] = { 2508 "perf stat [<options>] [<command>]", 2509 NULL 2510 }; 2511 int status = -EINVAL, run_idx, err; 2512 const char *mode; 2513 FILE *output = stderr; 2514 unsigned int interval, timeout; 2515 const char * const stat_subcommands[] = { "record", "report" }; 2516 char errbuf[BUFSIZ]; 2517 2518 setlocale(LC_ALL, ""); 2519 2520 evsel_list = evlist__new(); 2521 if (evsel_list == NULL) 2522 return -ENOMEM; 2523 2524 parse_events__shrink_config_terms(); 2525 2526 /* String-parsing callback-based options would segfault when negated */ 2527 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2528 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2529 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2530 2531 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2532 (const char **) stat_usage, 2533 PARSE_OPT_STOP_AT_NON_OPTION); 2534 2535 if (stat_config.csv_sep) { 2536 stat_config.csv_output = true; 2537 if (!strcmp(stat_config.csv_sep, "\\t")) 2538 stat_config.csv_sep = "\t"; 2539 } else 2540 stat_config.csv_sep = DEFAULT_SEPARATOR; 2541 2542 if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 2543 argc = __cmd_record(argc, argv); 2544 if (argc < 0) 2545 return -1; 2546 } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0])) 2547 return __cmd_report(argc, argv); 2548 2549 interval = stat_config.interval; 2550 timeout = stat_config.timeout; 2551 2552 /* 2553 * For record command the -o is already taken care of. 2554 */ 2555 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2556 output = NULL; 2557 2558 if (output_name && output_fd) { 2559 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2560 parse_options_usage(stat_usage, stat_options, "o", 1); 2561 parse_options_usage(NULL, stat_options, "log-fd", 0); 2562 goto out; 2563 } 2564 2565 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2566 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2567 goto out; 2568 } 2569 2570 if (stat_config.metric_only && stat_config.run_count > 1) { 2571 fprintf(stderr, "--metric-only is not supported with -r\n"); 2572 goto out; 2573 } 2574 2575 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2576 fprintf(stderr, "--table is only supported with -r\n"); 2577 parse_options_usage(stat_usage, stat_options, "r", 1); 2578 parse_options_usage(NULL, stat_options, "table", 0); 2579 goto out; 2580 } 2581 2582 if (output_fd < 0) { 2583 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2584 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2585 goto out; 2586 } 2587 2588 if (!output && !quiet) { 2589 struct timespec tm; 2590 mode = append_file ? "a" : "w"; 2591 2592 output = fopen(output_name, mode); 2593 if (!output) { 2594 perror("failed to create output file"); 2595 return -1; 2596 } 2597 if (!stat_config.json_output) { 2598 clock_gettime(CLOCK_REALTIME, &tm); 2599 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2600 } 2601 } else if (output_fd > 0) { 2602 mode = append_file ? "a" : "w"; 2603 output = fdopen(output_fd, mode); 2604 if (!output) { 2605 perror("Failed opening logfd"); 2606 return -errno; 2607 } 2608 } 2609 2610 if (stat_config.interval_clear && !isatty(fileno(output))) { 2611 fprintf(stderr, "--interval-clear does not work with output\n"); 2612 parse_options_usage(stat_usage, stat_options, "o", 1); 2613 parse_options_usage(NULL, stat_options, "log-fd", 0); 2614 parse_options_usage(NULL, stat_options, "interval-clear", 0); 2615 return -1; 2616 } 2617 2618 stat_config.output = output; 2619 2620 /* 2621 * let the spreadsheet do the pretty-printing 2622 */ 2623 if (stat_config.csv_output) { 2624 /* User explicitly passed -B? */ 2625 if (big_num_opt == 1) { 2626 fprintf(stderr, "-B option not supported with -x\n"); 2627 parse_options_usage(stat_usage, stat_options, "B", 1); 2628 parse_options_usage(NULL, stat_options, "x", 1); 2629 goto out; 2630 } else /* Nope, so disable big number formatting */ 2631 stat_config.big_num = false; 2632 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2633 stat_config.big_num = false; 2634 2635 err = target__validate(&target); 2636 if (err) { 2637 target__strerror(&target, err, errbuf, BUFSIZ); 2638 pr_warning("%s\n", errbuf); 2639 } 2640 2641 setup_system_wide(argc); 2642 2643 /* 2644 * Display user/system times only for single 2645 * run and when there's specified tracee. 2646 */ 2647 if ((stat_config.run_count == 1) && target__none(&target)) 2648 stat_config.ru_display = true; 2649 2650 if (stat_config.run_count < 0) { 2651 pr_err("Run count must be a positive number\n"); 2652 parse_options_usage(stat_usage, stat_options, "r", 1); 2653 goto out; 2654 } else if (stat_config.run_count == 0) { 2655 forever = true; 2656 stat_config.run_count = 1; 2657 } 2658 2659 if (stat_config.walltime_run_table) { 2660 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2661 if (!stat_config.walltime_run) { 2662 pr_err("failed to setup -r option"); 2663 goto out; 2664 } 2665 } 2666 2667 if ((stat_config.aggr_mode == AGGR_THREAD) && 2668 !target__has_task(&target)) { 2669 if (!target.system_wide || target.cpu_list) { 2670 fprintf(stderr, "The --per-thread option is only " 2671 "available when monitoring via -p -t -a " 2672 "options or only --per-thread.\n"); 2673 parse_options_usage(NULL, stat_options, "p", 1); 2674 parse_options_usage(NULL, stat_options, "t", 1); 2675 goto out; 2676 } 2677 } 2678 2679 /* 2680 * no_aggr, cgroup are for system-wide only 2681 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2682 */ 2683 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2684 stat_config.aggr_mode != AGGR_THREAD) || 2685 (nr_cgroups || stat_config.cgroup_list)) && 2686 !target__has_cpu(&target)) { 2687 fprintf(stderr, "both cgroup and no-aggregation " 2688 "modes only available in system-wide mode\n"); 2689 2690 parse_options_usage(stat_usage, stat_options, "G", 1); 2691 parse_options_usage(NULL, stat_options, "A", 1); 2692 parse_options_usage(NULL, stat_options, "a", 1); 2693 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2694 goto out; 2695 } 2696 2697 if (stat_config.iostat_run) { 2698 status = iostat_prepare(evsel_list, &stat_config); 2699 if (status) 2700 goto out; 2701 if (iostat_mode == IOSTAT_LIST) { 2702 iostat_list(evsel_list, &stat_config); 2703 goto out; 2704 } else if (verbose > 0) 2705 iostat_list(evsel_list, &stat_config); 2706 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2707 target.system_wide = true; 2708 } 2709 2710 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2711 target.per_thread = true; 2712 2713 stat_config.system_wide = target.system_wide; 2714 if (target.cpu_list) { 2715 stat_config.user_requested_cpu_list = strdup(target.cpu_list); 2716 if (!stat_config.user_requested_cpu_list) { 2717 status = -ENOMEM; 2718 goto out; 2719 } 2720 } 2721 2722 /* 2723 * Metric parsing needs to be delayed as metrics may optimize events 2724 * knowing the target is system-wide. 2725 */ 2726 if (metrics) { 2727 const char *pmu = parse_events_option_args.pmu_filter ?: "all"; 2728 int ret = metricgroup__parse_groups(evsel_list, pmu, metrics, 2729 stat_config.metric_no_group, 2730 stat_config.metric_no_merge, 2731 stat_config.metric_no_threshold, 2732 stat_config.user_requested_cpu_list, 2733 stat_config.system_wide, 2734 stat_config.hardware_aware_grouping, 2735 &stat_config.metric_events); 2736 2737 zfree(&metrics); 2738 if (ret) { 2739 status = ret; 2740 goto out; 2741 } 2742 } 2743 2744 if (add_default_attributes()) 2745 goto out; 2746 2747 if (stat_config.cgroup_list) { 2748 if (nr_cgroups > 0) { 2749 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2750 parse_options_usage(stat_usage, stat_options, "G", 1); 2751 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2752 goto out; 2753 } 2754 2755 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2756 &stat_config.metric_events, true) < 0) { 2757 parse_options_usage(stat_usage, stat_options, 2758 "for-each-cgroup", 0); 2759 goto out; 2760 } 2761 } 2762 2763 evlist__warn_user_requested_cpus(evsel_list, target.cpu_list); 2764 2765 if (evlist__create_maps(evsel_list, &target) < 0) { 2766 if (target__has_task(&target)) { 2767 pr_err("Problems finding threads of monitor\n"); 2768 parse_options_usage(stat_usage, stat_options, "p", 1); 2769 parse_options_usage(NULL, stat_options, "t", 1); 2770 } else if (target__has_cpu(&target)) { 2771 perror("failed to parse CPUs map"); 2772 parse_options_usage(stat_usage, stat_options, "C", 1); 2773 parse_options_usage(NULL, stat_options, "a", 1); 2774 } 2775 goto out; 2776 } 2777 2778 evlist__check_cpu_maps(evsel_list); 2779 2780 /* 2781 * Initialize thread_map with comm names, 2782 * so we could print it out on output. 2783 */ 2784 if (stat_config.aggr_mode == AGGR_THREAD) { 2785 thread_map__read_comms(evsel_list->core.threads); 2786 } 2787 2788 if (stat_config.aggr_mode == AGGR_NODE) 2789 cpu__setup_cpunode_map(); 2790 2791 if (stat_config.times && interval) 2792 interval_count = true; 2793 else if (stat_config.times && !interval) { 2794 pr_err("interval-count option should be used together with " 2795 "interval-print.\n"); 2796 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2797 parse_options_usage(stat_usage, stat_options, "I", 1); 2798 goto out; 2799 } 2800 2801 if (timeout && timeout < 100) { 2802 if (timeout < 10) { 2803 pr_err("timeout must be >= 10ms.\n"); 2804 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2805 goto out; 2806 } else 2807 pr_warning("timeout < 100ms. " 2808 "The overhead percentage could be high in some cases. " 2809 "Please proceed with caution.\n"); 2810 } 2811 if (timeout && interval) { 2812 pr_err("timeout option is not supported with interval-print.\n"); 2813 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2814 parse_options_usage(stat_usage, stat_options, "I", 1); 2815 goto out; 2816 } 2817 2818 if (perf_stat_init_aggr_mode()) 2819 goto out; 2820 2821 if (evlist__alloc_stats(&stat_config, evsel_list, interval)) 2822 goto out; 2823 2824 /* 2825 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2826 * while avoiding that older tools show confusing messages. 2827 * 2828 * However for pipe sessions we need to keep it zero, 2829 * because script's perf_evsel__check_attr is triggered 2830 * by attr->sample_type != 0, and we can't run it on 2831 * stat sessions. 2832 */ 2833 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2834 2835 /* 2836 * We dont want to block the signals - that would cause 2837 * child tasks to inherit that and Ctrl-C would not work. 2838 * What we want is for Ctrl-C to work in the exec()-ed 2839 * task, but being ignored by perf stat itself: 2840 */ 2841 atexit(sig_atexit); 2842 if (!forever) 2843 signal(SIGINT, skip_signal); 2844 signal(SIGCHLD, skip_signal); 2845 signal(SIGALRM, skip_signal); 2846 signal(SIGABRT, skip_signal); 2847 2848 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2849 goto out; 2850 2851 /* Enable ignoring missing threads when -p option is defined. */ 2852 evlist__first(evsel_list)->ignore_missing_thread = target.pid; 2853 status = 0; 2854 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2855 if (stat_config.run_count != 1 && verbose > 0) 2856 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2857 run_idx + 1); 2858 2859 if (run_idx != 0) 2860 evlist__reset_prev_raw_counts(evsel_list); 2861 2862 status = run_perf_stat(argc, argv, run_idx); 2863 if (forever && status != -1 && !interval) { 2864 print_counters(NULL, argc, argv); 2865 perf_stat__reset_stats(); 2866 } 2867 } 2868 2869 if (!forever && status != -1 && (!interval || stat_config.summary)) { 2870 if (stat_config.run_count > 1) 2871 evlist__copy_res_stats(&stat_config, evsel_list); 2872 print_counters(NULL, argc, argv); 2873 } 2874 2875 evlist__finalize_ctlfd(evsel_list); 2876 2877 if (STAT_RECORD) { 2878 /* 2879 * We synthesize the kernel mmap record just so that older tools 2880 * don't emit warnings about not being able to resolve symbols 2881 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2882 * a saner message about no samples being in the perf.data file. 2883 * 2884 * This also serves to suppress a warning about f_header.data.size == 0 2885 * in header.c at the moment 'perf stat record' gets introduced, which 2886 * is not really needed once we start adding the stat specific PERF_RECORD_ 2887 * records, but the need to suppress the kptr_restrict messages in older 2888 * tools remain -acme 2889 */ 2890 int fd = perf_data__fd(&perf_stat.data); 2891 2892 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2893 process_synthesized_event, 2894 &perf_stat.session->machines.host); 2895 if (err) { 2896 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2897 "older tools may produce warnings about this file\n."); 2898 } 2899 2900 if (!interval) { 2901 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2902 pr_err("failed to write stat round event\n"); 2903 } 2904 2905 if (!perf_stat.data.is_pipe) { 2906 perf_stat.session->header.data_size += perf_stat.bytes_written; 2907 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2908 } 2909 2910 evlist__close(evsel_list); 2911 perf_session__delete(perf_stat.session); 2912 } 2913 2914 perf_stat__exit_aggr_mode(); 2915 evlist__free_stats(evsel_list); 2916 out: 2917 if (stat_config.iostat_run) 2918 iostat_release(evsel_list); 2919 2920 zfree(&stat_config.walltime_run); 2921 zfree(&stat_config.user_requested_cpu_list); 2922 2923 if (smi_cost && smi_reset) 2924 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2925 2926 evlist__delete(evsel_list); 2927 2928 metricgroup__rblist_exit(&stat_config.metric_events); 2929 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2930 2931 return status; 2932 } 2933