1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evsel.h" 52 #include "util/debug.h" 53 #include "util/color.h" 54 #include "util/stat.h" 55 #include "util/header.h" 56 #include "util/cpumap.h" 57 #include "util/thread_map.h" 58 #include "util/counts.h" 59 #include "util/group.h" 60 #include "util/session.h" 61 #include "util/tool.h" 62 #include "util/string2.h" 63 #include "util/metricgroup.h" 64 #include "util/synthetic-events.h" 65 #include "util/target.h" 66 #include "util/time-utils.h" 67 #include "util/top.h" 68 #include "util/affinity.h" 69 #include "util/pfm.h" 70 #include "asm/bug.h" 71 72 #include <linux/time64.h> 73 #include <linux/zalloc.h> 74 #include <api/fs/fs.h> 75 #include <errno.h> 76 #include <signal.h> 77 #include <stdlib.h> 78 #include <sys/prctl.h> 79 #include <inttypes.h> 80 #include <locale.h> 81 #include <math.h> 82 #include <sys/types.h> 83 #include <sys/stat.h> 84 #include <sys/wait.h> 85 #include <unistd.h> 86 #include <sys/time.h> 87 #include <sys/resource.h> 88 #include <linux/err.h> 89 90 #include <linux/ctype.h> 91 #include <perf/evlist.h> 92 93 #define DEFAULT_SEPARATOR " " 94 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 95 96 static void print_counters(struct timespec *ts, int argc, const char **argv); 97 98 /* Default events used for perf stat -T */ 99 static const char *transaction_attrs = { 100 "task-clock," 101 "{" 102 "instructions," 103 "cycles," 104 "cpu/cycles-t/," 105 "cpu/tx-start/," 106 "cpu/el-start/," 107 "cpu/cycles-ct/" 108 "}" 109 }; 110 111 /* More limited version when the CPU does not have all events. */ 112 static const char * transaction_limited_attrs = { 113 "task-clock," 114 "{" 115 "instructions," 116 "cycles," 117 "cpu/cycles-t/," 118 "cpu/tx-start/" 119 "}" 120 }; 121 122 static const char * topdown_attrs[] = { 123 "topdown-total-slots", 124 "topdown-slots-retired", 125 "topdown-recovery-bubbles", 126 "topdown-fetch-bubbles", 127 "topdown-slots-issued", 128 NULL, 129 }; 130 131 static const char *smi_cost_attrs = { 132 "{" 133 "msr/aperf/," 134 "msr/smi/," 135 "cycles" 136 "}" 137 }; 138 139 static struct evlist *evsel_list; 140 141 static struct target target = { 142 .uid = UINT_MAX, 143 }; 144 145 #define METRIC_ONLY_LEN 20 146 147 static volatile pid_t child_pid = -1; 148 static int detailed_run = 0; 149 static bool transaction_run; 150 static bool topdown_run = false; 151 static bool smi_cost = false; 152 static bool smi_reset = false; 153 static int big_num_opt = -1; 154 static bool group = false; 155 static const char *pre_cmd = NULL; 156 static const char *post_cmd = NULL; 157 static bool sync_run = false; 158 static bool forever = false; 159 static bool force_metric_only = false; 160 static struct timespec ref_time; 161 static bool append_file; 162 static bool interval_count; 163 static const char *output_name; 164 static int output_fd; 165 166 struct perf_stat { 167 bool record; 168 struct perf_data data; 169 struct perf_session *session; 170 u64 bytes_written; 171 struct perf_tool tool; 172 bool maps_allocated; 173 struct perf_cpu_map *cpus; 174 struct perf_thread_map *threads; 175 enum aggr_mode aggr_mode; 176 }; 177 178 static struct perf_stat perf_stat; 179 #define STAT_RECORD perf_stat.record 180 181 static volatile int done = 0; 182 183 static struct perf_stat_config stat_config = { 184 .aggr_mode = AGGR_GLOBAL, 185 .scale = true, 186 .unit_width = 4, /* strlen("unit") */ 187 .run_count = 1, 188 .metric_only_len = METRIC_ONLY_LEN, 189 .walltime_nsecs_stats = &walltime_nsecs_stats, 190 .big_num = true, 191 .ctl_fd = -1, 192 .ctl_fd_ack = -1 193 }; 194 195 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 196 { 197 if (!a->core.cpus && !b->core.cpus) 198 return true; 199 200 if (!a->core.cpus || !b->core.cpus) 201 return false; 202 203 if (a->core.cpus->nr != b->core.cpus->nr) 204 return false; 205 206 for (int i = 0; i < a->core.cpus->nr; i++) { 207 if (a->core.cpus->map[i] != b->core.cpus->map[i]) 208 return false; 209 } 210 211 return true; 212 } 213 214 static void evlist__check_cpu_maps(struct evlist *evlist) 215 { 216 struct evsel *evsel, *pos, *leader; 217 char buf[1024]; 218 219 evlist__for_each_entry(evlist, evsel) { 220 leader = evsel->leader; 221 222 /* Check that leader matches cpus with each member. */ 223 if (leader == evsel) 224 continue; 225 if (cpus_map_matched(leader, evsel)) 226 continue; 227 228 /* If there's mismatch disable the group and warn user. */ 229 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 230 evsel__group_desc(leader, buf, sizeof(buf)); 231 pr_warning(" %s\n", buf); 232 233 if (verbose) { 234 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 235 pr_warning(" %s: %s\n", leader->name, buf); 236 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 237 pr_warning(" %s: %s\n", evsel->name, buf); 238 } 239 240 for_each_group_evsel(pos, leader) { 241 pos->leader = pos; 242 pos->core.nr_members = 0; 243 } 244 evsel->leader->core.nr_members = 0; 245 } 246 } 247 248 static inline void diff_timespec(struct timespec *r, struct timespec *a, 249 struct timespec *b) 250 { 251 r->tv_sec = a->tv_sec - b->tv_sec; 252 if (a->tv_nsec < b->tv_nsec) { 253 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 254 r->tv_sec--; 255 } else { 256 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 257 } 258 } 259 260 static void perf_stat__reset_stats(void) 261 { 262 int i; 263 264 perf_evlist__reset_stats(evsel_list); 265 perf_stat__reset_shadow_stats(); 266 267 for (i = 0; i < stat_config.stats_num; i++) 268 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 269 } 270 271 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 272 union perf_event *event, 273 struct perf_sample *sample __maybe_unused, 274 struct machine *machine __maybe_unused) 275 { 276 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 277 pr_err("failed to write perf data, error: %m\n"); 278 return -1; 279 } 280 281 perf_stat.bytes_written += event->header.size; 282 return 0; 283 } 284 285 static int write_stat_round_event(u64 tm, u64 type) 286 { 287 return perf_event__synthesize_stat_round(NULL, tm, type, 288 process_synthesized_event, 289 NULL); 290 } 291 292 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 293 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 294 295 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 296 297 static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, 298 struct perf_counts_values *count) 299 { 300 struct perf_sample_id *sid = SID(counter, cpu, thread); 301 302 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 303 process_synthesized_event, NULL); 304 } 305 306 static int read_single_counter(struct evsel *counter, int cpu, 307 int thread, struct timespec *rs) 308 { 309 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 310 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 311 struct perf_counts_values *count = 312 perf_counts(counter->counts, cpu, thread); 313 count->ena = count->run = val; 314 count->val = val; 315 return 0; 316 } 317 return evsel__read_counter(counter, cpu, thread); 318 } 319 320 /* 321 * Read out the results of a single counter: 322 * do not aggregate counts across CPUs in system-wide mode 323 */ 324 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) 325 { 326 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 327 int thread; 328 329 if (!counter->supported) 330 return -ENOENT; 331 332 if (counter->core.system_wide) 333 nthreads = 1; 334 335 for (thread = 0; thread < nthreads; thread++) { 336 struct perf_counts_values *count; 337 338 count = perf_counts(counter->counts, cpu, thread); 339 340 /* 341 * The leader's group read loads data into its group members 342 * (via evsel__read_counter()) and sets their count->loaded. 343 */ 344 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && 345 read_single_counter(counter, cpu, thread, rs)) { 346 counter->counts->scaled = -1; 347 perf_counts(counter->counts, cpu, thread)->ena = 0; 348 perf_counts(counter->counts, cpu, thread)->run = 0; 349 return -1; 350 } 351 352 perf_counts__set_loaded(counter->counts, cpu, thread, false); 353 354 if (STAT_RECORD) { 355 if (evsel__write_stat_event(counter, cpu, thread, count)) { 356 pr_err("failed to write stat event\n"); 357 return -1; 358 } 359 } 360 361 if (verbose > 1) { 362 fprintf(stat_config.output, 363 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 364 evsel__name(counter), 365 cpu, 366 count->val, count->ena, count->run); 367 } 368 } 369 370 return 0; 371 } 372 373 static int read_affinity_counters(struct timespec *rs) 374 { 375 struct evsel *counter; 376 struct affinity affinity; 377 int i, ncpus, cpu; 378 379 if (affinity__setup(&affinity) < 0) 380 return -1; 381 382 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); 383 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 384 ncpus = 1; 385 evlist__for_each_cpu(evsel_list, i, cpu) { 386 if (i >= ncpus) 387 break; 388 affinity__set(&affinity, cpu); 389 390 evlist__for_each_entry(evsel_list, counter) { 391 if (evsel__cpu_iter_skip(counter, cpu)) 392 continue; 393 if (!counter->err) { 394 counter->err = read_counter_cpu(counter, rs, 395 counter->cpu_iter - 1); 396 } 397 } 398 } 399 affinity__cleanup(&affinity); 400 return 0; 401 } 402 403 static void read_counters(struct timespec *rs) 404 { 405 struct evsel *counter; 406 407 if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0)) 408 return; 409 410 evlist__for_each_entry(evsel_list, counter) { 411 if (counter->err) 412 pr_debug("failed to read counter %s\n", counter->name); 413 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 414 pr_warning("failed to process counter %s\n", counter->name); 415 counter->err = 0; 416 } 417 } 418 419 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 420 { 421 int i; 422 423 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 424 if (!config->stats) 425 return -1; 426 427 config->stats_num = nthreads; 428 429 for (i = 0; i < nthreads; i++) 430 runtime_stat__init(&config->stats[i]); 431 432 return 0; 433 } 434 435 static void runtime_stat_delete(struct perf_stat_config *config) 436 { 437 int i; 438 439 if (!config->stats) 440 return; 441 442 for (i = 0; i < config->stats_num; i++) 443 runtime_stat__exit(&config->stats[i]); 444 445 zfree(&config->stats); 446 } 447 448 static void runtime_stat_reset(struct perf_stat_config *config) 449 { 450 int i; 451 452 if (!config->stats) 453 return; 454 455 for (i = 0; i < config->stats_num; i++) 456 perf_stat__reset_shadow_per_stat(&config->stats[i]); 457 } 458 459 static void process_interval(void) 460 { 461 struct timespec ts, rs; 462 463 clock_gettime(CLOCK_MONOTONIC, &ts); 464 diff_timespec(&rs, &ts, &ref_time); 465 466 perf_stat__reset_shadow_per_stat(&rt_stat); 467 runtime_stat_reset(&stat_config); 468 read_counters(&rs); 469 470 if (STAT_RECORD) { 471 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 472 pr_err("failed to write stat round event\n"); 473 } 474 475 init_stats(&walltime_nsecs_stats); 476 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 477 print_counters(&rs, 0, NULL); 478 } 479 480 static bool handle_interval(unsigned int interval, int *times) 481 { 482 if (interval) { 483 process_interval(); 484 if (interval_count && !(--(*times))) 485 return true; 486 } 487 return false; 488 } 489 490 static void enable_counters(void) 491 { 492 if (stat_config.initial_delay < 0) { 493 pr_info(EVLIST_DISABLED_MSG); 494 return; 495 } 496 497 if (stat_config.initial_delay > 0) { 498 pr_info(EVLIST_DISABLED_MSG); 499 usleep(stat_config.initial_delay * USEC_PER_MSEC); 500 } 501 502 /* 503 * We need to enable counters only if: 504 * - we don't have tracee (attaching to task or cpu) 505 * - we have initial delay configured 506 */ 507 if (!target__none(&target) || stat_config.initial_delay) { 508 evlist__enable(evsel_list); 509 if (stat_config.initial_delay > 0) 510 pr_info(EVLIST_ENABLED_MSG); 511 } 512 } 513 514 static void disable_counters(void) 515 { 516 /* 517 * If we don't have tracee (attaching to task or cpu), counters may 518 * still be running. To get accurate group ratios, we must stop groups 519 * from counting before reading their constituent counters. 520 */ 521 if (!target__none(&target)) 522 evlist__disable(evsel_list); 523 } 524 525 static volatile int workload_exec_errno; 526 527 /* 528 * perf_evlist__prepare_workload will send a SIGUSR1 529 * if the fork fails, since we asked by setting its 530 * want_signal to true. 531 */ 532 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 533 void *ucontext __maybe_unused) 534 { 535 workload_exec_errno = info->si_value.sival_int; 536 } 537 538 static bool evsel__should_store_id(struct evsel *counter) 539 { 540 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 541 } 542 543 static bool is_target_alive(struct target *_target, 544 struct perf_thread_map *threads) 545 { 546 struct stat st; 547 int i; 548 549 if (!target__has_task(_target)) 550 return true; 551 552 for (i = 0; i < threads->nr; i++) { 553 char path[PATH_MAX]; 554 555 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 556 threads->map[i].pid); 557 558 if (!stat(path, &st)) 559 return true; 560 } 561 562 return false; 563 } 564 565 static void process_evlist(struct evlist *evlist, unsigned int interval) 566 { 567 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 568 569 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 570 switch (cmd) { 571 case EVLIST_CTL_CMD_ENABLE: 572 pr_info(EVLIST_ENABLED_MSG); 573 if (interval) 574 process_interval(); 575 break; 576 case EVLIST_CTL_CMD_DISABLE: 577 if (interval) 578 process_interval(); 579 pr_info(EVLIST_DISABLED_MSG); 580 break; 581 case EVLIST_CTL_CMD_SNAPSHOT: 582 case EVLIST_CTL_CMD_ACK: 583 case EVLIST_CTL_CMD_UNSUPPORTED: 584 default: 585 break; 586 } 587 } 588 } 589 590 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 591 int *time_to_sleep) 592 { 593 int tts = *time_to_sleep; 594 struct timespec time_diff; 595 596 diff_timespec(&time_diff, time_stop, time_start); 597 598 tts -= time_diff.tv_sec * MSEC_PER_SEC + 599 time_diff.tv_nsec / NSEC_PER_MSEC; 600 601 if (tts < 0) 602 tts = 0; 603 604 *time_to_sleep = tts; 605 } 606 607 static int dispatch_events(bool forks, int timeout, int interval, int *times) 608 { 609 int child_exited = 0, status = 0; 610 int time_to_sleep, sleep_time; 611 struct timespec time_start, time_stop; 612 613 if (interval) 614 sleep_time = interval; 615 else if (timeout) 616 sleep_time = timeout; 617 else 618 sleep_time = 1000; 619 620 time_to_sleep = sleep_time; 621 622 while (!done) { 623 if (forks) 624 child_exited = waitpid(child_pid, &status, WNOHANG); 625 else 626 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 627 628 if (child_exited) 629 break; 630 631 clock_gettime(CLOCK_MONOTONIC, &time_start); 632 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 633 if (timeout || handle_interval(interval, times)) 634 break; 635 time_to_sleep = sleep_time; 636 } else { /* fd revent */ 637 process_evlist(evsel_list, interval); 638 clock_gettime(CLOCK_MONOTONIC, &time_stop); 639 compute_tts(&time_start, &time_stop, &time_to_sleep); 640 } 641 } 642 643 return status; 644 } 645 646 enum counter_recovery { 647 COUNTER_SKIP, 648 COUNTER_RETRY, 649 COUNTER_FATAL, 650 }; 651 652 static enum counter_recovery stat_handle_error(struct evsel *counter) 653 { 654 char msg[BUFSIZ]; 655 /* 656 * PPC returns ENXIO for HW counters until 2.6.37 657 * (behavior changed with commit b0a873e). 658 */ 659 if (errno == EINVAL || errno == ENOSYS || 660 errno == ENOENT || errno == EOPNOTSUPP || 661 errno == ENXIO) { 662 if (verbose > 0) 663 ui__warning("%s event is not supported by the kernel.\n", 664 evsel__name(counter)); 665 counter->supported = false; 666 /* 667 * errored is a sticky flag that means one of the counter's 668 * cpu event had a problem and needs to be reexamined. 669 */ 670 counter->errored = true; 671 672 if ((counter->leader != counter) || 673 !(counter->leader->core.nr_members > 1)) 674 return COUNTER_SKIP; 675 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 676 if (verbose > 0) 677 ui__warning("%s\n", msg); 678 return COUNTER_RETRY; 679 } else if (target__has_per_thread(&target) && 680 evsel_list->core.threads && 681 evsel_list->core.threads->err_thread != -1) { 682 /* 683 * For global --per-thread case, skip current 684 * error thread. 685 */ 686 if (!thread_map__remove(evsel_list->core.threads, 687 evsel_list->core.threads->err_thread)) { 688 evsel_list->core.threads->err_thread = -1; 689 return COUNTER_RETRY; 690 } 691 } 692 693 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 694 ui__error("%s\n", msg); 695 696 if (child_pid != -1) 697 kill(child_pid, SIGTERM); 698 return COUNTER_FATAL; 699 } 700 701 static int __run_perf_stat(int argc, const char **argv, int run_idx) 702 { 703 int interval = stat_config.interval; 704 int times = stat_config.times; 705 int timeout = stat_config.timeout; 706 char msg[BUFSIZ]; 707 unsigned long long t0, t1; 708 struct evsel *counter; 709 size_t l; 710 int status = 0; 711 const bool forks = (argc > 0); 712 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 713 struct affinity affinity; 714 int i, cpu; 715 bool second_pass = false; 716 717 if (forks) { 718 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, 719 workload_exec_failed_signal) < 0) { 720 perror("failed to prepare workload"); 721 return -1; 722 } 723 child_pid = evsel_list->workload.pid; 724 } 725 726 if (group) 727 perf_evlist__set_leader(evsel_list); 728 729 if (affinity__setup(&affinity) < 0) 730 return -1; 731 732 evlist__for_each_cpu (evsel_list, i, cpu) { 733 affinity__set(&affinity, cpu); 734 735 evlist__for_each_entry(evsel_list, counter) { 736 if (evsel__cpu_iter_skip(counter, cpu)) 737 continue; 738 if (counter->reset_group || counter->errored) 739 continue; 740 try_again: 741 if (create_perf_stat_counter(counter, &stat_config, &target, 742 counter->cpu_iter - 1) < 0) { 743 744 /* 745 * Weak group failed. We cannot just undo this here 746 * because earlier CPUs might be in group mode, and the kernel 747 * doesn't support mixing group and non group reads. Defer 748 * it to later. 749 * Don't close here because we're in the wrong affinity. 750 */ 751 if ((errno == EINVAL || errno == EBADF) && 752 counter->leader != counter && 753 counter->weak_group) { 754 perf_evlist__reset_weak_group(evsel_list, counter, false); 755 assert(counter->reset_group); 756 second_pass = true; 757 continue; 758 } 759 760 switch (stat_handle_error(counter)) { 761 case COUNTER_FATAL: 762 return -1; 763 case COUNTER_RETRY: 764 goto try_again; 765 case COUNTER_SKIP: 766 continue; 767 default: 768 break; 769 } 770 771 } 772 counter->supported = true; 773 } 774 } 775 776 if (second_pass) { 777 /* 778 * Now redo all the weak group after closing them, 779 * and also close errored counters. 780 */ 781 782 evlist__for_each_cpu(evsel_list, i, cpu) { 783 affinity__set(&affinity, cpu); 784 /* First close errored or weak retry */ 785 evlist__for_each_entry(evsel_list, counter) { 786 if (!counter->reset_group && !counter->errored) 787 continue; 788 if (evsel__cpu_iter_skip_no_inc(counter, cpu)) 789 continue; 790 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); 791 } 792 /* Now reopen weak */ 793 evlist__for_each_entry(evsel_list, counter) { 794 if (!counter->reset_group && !counter->errored) 795 continue; 796 if (evsel__cpu_iter_skip(counter, cpu)) 797 continue; 798 if (!counter->reset_group) 799 continue; 800 try_again_reset: 801 pr_debug2("reopening weak %s\n", evsel__name(counter)); 802 if (create_perf_stat_counter(counter, &stat_config, &target, 803 counter->cpu_iter - 1) < 0) { 804 805 switch (stat_handle_error(counter)) { 806 case COUNTER_FATAL: 807 return -1; 808 case COUNTER_RETRY: 809 goto try_again_reset; 810 case COUNTER_SKIP: 811 continue; 812 default: 813 break; 814 } 815 } 816 counter->supported = true; 817 } 818 } 819 } 820 affinity__cleanup(&affinity); 821 822 evlist__for_each_entry(evsel_list, counter) { 823 if (!counter->supported) { 824 perf_evsel__free_fd(&counter->core); 825 continue; 826 } 827 828 l = strlen(counter->unit); 829 if (l > stat_config.unit_width) 830 stat_config.unit_width = l; 831 832 if (evsel__should_store_id(counter) && 833 evsel__store_ids(counter, evsel_list)) 834 return -1; 835 } 836 837 if (perf_evlist__apply_filters(evsel_list, &counter)) { 838 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 839 counter->filter, evsel__name(counter), errno, 840 str_error_r(errno, msg, sizeof(msg))); 841 return -1; 842 } 843 844 if (STAT_RECORD) { 845 int err, fd = perf_data__fd(&perf_stat.data); 846 847 if (is_pipe) { 848 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 849 } else { 850 err = perf_session__write_header(perf_stat.session, evsel_list, 851 fd, false); 852 } 853 854 if (err < 0) 855 return err; 856 857 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 858 process_synthesized_event, is_pipe); 859 if (err < 0) 860 return err; 861 } 862 863 /* 864 * Enable counters and exec the command: 865 */ 866 t0 = rdclock(); 867 clock_gettime(CLOCK_MONOTONIC, &ref_time); 868 869 if (forks) { 870 perf_evlist__start_workload(evsel_list); 871 enable_counters(); 872 873 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 874 status = dispatch_events(forks, timeout, interval, ×); 875 if (child_pid != -1) { 876 if (timeout) 877 kill(child_pid, SIGTERM); 878 wait4(child_pid, &status, 0, &stat_config.ru_data); 879 } 880 881 if (workload_exec_errno) { 882 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 883 pr_err("Workload failed: %s\n", emsg); 884 return -1; 885 } 886 887 if (WIFSIGNALED(status)) 888 psignal(WTERMSIG(status), argv[0]); 889 } else { 890 enable_counters(); 891 status = dispatch_events(forks, timeout, interval, ×); 892 } 893 894 disable_counters(); 895 896 t1 = rdclock(); 897 898 if (stat_config.walltime_run_table) 899 stat_config.walltime_run[run_idx] = t1 - t0; 900 901 if (interval && stat_config.summary) { 902 stat_config.interval = 0; 903 stat_config.stop_read_counter = true; 904 init_stats(&walltime_nsecs_stats); 905 update_stats(&walltime_nsecs_stats, t1 - t0); 906 907 if (stat_config.aggr_mode == AGGR_GLOBAL) 908 perf_evlist__save_aggr_prev_raw_counts(evsel_list); 909 910 perf_evlist__copy_prev_raw_counts(evsel_list); 911 perf_evlist__reset_prev_raw_counts(evsel_list); 912 runtime_stat_reset(&stat_config); 913 perf_stat__reset_shadow_per_stat(&rt_stat); 914 } else 915 update_stats(&walltime_nsecs_stats, t1 - t0); 916 917 /* 918 * Closing a group leader splits the group, and as we only disable 919 * group leaders, results in remaining events becoming enabled. To 920 * avoid arbitrary skew, we must read all counters before closing any 921 * group leaders. 922 */ 923 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 924 925 /* 926 * We need to keep evsel_list alive, because it's processed 927 * later the evsel_list will be closed after. 928 */ 929 if (!STAT_RECORD) 930 evlist__close(evsel_list); 931 932 return WEXITSTATUS(status); 933 } 934 935 static int run_perf_stat(int argc, const char **argv, int run_idx) 936 { 937 int ret; 938 939 if (pre_cmd) { 940 ret = system(pre_cmd); 941 if (ret) 942 return ret; 943 } 944 945 if (sync_run) 946 sync(); 947 948 ret = __run_perf_stat(argc, argv, run_idx); 949 if (ret) 950 return ret; 951 952 if (post_cmd) { 953 ret = system(post_cmd); 954 if (ret) 955 return ret; 956 } 957 958 return ret; 959 } 960 961 static void print_counters(struct timespec *ts, int argc, const char **argv) 962 { 963 /* Do not print anything if we record to the pipe. */ 964 if (STAT_RECORD && perf_stat.data.is_pipe) 965 return; 966 967 perf_evlist__print_counters(evsel_list, &stat_config, &target, 968 ts, argc, argv); 969 } 970 971 static volatile int signr = -1; 972 973 static void skip_signal(int signo) 974 { 975 if ((child_pid == -1) || stat_config.interval) 976 done = 1; 977 978 signr = signo; 979 /* 980 * render child_pid harmless 981 * won't send SIGTERM to a random 982 * process in case of race condition 983 * and fast PID recycling 984 */ 985 child_pid = -1; 986 } 987 988 static void sig_atexit(void) 989 { 990 sigset_t set, oset; 991 992 /* 993 * avoid race condition with SIGCHLD handler 994 * in skip_signal() which is modifying child_pid 995 * goal is to avoid send SIGTERM to a random 996 * process 997 */ 998 sigemptyset(&set); 999 sigaddset(&set, SIGCHLD); 1000 sigprocmask(SIG_BLOCK, &set, &oset); 1001 1002 if (child_pid != -1) 1003 kill(child_pid, SIGTERM); 1004 1005 sigprocmask(SIG_SETMASK, &oset, NULL); 1006 1007 if (signr == -1) 1008 return; 1009 1010 signal(signr, SIG_DFL); 1011 kill(getpid(), signr); 1012 } 1013 1014 void perf_stat__set_big_num(int set) 1015 { 1016 stat_config.big_num = (set != 0); 1017 } 1018 1019 static int stat__set_big_num(const struct option *opt __maybe_unused, 1020 const char *s __maybe_unused, int unset) 1021 { 1022 big_num_opt = unset ? 0 : 1; 1023 perf_stat__set_big_num(!unset); 1024 return 0; 1025 } 1026 1027 static int enable_metric_only(const struct option *opt __maybe_unused, 1028 const char *s __maybe_unused, int unset) 1029 { 1030 force_metric_only = true; 1031 stat_config.metric_only = !unset; 1032 return 0; 1033 } 1034 1035 static int parse_metric_groups(const struct option *opt, 1036 const char *str, 1037 int unset __maybe_unused) 1038 { 1039 return metricgroup__parse_groups(opt, str, 1040 stat_config.metric_no_group, 1041 stat_config.metric_no_merge, 1042 &stat_config.metric_events); 1043 } 1044 1045 static int parse_control_option(const struct option *opt, 1046 const char *str, 1047 int unset __maybe_unused) 1048 { 1049 struct perf_stat_config *config = opt->value; 1050 1051 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1052 } 1053 1054 static void close_control_option(struct perf_stat_config *config) 1055 { 1056 if (config->ctl_fd_close) { 1057 config->ctl_fd_close = false; 1058 close(config->ctl_fd); 1059 if (config->ctl_fd_ack >= 0) 1060 close(config->ctl_fd_ack); 1061 } 1062 } 1063 1064 static struct option stat_options[] = { 1065 OPT_BOOLEAN('T', "transaction", &transaction_run, 1066 "hardware transaction statistics"), 1067 OPT_CALLBACK('e', "event", &evsel_list, "event", 1068 "event selector. use 'perf list' to list available events", 1069 parse_events_option), 1070 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1071 "event filter", parse_filter), 1072 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1073 "child tasks do not inherit counters"), 1074 OPT_STRING('p', "pid", &target.pid, "pid", 1075 "stat events on existing process id"), 1076 OPT_STRING('t', "tid", &target.tid, "tid", 1077 "stat events on existing thread id"), 1078 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1079 "system-wide collection from all CPUs"), 1080 OPT_BOOLEAN('g', "group", &group, 1081 "put the counters into a counter group"), 1082 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1083 "Use --no-scale to disable counter scaling for multiplexing"), 1084 OPT_INCR('v', "verbose", &verbose, 1085 "be more verbose (show counter open errors, etc)"), 1086 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1087 "repeat command and print average + stddev (max: 100, forever: 0)"), 1088 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1089 "display details about each run (only with -r option)"), 1090 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1091 "null run - dont start any counters"), 1092 OPT_INCR('d', "detailed", &detailed_run, 1093 "detailed run - start a lot of events"), 1094 OPT_BOOLEAN('S', "sync", &sync_run, 1095 "call sync() before starting a run"), 1096 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1097 "print large numbers with thousands\' separators", 1098 stat__set_big_num), 1099 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1100 "list of cpus to monitor in system-wide"), 1101 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1102 "disable CPU count aggregation", AGGR_NONE), 1103 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1104 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1105 "print counts with custom separator"), 1106 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1107 "monitor event in cgroup name only", parse_cgroups), 1108 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1109 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1110 OPT_INTEGER(0, "log-fd", &output_fd, 1111 "log output to fd, instead of stderr"), 1112 OPT_STRING(0, "pre", &pre_cmd, "command", 1113 "command to run prior to the measured command"), 1114 OPT_STRING(0, "post", &post_cmd, "command", 1115 "command to run after to the measured command"), 1116 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1117 "print counts at regular interval in ms " 1118 "(overhead is possible for values <= 100ms)"), 1119 OPT_INTEGER(0, "interval-count", &stat_config.times, 1120 "print counts for fixed number of times"), 1121 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1122 "clear screen in between new interval"), 1123 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1124 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1125 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1126 "aggregate counts per processor socket", AGGR_SOCKET), 1127 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1128 "aggregate counts per processor die", AGGR_DIE), 1129 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1130 "aggregate counts per physical processor core", AGGR_CORE), 1131 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1132 "aggregate counts per thread", AGGR_THREAD), 1133 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1134 "aggregate counts per numa node", AGGR_NODE), 1135 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1136 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1137 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1138 "Only print computed metrics. No raw values", enable_metric_only), 1139 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1140 "don't group metric events, impacts multiplexing"), 1141 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1142 "don't try to share events between metrics in a group"), 1143 OPT_BOOLEAN(0, "topdown", &topdown_run, 1144 "measure topdown level 1 statistics"), 1145 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1146 "measure SMI cost"), 1147 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1148 "monitor specified metrics or metric groups (separated by ,)", 1149 parse_metric_groups), 1150 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1151 "Configure all used events to run in kernel space.", 1152 PARSE_OPT_EXCLUSIVE), 1153 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1154 "Configure all used events to run in user space.", 1155 PARSE_OPT_EXCLUSIVE), 1156 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1157 "Use with 'percore' event qualifier to show the event " 1158 "counts of one hardware thread by sum up total hardware " 1159 "threads of same physical core"), 1160 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1161 "print summary for interval mode"), 1162 #ifdef HAVE_LIBPFM 1163 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1164 "libpfm4 event selector. use 'perf list' to list available events", 1165 parse_libpfm_events_option), 1166 #endif 1167 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1168 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1169 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1170 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1171 parse_control_option), 1172 OPT_END() 1173 }; 1174 1175 static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1176 struct perf_cpu_map *map, int cpu) 1177 { 1178 return cpu_map__get_socket(map, cpu, NULL); 1179 } 1180 1181 static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1182 struct perf_cpu_map *map, int cpu) 1183 { 1184 return cpu_map__get_die(map, cpu, NULL); 1185 } 1186 1187 static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1188 struct perf_cpu_map *map, int cpu) 1189 { 1190 return cpu_map__get_core(map, cpu, NULL); 1191 } 1192 1193 static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1194 struct perf_cpu_map *map, int cpu) 1195 { 1196 return cpu_map__get_node(map, cpu, NULL); 1197 } 1198 1199 static int perf_stat__get_aggr(struct perf_stat_config *config, 1200 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) 1201 { 1202 int cpu; 1203 1204 if (idx >= map->nr) 1205 return -1; 1206 1207 cpu = map->map[idx]; 1208 1209 if (config->cpus_aggr_map->map[cpu] == -1) 1210 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); 1211 1212 return config->cpus_aggr_map->map[cpu]; 1213 } 1214 1215 static int perf_stat__get_socket_cached(struct perf_stat_config *config, 1216 struct perf_cpu_map *map, int idx) 1217 { 1218 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); 1219 } 1220 1221 static int perf_stat__get_die_cached(struct perf_stat_config *config, 1222 struct perf_cpu_map *map, int idx) 1223 { 1224 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); 1225 } 1226 1227 static int perf_stat__get_core_cached(struct perf_stat_config *config, 1228 struct perf_cpu_map *map, int idx) 1229 { 1230 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); 1231 } 1232 1233 static int perf_stat__get_node_cached(struct perf_stat_config *config, 1234 struct perf_cpu_map *map, int idx) 1235 { 1236 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); 1237 } 1238 1239 static bool term_percore_set(void) 1240 { 1241 struct evsel *counter; 1242 1243 evlist__for_each_entry(evsel_list, counter) { 1244 if (counter->percore) 1245 return true; 1246 } 1247 1248 return false; 1249 } 1250 1251 static int perf_stat_init_aggr_mode(void) 1252 { 1253 int nr; 1254 1255 switch (stat_config.aggr_mode) { 1256 case AGGR_SOCKET: 1257 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1258 perror("cannot build socket map"); 1259 return -1; 1260 } 1261 stat_config.aggr_get_id = perf_stat__get_socket_cached; 1262 break; 1263 case AGGR_DIE: 1264 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1265 perror("cannot build die map"); 1266 return -1; 1267 } 1268 stat_config.aggr_get_id = perf_stat__get_die_cached; 1269 break; 1270 case AGGR_CORE: 1271 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1272 perror("cannot build core map"); 1273 return -1; 1274 } 1275 stat_config.aggr_get_id = perf_stat__get_core_cached; 1276 break; 1277 case AGGR_NODE: 1278 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1279 perror("cannot build core map"); 1280 return -1; 1281 } 1282 stat_config.aggr_get_id = perf_stat__get_node_cached; 1283 break; 1284 case AGGR_NONE: 1285 if (term_percore_set()) { 1286 if (cpu_map__build_core_map(evsel_list->core.cpus, 1287 &stat_config.aggr_map)) { 1288 perror("cannot build core map"); 1289 return -1; 1290 } 1291 stat_config.aggr_get_id = perf_stat__get_core_cached; 1292 } 1293 break; 1294 case AGGR_GLOBAL: 1295 case AGGR_THREAD: 1296 case AGGR_UNSET: 1297 default: 1298 break; 1299 } 1300 1301 /* 1302 * The evsel_list->cpus is the base we operate on, 1303 * taking the highest cpu number to be the size of 1304 * the aggregation translate cpumap. 1305 */ 1306 nr = perf_cpu_map__max(evsel_list->core.cpus); 1307 stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1); 1308 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1309 } 1310 1311 static void perf_stat__exit_aggr_mode(void) 1312 { 1313 perf_cpu_map__put(stat_config.aggr_map); 1314 perf_cpu_map__put(stat_config.cpus_aggr_map); 1315 stat_config.aggr_map = NULL; 1316 stat_config.cpus_aggr_map = NULL; 1317 } 1318 1319 static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) 1320 { 1321 int cpu; 1322 1323 if (idx > map->nr) 1324 return -1; 1325 1326 cpu = map->map[idx]; 1327 1328 if (cpu >= env->nr_cpus_avail) 1329 return -1; 1330 1331 return cpu; 1332 } 1333 1334 static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) 1335 { 1336 struct perf_env *env = data; 1337 int cpu = perf_env__get_cpu(env, map, idx); 1338 1339 return cpu == -1 ? -1 : env->cpu[cpu].socket_id; 1340 } 1341 1342 static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) 1343 { 1344 struct perf_env *env = data; 1345 int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); 1346 1347 if (cpu != -1) { 1348 /* 1349 * Encode socket in bit range 15:8 1350 * die_id is relative to socket, 1351 * we need a global id. So we combine 1352 * socket + die id 1353 */ 1354 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) 1355 return -1; 1356 1357 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) 1358 return -1; 1359 1360 die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff); 1361 } 1362 1363 return die_id; 1364 } 1365 1366 static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) 1367 { 1368 struct perf_env *env = data; 1369 int core = -1, cpu = perf_env__get_cpu(env, map, idx); 1370 1371 if (cpu != -1) { 1372 /* 1373 * Encode socket in bit range 31:24 1374 * encode die id in bit range 23:16 1375 * core_id is relative to socket and die, 1376 * we need a global id. So we combine 1377 * socket + die id + core id 1378 */ 1379 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) 1380 return -1; 1381 1382 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) 1383 return -1; 1384 1385 if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n")) 1386 return -1; 1387 1388 core = (env->cpu[cpu].socket_id << 24) | 1389 (env->cpu[cpu].die_id << 16) | 1390 (env->cpu[cpu].core_id & 0xffff); 1391 } 1392 1393 return core; 1394 } 1395 1396 static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) 1397 { 1398 int cpu = perf_env__get_cpu(data, map, idx); 1399 1400 return perf_env__numa_node(data, cpu); 1401 } 1402 1403 static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, 1404 struct perf_cpu_map **sockp) 1405 { 1406 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); 1407 } 1408 1409 static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, 1410 struct perf_cpu_map **diep) 1411 { 1412 return cpu_map__build_map(cpus, diep, perf_env__get_die, env); 1413 } 1414 1415 static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, 1416 struct perf_cpu_map **corep) 1417 { 1418 return cpu_map__build_map(cpus, corep, perf_env__get_core, env); 1419 } 1420 1421 static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, 1422 struct perf_cpu_map **nodep) 1423 { 1424 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); 1425 } 1426 1427 static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1428 struct perf_cpu_map *map, int idx) 1429 { 1430 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); 1431 } 1432 static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1433 struct perf_cpu_map *map, int idx) 1434 { 1435 return perf_env__get_die(map, idx, &perf_stat.session->header.env); 1436 } 1437 1438 static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1439 struct perf_cpu_map *map, int idx) 1440 { 1441 return perf_env__get_core(map, idx, &perf_stat.session->header.env); 1442 } 1443 1444 static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1445 struct perf_cpu_map *map, int idx) 1446 { 1447 return perf_env__get_node(map, idx, &perf_stat.session->header.env); 1448 } 1449 1450 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1451 { 1452 struct perf_env *env = &st->session->header.env; 1453 1454 switch (stat_config.aggr_mode) { 1455 case AGGR_SOCKET: 1456 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1457 perror("cannot build socket map"); 1458 return -1; 1459 } 1460 stat_config.aggr_get_id = perf_stat__get_socket_file; 1461 break; 1462 case AGGR_DIE: 1463 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1464 perror("cannot build die map"); 1465 return -1; 1466 } 1467 stat_config.aggr_get_id = perf_stat__get_die_file; 1468 break; 1469 case AGGR_CORE: 1470 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1471 perror("cannot build core map"); 1472 return -1; 1473 } 1474 stat_config.aggr_get_id = perf_stat__get_core_file; 1475 break; 1476 case AGGR_NODE: 1477 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1478 perror("cannot build core map"); 1479 return -1; 1480 } 1481 stat_config.aggr_get_id = perf_stat__get_node_file; 1482 break; 1483 case AGGR_NONE: 1484 case AGGR_GLOBAL: 1485 case AGGR_THREAD: 1486 case AGGR_UNSET: 1487 default: 1488 break; 1489 } 1490 1491 return 0; 1492 } 1493 1494 static int topdown_filter_events(const char **attr, char **str, bool use_group) 1495 { 1496 int off = 0; 1497 int i; 1498 int len = 0; 1499 char *s; 1500 1501 for (i = 0; attr[i]; i++) { 1502 if (pmu_have_event("cpu", attr[i])) { 1503 len += strlen(attr[i]) + 1; 1504 attr[i - off] = attr[i]; 1505 } else 1506 off++; 1507 } 1508 attr[i - off] = NULL; 1509 1510 *str = malloc(len + 1 + 2); 1511 if (!*str) 1512 return -1; 1513 s = *str; 1514 if (i - off == 0) { 1515 *s = 0; 1516 return 0; 1517 } 1518 if (use_group) 1519 *s++ = '{'; 1520 for (i = 0; attr[i]; i++) { 1521 strcpy(s, attr[i]); 1522 s += strlen(s); 1523 *s++ = ','; 1524 } 1525 if (use_group) { 1526 s[-1] = '}'; 1527 *s = 0; 1528 } else 1529 s[-1] = 0; 1530 return 0; 1531 } 1532 1533 __weak bool arch_topdown_check_group(bool *warn) 1534 { 1535 *warn = false; 1536 return false; 1537 } 1538 1539 __weak void arch_topdown_group_warn(void) 1540 { 1541 } 1542 1543 /* 1544 * Add default attributes, if there were no attributes specified or 1545 * if -d/--detailed, -d -d or -d -d -d is used: 1546 */ 1547 static int add_default_attributes(void) 1548 { 1549 int err; 1550 struct perf_event_attr default_attrs0[] = { 1551 1552 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1553 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1554 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1555 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1556 1557 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1558 }; 1559 struct perf_event_attr frontend_attrs[] = { 1560 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1561 }; 1562 struct perf_event_attr backend_attrs[] = { 1563 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1564 }; 1565 struct perf_event_attr default_attrs1[] = { 1566 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1567 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1568 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1569 1570 }; 1571 1572 /* 1573 * Detailed stats (-d), covering the L1 and last level data caches: 1574 */ 1575 struct perf_event_attr detailed_attrs[] = { 1576 1577 { .type = PERF_TYPE_HW_CACHE, 1578 .config = 1579 PERF_COUNT_HW_CACHE_L1D << 0 | 1580 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1581 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1582 1583 { .type = PERF_TYPE_HW_CACHE, 1584 .config = 1585 PERF_COUNT_HW_CACHE_L1D << 0 | 1586 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1587 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1588 1589 { .type = PERF_TYPE_HW_CACHE, 1590 .config = 1591 PERF_COUNT_HW_CACHE_LL << 0 | 1592 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1593 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1594 1595 { .type = PERF_TYPE_HW_CACHE, 1596 .config = 1597 PERF_COUNT_HW_CACHE_LL << 0 | 1598 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1599 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1600 }; 1601 1602 /* 1603 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1604 */ 1605 struct perf_event_attr very_detailed_attrs[] = { 1606 1607 { .type = PERF_TYPE_HW_CACHE, 1608 .config = 1609 PERF_COUNT_HW_CACHE_L1I << 0 | 1610 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1611 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1612 1613 { .type = PERF_TYPE_HW_CACHE, 1614 .config = 1615 PERF_COUNT_HW_CACHE_L1I << 0 | 1616 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1617 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1618 1619 { .type = PERF_TYPE_HW_CACHE, 1620 .config = 1621 PERF_COUNT_HW_CACHE_DTLB << 0 | 1622 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1623 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1624 1625 { .type = PERF_TYPE_HW_CACHE, 1626 .config = 1627 PERF_COUNT_HW_CACHE_DTLB << 0 | 1628 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1629 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1630 1631 { .type = PERF_TYPE_HW_CACHE, 1632 .config = 1633 PERF_COUNT_HW_CACHE_ITLB << 0 | 1634 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1635 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1636 1637 { .type = PERF_TYPE_HW_CACHE, 1638 .config = 1639 PERF_COUNT_HW_CACHE_ITLB << 0 | 1640 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1641 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1642 1643 }; 1644 1645 /* 1646 * Very, very detailed stats (-d -d -d), adding prefetch events: 1647 */ 1648 struct perf_event_attr very_very_detailed_attrs[] = { 1649 1650 { .type = PERF_TYPE_HW_CACHE, 1651 .config = 1652 PERF_COUNT_HW_CACHE_L1D << 0 | 1653 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1654 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1655 1656 { .type = PERF_TYPE_HW_CACHE, 1657 .config = 1658 PERF_COUNT_HW_CACHE_L1D << 0 | 1659 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1660 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1661 }; 1662 struct parse_events_error errinfo; 1663 1664 /* Set attrs if no event is selected and !null_run: */ 1665 if (stat_config.null_run) 1666 return 0; 1667 1668 bzero(&errinfo, sizeof(errinfo)); 1669 if (transaction_run) { 1670 /* Handle -T as -M transaction. Once platform specific metrics 1671 * support has been added to the json files, all archictures 1672 * will use this approach. To determine transaction support 1673 * on an architecture test for such a metric name. 1674 */ 1675 if (metricgroup__has_metric("transaction")) { 1676 struct option opt = { .value = &evsel_list }; 1677 1678 return metricgroup__parse_groups(&opt, "transaction", 1679 stat_config.metric_no_group, 1680 stat_config.metric_no_merge, 1681 &stat_config.metric_events); 1682 } 1683 1684 if (pmu_have_event("cpu", "cycles-ct") && 1685 pmu_have_event("cpu", "el-start")) 1686 err = parse_events(evsel_list, transaction_attrs, 1687 &errinfo); 1688 else 1689 err = parse_events(evsel_list, 1690 transaction_limited_attrs, 1691 &errinfo); 1692 if (err) { 1693 fprintf(stderr, "Cannot set up transaction events\n"); 1694 parse_events_print_error(&errinfo, transaction_attrs); 1695 return -1; 1696 } 1697 return 0; 1698 } 1699 1700 if (smi_cost) { 1701 int smi; 1702 1703 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1704 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1705 return -1; 1706 } 1707 1708 if (!smi) { 1709 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1710 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1711 return -1; 1712 } 1713 smi_reset = true; 1714 } 1715 1716 if (pmu_have_event("msr", "aperf") && 1717 pmu_have_event("msr", "smi")) { 1718 if (!force_metric_only) 1719 stat_config.metric_only = true; 1720 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1721 } else { 1722 fprintf(stderr, "To measure SMI cost, it needs " 1723 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1724 parse_events_print_error(&errinfo, smi_cost_attrs); 1725 return -1; 1726 } 1727 if (err) { 1728 parse_events_print_error(&errinfo, smi_cost_attrs); 1729 fprintf(stderr, "Cannot set up SMI cost events\n"); 1730 return -1; 1731 } 1732 return 0; 1733 } 1734 1735 if (topdown_run) { 1736 char *str = NULL; 1737 bool warn = false; 1738 1739 if (stat_config.aggr_mode != AGGR_GLOBAL && 1740 stat_config.aggr_mode != AGGR_CORE) { 1741 pr_err("top down event configuration requires --per-core mode\n"); 1742 return -1; 1743 } 1744 stat_config.aggr_mode = AGGR_CORE; 1745 if (nr_cgroups || !target__has_cpu(&target)) { 1746 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1747 return -1; 1748 } 1749 1750 if (!force_metric_only) 1751 stat_config.metric_only = true; 1752 if (topdown_filter_events(topdown_attrs, &str, 1753 arch_topdown_check_group(&warn)) < 0) { 1754 pr_err("Out of memory\n"); 1755 return -1; 1756 } 1757 if (topdown_attrs[0] && str) { 1758 if (warn) 1759 arch_topdown_group_warn(); 1760 err = parse_events(evsel_list, str, &errinfo); 1761 if (err) { 1762 fprintf(stderr, 1763 "Cannot set up top down events %s: %d\n", 1764 str, err); 1765 parse_events_print_error(&errinfo, str); 1766 free(str); 1767 return -1; 1768 } 1769 } else { 1770 fprintf(stderr, "System does not support topdown\n"); 1771 return -1; 1772 } 1773 free(str); 1774 } 1775 1776 if (!evsel_list->core.nr_entries) { 1777 if (target__has_cpu(&target)) 1778 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1779 1780 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1781 return -1; 1782 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1783 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1784 return -1; 1785 } 1786 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1787 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1788 return -1; 1789 } 1790 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1791 return -1; 1792 } 1793 1794 /* Detailed events get appended to the event list: */ 1795 1796 if (detailed_run < 1) 1797 return 0; 1798 1799 /* Append detailed run extra attributes: */ 1800 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1801 return -1; 1802 1803 if (detailed_run < 2) 1804 return 0; 1805 1806 /* Append very detailed run extra attributes: */ 1807 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1808 return -1; 1809 1810 if (detailed_run < 3) 1811 return 0; 1812 1813 /* Append very, very detailed run extra attributes: */ 1814 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1815 } 1816 1817 static const char * const stat_record_usage[] = { 1818 "perf stat record [<options>]", 1819 NULL, 1820 }; 1821 1822 static void init_features(struct perf_session *session) 1823 { 1824 int feat; 1825 1826 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1827 perf_header__set_feat(&session->header, feat); 1828 1829 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1830 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1831 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1832 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1833 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1834 } 1835 1836 static int __cmd_record(int argc, const char **argv) 1837 { 1838 struct perf_session *session; 1839 struct perf_data *data = &perf_stat.data; 1840 1841 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1842 PARSE_OPT_STOP_AT_NON_OPTION); 1843 1844 if (output_name) 1845 data->path = output_name; 1846 1847 if (stat_config.run_count != 1 || forever) { 1848 pr_err("Cannot use -r option with perf stat record.\n"); 1849 return -1; 1850 } 1851 1852 session = perf_session__new(data, false, NULL); 1853 if (IS_ERR(session)) { 1854 pr_err("Perf session creation failed\n"); 1855 return PTR_ERR(session); 1856 } 1857 1858 init_features(session); 1859 1860 session->evlist = evsel_list; 1861 perf_stat.session = session; 1862 perf_stat.record = true; 1863 return argc; 1864 } 1865 1866 static int process_stat_round_event(struct perf_session *session, 1867 union perf_event *event) 1868 { 1869 struct perf_record_stat_round *stat_round = &event->stat_round; 1870 struct evsel *counter; 1871 struct timespec tsh, *ts = NULL; 1872 const char **argv = session->header.env.cmdline_argv; 1873 int argc = session->header.env.nr_cmdline; 1874 1875 evlist__for_each_entry(evsel_list, counter) 1876 perf_stat_process_counter(&stat_config, counter); 1877 1878 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 1879 update_stats(&walltime_nsecs_stats, stat_round->time); 1880 1881 if (stat_config.interval && stat_round->time) { 1882 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 1883 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 1884 ts = &tsh; 1885 } 1886 1887 print_counters(ts, argc, argv); 1888 return 0; 1889 } 1890 1891 static 1892 int process_stat_config_event(struct perf_session *session, 1893 union perf_event *event) 1894 { 1895 struct perf_tool *tool = session->tool; 1896 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1897 1898 perf_event__read_stat_config(&stat_config, &event->stat_config); 1899 1900 if (perf_cpu_map__empty(st->cpus)) { 1901 if (st->aggr_mode != AGGR_UNSET) 1902 pr_warning("warning: processing task data, aggregation mode not set\n"); 1903 return 0; 1904 } 1905 1906 if (st->aggr_mode != AGGR_UNSET) 1907 stat_config.aggr_mode = st->aggr_mode; 1908 1909 if (perf_stat.data.is_pipe) 1910 perf_stat_init_aggr_mode(); 1911 else 1912 perf_stat_init_aggr_mode_file(st); 1913 1914 return 0; 1915 } 1916 1917 static int set_maps(struct perf_stat *st) 1918 { 1919 if (!st->cpus || !st->threads) 1920 return 0; 1921 1922 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 1923 return -EINVAL; 1924 1925 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 1926 1927 if (perf_evlist__alloc_stats(evsel_list, true)) 1928 return -ENOMEM; 1929 1930 st->maps_allocated = true; 1931 return 0; 1932 } 1933 1934 static 1935 int process_thread_map_event(struct perf_session *session, 1936 union perf_event *event) 1937 { 1938 struct perf_tool *tool = session->tool; 1939 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1940 1941 if (st->threads) { 1942 pr_warning("Extra thread map event, ignoring.\n"); 1943 return 0; 1944 } 1945 1946 st->threads = thread_map__new_event(&event->thread_map); 1947 if (!st->threads) 1948 return -ENOMEM; 1949 1950 return set_maps(st); 1951 } 1952 1953 static 1954 int process_cpu_map_event(struct perf_session *session, 1955 union perf_event *event) 1956 { 1957 struct perf_tool *tool = session->tool; 1958 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1959 struct perf_cpu_map *cpus; 1960 1961 if (st->cpus) { 1962 pr_warning("Extra cpu map event, ignoring.\n"); 1963 return 0; 1964 } 1965 1966 cpus = cpu_map__new_data(&event->cpu_map.data); 1967 if (!cpus) 1968 return -ENOMEM; 1969 1970 st->cpus = cpus; 1971 return set_maps(st); 1972 } 1973 1974 static const char * const stat_report_usage[] = { 1975 "perf stat report [<options>]", 1976 NULL, 1977 }; 1978 1979 static struct perf_stat perf_stat = { 1980 .tool = { 1981 .attr = perf_event__process_attr, 1982 .event_update = perf_event__process_event_update, 1983 .thread_map = process_thread_map_event, 1984 .cpu_map = process_cpu_map_event, 1985 .stat_config = process_stat_config_event, 1986 .stat = perf_event__process_stat_event, 1987 .stat_round = process_stat_round_event, 1988 }, 1989 .aggr_mode = AGGR_UNSET, 1990 }; 1991 1992 static int __cmd_report(int argc, const char **argv) 1993 { 1994 struct perf_session *session; 1995 const struct option options[] = { 1996 OPT_STRING('i', "input", &input_name, "file", "input file name"), 1997 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 1998 "aggregate counts per processor socket", AGGR_SOCKET), 1999 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2000 "aggregate counts per processor die", AGGR_DIE), 2001 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2002 "aggregate counts per physical processor core", AGGR_CORE), 2003 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2004 "aggregate counts per numa node", AGGR_NODE), 2005 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2006 "disable CPU count aggregation", AGGR_NONE), 2007 OPT_END() 2008 }; 2009 struct stat st; 2010 int ret; 2011 2012 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2013 2014 if (!input_name || !strlen(input_name)) { 2015 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2016 input_name = "-"; 2017 else 2018 input_name = "perf.data"; 2019 } 2020 2021 perf_stat.data.path = input_name; 2022 perf_stat.data.mode = PERF_DATA_MODE_READ; 2023 2024 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); 2025 if (IS_ERR(session)) 2026 return PTR_ERR(session); 2027 2028 perf_stat.session = session; 2029 stat_config.output = stderr; 2030 evsel_list = session->evlist; 2031 2032 ret = perf_session__process_events(session); 2033 if (ret) 2034 return ret; 2035 2036 perf_session__delete(session); 2037 return 0; 2038 } 2039 2040 static void setup_system_wide(int forks) 2041 { 2042 /* 2043 * Make system wide (-a) the default target if 2044 * no target was specified and one of following 2045 * conditions is met: 2046 * 2047 * - there's no workload specified 2048 * - there is workload specified but all requested 2049 * events are system wide events 2050 */ 2051 if (!target__none(&target)) 2052 return; 2053 2054 if (!forks) 2055 target.system_wide = true; 2056 else { 2057 struct evsel *counter; 2058 2059 evlist__for_each_entry(evsel_list, counter) { 2060 if (!counter->core.system_wide) 2061 return; 2062 } 2063 2064 if (evsel_list->core.nr_entries) 2065 target.system_wide = true; 2066 } 2067 } 2068 2069 int cmd_stat(int argc, const char **argv) 2070 { 2071 const char * const stat_usage[] = { 2072 "perf stat [<options>] [<command>]", 2073 NULL 2074 }; 2075 int status = -EINVAL, run_idx; 2076 const char *mode; 2077 FILE *output = stderr; 2078 unsigned int interval, timeout; 2079 const char * const stat_subcommands[] = { "record", "report" }; 2080 2081 setlocale(LC_ALL, ""); 2082 2083 evsel_list = evlist__new(); 2084 if (evsel_list == NULL) 2085 return -ENOMEM; 2086 2087 parse_events__shrink_config_terms(); 2088 2089 /* String-parsing callback-based options would segfault when negated */ 2090 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2091 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2092 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2093 2094 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2095 (const char **) stat_usage, 2096 PARSE_OPT_STOP_AT_NON_OPTION); 2097 perf_stat__collect_metric_expr(evsel_list); 2098 perf_stat__init_shadow_stats(); 2099 2100 if (stat_config.csv_sep) { 2101 stat_config.csv_output = true; 2102 if (!strcmp(stat_config.csv_sep, "\\t")) 2103 stat_config.csv_sep = "\t"; 2104 } else 2105 stat_config.csv_sep = DEFAULT_SEPARATOR; 2106 2107 if (argc && !strncmp(argv[0], "rec", 3)) { 2108 argc = __cmd_record(argc, argv); 2109 if (argc < 0) 2110 return -1; 2111 } else if (argc && !strncmp(argv[0], "rep", 3)) 2112 return __cmd_report(argc, argv); 2113 2114 interval = stat_config.interval; 2115 timeout = stat_config.timeout; 2116 2117 /* 2118 * For record command the -o is already taken care of. 2119 */ 2120 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2121 output = NULL; 2122 2123 if (output_name && output_fd) { 2124 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2125 parse_options_usage(stat_usage, stat_options, "o", 1); 2126 parse_options_usage(NULL, stat_options, "log-fd", 0); 2127 goto out; 2128 } 2129 2130 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2131 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2132 goto out; 2133 } 2134 2135 if (stat_config.metric_only && stat_config.run_count > 1) { 2136 fprintf(stderr, "--metric-only is not supported with -r\n"); 2137 goto out; 2138 } 2139 2140 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2141 fprintf(stderr, "--table is only supported with -r\n"); 2142 parse_options_usage(stat_usage, stat_options, "r", 1); 2143 parse_options_usage(NULL, stat_options, "table", 0); 2144 goto out; 2145 } 2146 2147 if (output_fd < 0) { 2148 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2149 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2150 goto out; 2151 } 2152 2153 if (!output) { 2154 struct timespec tm; 2155 mode = append_file ? "a" : "w"; 2156 2157 output = fopen(output_name, mode); 2158 if (!output) { 2159 perror("failed to create output file"); 2160 return -1; 2161 } 2162 clock_gettime(CLOCK_REALTIME, &tm); 2163 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2164 } else if (output_fd > 0) { 2165 mode = append_file ? "a" : "w"; 2166 output = fdopen(output_fd, mode); 2167 if (!output) { 2168 perror("Failed opening logfd"); 2169 return -errno; 2170 } 2171 } 2172 2173 stat_config.output = output; 2174 2175 /* 2176 * let the spreadsheet do the pretty-printing 2177 */ 2178 if (stat_config.csv_output) { 2179 /* User explicitly passed -B? */ 2180 if (big_num_opt == 1) { 2181 fprintf(stderr, "-B option not supported with -x\n"); 2182 parse_options_usage(stat_usage, stat_options, "B", 1); 2183 parse_options_usage(NULL, stat_options, "x", 1); 2184 goto out; 2185 } else /* Nope, so disable big number formatting */ 2186 stat_config.big_num = false; 2187 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2188 stat_config.big_num = false; 2189 2190 setup_system_wide(argc); 2191 2192 /* 2193 * Display user/system times only for single 2194 * run and when there's specified tracee. 2195 */ 2196 if ((stat_config.run_count == 1) && target__none(&target)) 2197 stat_config.ru_display = true; 2198 2199 if (stat_config.run_count < 0) { 2200 pr_err("Run count must be a positive number\n"); 2201 parse_options_usage(stat_usage, stat_options, "r", 1); 2202 goto out; 2203 } else if (stat_config.run_count == 0) { 2204 forever = true; 2205 stat_config.run_count = 1; 2206 } 2207 2208 if (stat_config.walltime_run_table) { 2209 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2210 if (!stat_config.walltime_run) { 2211 pr_err("failed to setup -r option"); 2212 goto out; 2213 } 2214 } 2215 2216 if ((stat_config.aggr_mode == AGGR_THREAD) && 2217 !target__has_task(&target)) { 2218 if (!target.system_wide || target.cpu_list) { 2219 fprintf(stderr, "The --per-thread option is only " 2220 "available when monitoring via -p -t -a " 2221 "options or only --per-thread.\n"); 2222 parse_options_usage(NULL, stat_options, "p", 1); 2223 parse_options_usage(NULL, stat_options, "t", 1); 2224 goto out; 2225 } 2226 } 2227 2228 /* 2229 * no_aggr, cgroup are for system-wide only 2230 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2231 */ 2232 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2233 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && 2234 !target__has_cpu(&target)) { 2235 fprintf(stderr, "both cgroup and no-aggregation " 2236 "modes only available in system-wide mode\n"); 2237 2238 parse_options_usage(stat_usage, stat_options, "G", 1); 2239 parse_options_usage(NULL, stat_options, "A", 1); 2240 parse_options_usage(NULL, stat_options, "a", 1); 2241 goto out; 2242 } 2243 2244 if (add_default_attributes()) 2245 goto out; 2246 2247 target__validate(&target); 2248 2249 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2250 target.per_thread = true; 2251 2252 if (perf_evlist__create_maps(evsel_list, &target) < 0) { 2253 if (target__has_task(&target)) { 2254 pr_err("Problems finding threads of monitor\n"); 2255 parse_options_usage(stat_usage, stat_options, "p", 1); 2256 parse_options_usage(NULL, stat_options, "t", 1); 2257 } else if (target__has_cpu(&target)) { 2258 perror("failed to parse CPUs map"); 2259 parse_options_usage(stat_usage, stat_options, "C", 1); 2260 parse_options_usage(NULL, stat_options, "a", 1); 2261 } 2262 goto out; 2263 } 2264 2265 evlist__check_cpu_maps(evsel_list); 2266 2267 /* 2268 * Initialize thread_map with comm names, 2269 * so we could print it out on output. 2270 */ 2271 if (stat_config.aggr_mode == AGGR_THREAD) { 2272 thread_map__read_comms(evsel_list->core.threads); 2273 if (target.system_wide) { 2274 if (runtime_stat_new(&stat_config, 2275 perf_thread_map__nr(evsel_list->core.threads))) { 2276 goto out; 2277 } 2278 } 2279 } 2280 2281 if (stat_config.aggr_mode == AGGR_NODE) 2282 cpu__setup_cpunode_map(); 2283 2284 if (stat_config.times && interval) 2285 interval_count = true; 2286 else if (stat_config.times && !interval) { 2287 pr_err("interval-count option should be used together with " 2288 "interval-print.\n"); 2289 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2290 parse_options_usage(stat_usage, stat_options, "I", 1); 2291 goto out; 2292 } 2293 2294 if (timeout && timeout < 100) { 2295 if (timeout < 10) { 2296 pr_err("timeout must be >= 10ms.\n"); 2297 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2298 goto out; 2299 } else 2300 pr_warning("timeout < 100ms. " 2301 "The overhead percentage could be high in some cases. " 2302 "Please proceed with caution.\n"); 2303 } 2304 if (timeout && interval) { 2305 pr_err("timeout option is not supported with interval-print.\n"); 2306 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2307 parse_options_usage(stat_usage, stat_options, "I", 1); 2308 goto out; 2309 } 2310 2311 if (perf_evlist__alloc_stats(evsel_list, interval)) 2312 goto out; 2313 2314 if (perf_stat_init_aggr_mode()) 2315 goto out; 2316 2317 /* 2318 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2319 * while avoiding that older tools show confusing messages. 2320 * 2321 * However for pipe sessions we need to keep it zero, 2322 * because script's perf_evsel__check_attr is triggered 2323 * by attr->sample_type != 0, and we can't run it on 2324 * stat sessions. 2325 */ 2326 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2327 2328 /* 2329 * We dont want to block the signals - that would cause 2330 * child tasks to inherit that and Ctrl-C would not work. 2331 * What we want is for Ctrl-C to work in the exec()-ed 2332 * task, but being ignored by perf stat itself: 2333 */ 2334 atexit(sig_atexit); 2335 if (!forever) 2336 signal(SIGINT, skip_signal); 2337 signal(SIGCHLD, skip_signal); 2338 signal(SIGALRM, skip_signal); 2339 signal(SIGABRT, skip_signal); 2340 2341 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2342 goto out; 2343 2344 status = 0; 2345 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2346 if (stat_config.run_count != 1 && verbose > 0) 2347 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2348 run_idx + 1); 2349 2350 if (run_idx != 0) 2351 perf_evlist__reset_prev_raw_counts(evsel_list); 2352 2353 status = run_perf_stat(argc, argv, run_idx); 2354 if (forever && status != -1 && !interval) { 2355 print_counters(NULL, argc, argv); 2356 perf_stat__reset_stats(); 2357 } 2358 } 2359 2360 if (!forever && status != -1 && (!interval || stat_config.summary)) 2361 print_counters(NULL, argc, argv); 2362 2363 evlist__finalize_ctlfd(evsel_list); 2364 2365 if (STAT_RECORD) { 2366 /* 2367 * We synthesize the kernel mmap record just so that older tools 2368 * don't emit warnings about not being able to resolve symbols 2369 * due to /proc/sys/kernel/kptr_restrict settings and instear provide 2370 * a saner message about no samples being in the perf.data file. 2371 * 2372 * This also serves to suppress a warning about f_header.data.size == 0 2373 * in header.c at the moment 'perf stat record' gets introduced, which 2374 * is not really needed once we start adding the stat specific PERF_RECORD_ 2375 * records, but the need to suppress the kptr_restrict messages in older 2376 * tools remain -acme 2377 */ 2378 int fd = perf_data__fd(&perf_stat.data); 2379 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2380 process_synthesized_event, 2381 &perf_stat.session->machines.host); 2382 if (err) { 2383 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2384 "older tools may produce warnings about this file\n."); 2385 } 2386 2387 if (!interval) { 2388 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2389 pr_err("failed to write stat round event\n"); 2390 } 2391 2392 if (!perf_stat.data.is_pipe) { 2393 perf_stat.session->header.data_size += perf_stat.bytes_written; 2394 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2395 } 2396 2397 evlist__close(evsel_list); 2398 perf_session__delete(perf_stat.session); 2399 } 2400 2401 perf_stat__exit_aggr_mode(); 2402 perf_evlist__free_stats(evsel_list); 2403 out: 2404 zfree(&stat_config.walltime_run); 2405 2406 if (smi_cost && smi_reset) 2407 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2408 2409 evlist__delete(evsel_list); 2410 2411 metricgroup__rblist_exit(&stat_config.metric_events); 2412 runtime_stat_delete(&stat_config); 2413 close_control_option(&stat_config); 2414 2415 return status; 2416 } 2417