// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

static void print_counters(struct timespec *ts, int argc, const char **argv);

static struct evlist *evsel_list;
static struct parse_events_option_args parse_events_option_args = {
	.evlistp = &evsel_list,
};

static bool all_counters_use_bpf = true;

static struct target target = {
	.uid = UINT_MAX,
};

#define METRIC_ONLY_LEN 20

static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
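/*
 * sync_run above is set by -S/--sync (see stat_options[] below);
 * run_perf_stat() then calls sync() before each measurement run.
 */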
static bool forever = false;
static bool force_metric_only = false;
static struct timespec ref_time;
static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
static char *metrics;

struct perf_stat {
	bool record;
	struct perf_data data;
	struct perf_session *session;
	u64 bytes_written;
	struct perf_tool tool;
	bool maps_allocated;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	enum aggr_mode aggr_mode;
};

static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record

static volatile sig_atomic_t done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
	.ru_stats		= &ru_stats,
	.big_num		= true,
	.ctl_fd			= -1,
	.ctl_fd_ack		= -1,
	.iostat_run		= false,
};

static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
	if (!a->core.cpus && !b->core.cpus)
		return true;

	if (!a->core.cpus || !b->core.cpus)
		return false;

	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
		return false;

	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
			return false;
	}

	return true;
}

static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *warned_leader = NULL;

	if (evlist__has_hybrid(evlist))
		evlist__warn_hybrid_group(evlist);

	evlist__for_each_entry(evlist, evsel) {
		struct evsel *leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (cpus_map_matched(leader, evsel))
			continue;

		/* If there's a mismatch, disable the group and warn the user. */
		if (warned_leader != leader) {
			char buf[200];

			pr_warning("WARNING: grouped events cpus do not match.\n"
				   "Events with CPUs not matching the leader will "
				   "be removed from the group.\n");
			evsel__group_desc(leader, buf, sizeof(buf));
			pr_warning("  %s\n", buf);
			warned_leader = leader;
		}
		if (verbose > 0) {
			char buf[200];

			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", evsel->name, buf);
		}

		evsel__remove_from_group(evsel, leader);
	}
}

static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}

static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

static int read_single_counter(struct evsel *counter, int cpu_map_idx,
			       int thread, struct timespec *rs)
{
	switch (counter->tool_event) {
	case PERF_TOOL_DURATION_TIME: {
		u64 val = rs->tv_nsec + rs->tv_sec * 1000000000ULL;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	case PERF_TOOL_USER_TIME:
	case PERF_TOOL_SYSTEM_TIME: {
		u64 val;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		if (counter->tool_event == PERF_TOOL_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	default:
	case PERF_TOOL_NONE:
		return evsel__read_counter(counter, cpu_map_idx, thread);
	case PERF_TOOL_MAX:
		/* This should never be reached */
		return 0;
	}
}
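/*
 * read_single_counter() above synthesizes the tool events (duration_time,
 * user_time, system_time) from wall-clock and rusage data instead of
 * reading a kernel counter; everything else goes through
 * evsel__read_counter().
 */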

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				evsel__name(counter),
				perf_cpu_map__cpu(evsel__cpus(counter),
						  cpu_map_idx).cpu,
				count->val, count->ena, count->run);
		}
	}

	return 0;
}

static int read_affinity_counters(struct timespec *rs)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		if (!counter->err) {
			counter->err = read_counter_cpu(counter, rs,
							evlist_cpu_itr.cpu_map_idx);
		}
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}

static int read_bpf_map_counters(void)
{
	struct evsel *counter;
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
	}
	return 0;
}

static int read_counters(struct timespec *rs)
{
	if (!stat_config.stop_read_counter) {
		if (read_bpf_map_counters() ||
		    read_affinity_counters(rs))
			return -1;
	}
	return 0;
}

static void process_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		counter->err = 0;
	}

	perf_stat_merge_counters(&stat_config, evsel_list);
	perf_stat_process_percore(&stat_config, evsel_list);
}

static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	evlist__reset_aggr_stats(evsel_list);

	if (read_counters(&rs) == 0)
		process_counters();

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

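	/*
	 * Interval output reports per-interval wall time, so restart the
	 * walltime stats and account one interval length (ms -> ns).
	 */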
	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}

static bool handle_interval(unsigned int interval, int *times)
{
	if (interval) {
		process_interval();
		if (interval_count && !(--(*times)))
			return true;
	}
	return false;
}

static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (!target__enable_on_exec(&target)) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
	}
	return 0;
}

static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile sig_atomic_t workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

static bool is_target_alive(struct target *_target,
			    struct perf_thread_map *threads)
{
	struct stat st;
	int i;

	if (!target__has_task(_target))
		return true;

	for (i = 0; i < threads->nr; i++) {
		char path[PATH_MAX];

		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
			  threads->map[i].pid);

		if (!stat(path, &st))
			return true;
	}

	return false;
}

static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}

static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
			int *time_to_sleep)
{
	int tts = *time_to_sleep;
	struct timespec time_diff;

	diff_timespec(&time_diff, time_stop, time_start);

	tts -= time_diff.tv_sec * MSEC_PER_SEC +
	       time_diff.tv_nsec / NSEC_PER_MSEC;

	if (tts < 0)
		tts = 0;

	*time_to_sleep = tts;
}

static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;
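	/* interval, timeout and sleep_time are all in milliseconds. */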

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}

enum counter_recovery {
	COUNTER_SKIP,
	COUNTER_RETRY,
	COUNTER_FATAL,
};

static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];
	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * cpu event had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		if ((evsel__leader(counter) != counter) ||
		    !(counter->core.leader->nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	} else if (counter->skippable) {
		if (verbose > 0)
			ui__warning("skipping event %s that kernel failed to open.\n",
				    evsel__name(counter));
		counter->supported = false;
		counter->errored = true;
		return COUNTER_SKIP;
	}

	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}

static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	int err;
	bool second_pass = false;

	if (forks) {
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return -1;
		affinity = &saved_affinity;
	}

	evlist__for_each_entry(evsel_list, counter) {
		counter->reset_group = false;
		if (bpf_counter__load(counter, &target))
			return -1;
		if (!(evsel__is_bperf(counter)))
			all_counters_use_bpf = false;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		counter = evlist_cpu_itr.evsel;

		/*
		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
		 * no need to call it again here.
		 */
		if (target.use_bpf)
			break;

		if (counter->reset_group || counter->errored)
			continue;
		if (evsel__is_bperf(counter))
			continue;
try_again:
		if (create_perf_stat_counter(counter, &stat_config, &target,
					     evlist_cpu_itr.cpu_map_idx) < 0) {

			/*
			 * Weak group failed. We cannot just undo this here
			 * because earlier CPUs might be in group mode, and the kernel
			 * doesn't support mixing group and non group reads. Defer
			 * it to later.
			 * Don't close here because we're in the wrong affinity.
			 */
			if ((errno == EINVAL || errno == EBADF) &&
			    evsel__leader(counter) != counter &&
			    counter->weak_group) {
				evlist__reset_weak_group(evsel_list, counter, false);
				assert(counter->reset_group);
				second_pass = true;
				continue;
			}

			switch (stat_handle_error(counter)) {
			case COUNTER_FATAL:
				return -1;
			case COUNTER_RETRY:
				goto try_again;
			case COUNTER_SKIP:
				continue;
			default:
				break;
			}

		}
		counter->supported = true;
	}

	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */
783 */ 784 785 /* First close errored or weak retry */ 786 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 787 counter = evlist_cpu_itr.evsel; 788 789 if (!counter->reset_group && !counter->errored) 790 continue; 791 792 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 793 } 794 /* Now reopen weak */ 795 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 796 counter = evlist_cpu_itr.evsel; 797 798 if (!counter->reset_group) 799 continue; 800 try_again_reset: 801 pr_debug2("reopening weak %s\n", evsel__name(counter)); 802 if (create_perf_stat_counter(counter, &stat_config, &target, 803 evlist_cpu_itr.cpu_map_idx) < 0) { 804 805 switch (stat_handle_error(counter)) { 806 case COUNTER_FATAL: 807 return -1; 808 case COUNTER_RETRY: 809 goto try_again_reset; 810 case COUNTER_SKIP: 811 continue; 812 default: 813 break; 814 } 815 } 816 counter->supported = true; 817 } 818 } 819 affinity__cleanup(affinity); 820 821 evlist__for_each_entry(evsel_list, counter) { 822 if (!counter->supported) { 823 perf_evsel__free_fd(&counter->core); 824 continue; 825 } 826 827 l = strlen(counter->unit); 828 if (l > stat_config.unit_width) 829 stat_config.unit_width = l; 830 831 if (evsel__should_store_id(counter) && 832 evsel__store_ids(counter, evsel_list)) 833 return -1; 834 } 835 836 if (evlist__apply_filters(evsel_list, &counter)) { 837 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 838 counter->filter, evsel__name(counter), errno, 839 str_error_r(errno, msg, sizeof(msg))); 840 return -1; 841 } 842 843 if (STAT_RECORD) { 844 int fd = perf_data__fd(&perf_stat.data); 845 846 if (is_pipe) { 847 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 848 } else { 849 err = perf_session__write_header(perf_stat.session, evsel_list, 850 fd, false); 851 } 852 853 if (err < 0) 854 return err; 855 856 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 857 process_synthesized_event, is_pipe); 858 if (err < 0) 859 return err; 860 } 861 862 if (target.initial_delay) { 863 pr_info(EVLIST_DISABLED_MSG); 864 } else { 865 err = enable_counters(); 866 if (err) 867 return -1; 868 } 869 870 /* Exec the command, if any */ 871 if (forks) 872 evlist__start_workload(evsel_list); 873 874 if (target.initial_delay > 0) { 875 usleep(target.initial_delay * USEC_PER_MSEC); 876 err = enable_counters(); 877 if (err) 878 return -1; 879 880 pr_info(EVLIST_ENABLED_MSG); 881 } 882 883 t0 = rdclock(); 884 clock_gettime(CLOCK_MONOTONIC, &ref_time); 885 886 if (forks) { 887 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 888 status = dispatch_events(forks, timeout, interval, ×); 889 if (child_pid != -1) { 890 if (timeout) 891 kill(child_pid, SIGTERM); 892 wait4(child_pid, &status, 0, &stat_config.ru_data); 893 } 894 895 if (workload_exec_errno) { 896 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 897 pr_err("Workload failed: %s\n", emsg); 898 return -1; 899 } 900 901 if (WIFSIGNALED(status)) 902 psignal(WTERMSIG(status), argv[0]); 903 } else { 904 status = dispatch_events(forks, timeout, interval, ×); 905 } 906 907 disable_counters(); 908 909 t1 = rdclock(); 910 911 if (stat_config.walltime_run_table) 912 stat_config.walltime_run[run_idx] = t1 - t0; 913 914 if (interval && stat_config.summary) { 915 stat_config.interval = 0; 916 stat_config.stop_read_counter = true; 917 init_stats(&walltime_nsecs_stats); 918 update_stats(&walltime_nsecs_stats, t1 - t0); 919 920 evlist__copy_prev_raw_counts(evsel_list); 921 
		evlist__reset_prev_raw_counts(evsel_list);
		evlist__reset_aggr_stats(evsel_list);
	} else {
		update_stats(&walltime_nsecs_stats, t1 - t0);
		update_rusage_stats(&ru_stats, &stat_config.ru_data);
	}

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	if (read_counters(&(struct timespec) { .tv_nsec = t1 - t0 }) == 0)
		process_counters();

	/*
	 * We need to keep evsel_list alive, because it's processed
	 * later; the evsel_list will be closed after that.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);
}

static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;
	if (quiet)
		return;

	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile sig_atomic_t signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless:
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid;
	 * the goal is to avoid sending SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

void perf_stat__set_big_num(int set)
{
	stat_config.big_num = (set != 0);
}

void perf_stat__set_no_csv_summary(int set)
{
	stat_config.no_csv_summary = (set != 0);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
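	/* big_num_opt records the explicit user choice; -1 (the default) means unset. */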
	perf_stat__set_big_num(!unset);
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	if (metrics) {
		char *tmp;

		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
			return -ENOMEM;
		free(metrics);
		metrics = tmp;
	} else {
		metrics = strdup(str);
		if (!metrics)
			return -ENOMEM;
	}
	return 0;
}

static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
			      const char *str, int unset)
{
	if (stat_config.cgroup_list) {
		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
		return -1;
	}

	return parse_cgroups(opt, str, unset);
}

static int parse_cputype(const struct option *opt,
			 const char *str,
			 int unset __maybe_unused)
{
	const struct perf_pmu *pmu;
	struct evlist *evlist = *(struct evlist **)opt->value;

	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	pmu = perf_pmus__pmu_for_pmu_filter(str);
	if (!pmu) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}
	parse_events_option_args.pmu_filter = pmu->name;

	return 0;
}

static struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
		   "stat events on existing bpf program id"),
	OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
		    "use bpf program to count events"),
	OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
		   "path to perf_event_attr map"),
#endif
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN(0, "scale", &stat_config.scale,
		    "Use --no-scale to disable counter scaling for multiplexing"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
	OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
		    "Merge identical named hybrid events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
		    "print counts in JSON format"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_stat_cgroups),
	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
		   "expand events for each cgroup"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular interval in ms "
		     "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen in between new interval"),
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_INTEGER('D', "delay", &target.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			   "Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
		    "don't group metric events, impacts multiplexing"),
	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
		    "don't try to share events between metrics in a group"),
	OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
		    "don't compute metric thresholds"),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure top-down statistics"),
	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
		     "Set the metrics level for the top-down statistics (0: max level)"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
		    "measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     append_metric_groups),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
		    "Use with 'percore' event qualifier to show the event "
		    "counts of one hardware thread by summing up the counts of "
		    "all hardware threads of the same physical core"),
	OPT_BOOLEAN(0, "summary", &stat_config.summary,
		    "print summary for interval mode"),
	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
		    "don't print 'summary' for CSV summary output"),
	OPT_BOOLEAN(0, "quiet", &quiet,
		    "don't print any output, messages or warnings (useful with record)"),
	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
		     "Only enable events on CPUs with this type "
		     "for hybrid platforms (e.g. core or atom)",
		     parse_cputype),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
		     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
			    "measure I/O performance metrics provided by arch/platform",
			    iostat_parse),
	OPT_END()
};

static const char *const aggr_mode__string[] = {
	[AGGR_CORE] = "core",
	[AGGR_DIE] = "die",
	[AGGR_GLOBAL] = "global",
	[AGGR_NODE] = "node",
	[AGGR_NONE] = "none",
	[AGGR_SOCKET] = "socket",
	[AGGR_THREAD] = "thread",
	[AGGR_UNSET] = "unset",
};

static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__global(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
					      aggr_get_id_t get_id, struct perf_cpu cpu)
{
	struct aggr_cpu_id id;

	/* per-process mode - should use global aggr mode */
	if (cpu.cpu == -1)
		return get_id(config, cpu);

	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
		config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);

	id = config->cpus_aggr_map->map[cpu.cpu];
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

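/*
 * The *_cached helpers go through perf_stat__get_aggr(), which memoizes
 * the aggregation ID for each CPU in config->cpus_aggr_map.
 */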
static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return aggr_cpu_id__socket;
	case AGGR_DIE:
		return aggr_cpu_id__die;
	case AGGR_CORE:
		return aggr_cpu_id__core;
	case AGGR_NODE:
		return aggr_cpu_id__node;
	case AGGR_NONE:
		return aggr_cpu_id__cpu;
	case AGGR_GLOBAL:
		return aggr_cpu_id__global;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_cached;
	case AGGR_DIE:
		return perf_stat__get_die_cached;
	case AGGR_CORE:
		return perf_stat__get_core_cached;
	case AGGR_NODE:
		return perf_stat__get_node_cached;
	case AGGR_NONE:
		return perf_stat__get_cpu_cached;
	case AGGR_GLOBAL:
		return perf_stat__get_global_cached;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode(void)
{
	int nr;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

	if (get_id) {
		bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
							 get_id, /*data=*/NULL, needs_sort);
		if (!stat_config.aggr_map) {
			pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
			return -1;
		}
		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
	}

	if (stat_config.aggr_mode == AGGR_THREAD) {
		nr = perf_thread_map__nr(evsel_list->core.threads);
		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	if (evsel_list->core.user_requested_cpus)
		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
	else
		nr = 0;
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}

static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_aggr_map refcnt unbalanced\n");
		free(map);
	}
}

static void cpu_aggr_map__put(struct cpu_aggr_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_aggr_map__delete(map);
}

static void perf_stat__exit_aggr_mode(void)
{
	cpu_aggr_map__put(stat_config.aggr_map);
	cpu_aggr_map__put(stat_config.cpus_aggr_map);
	stat_config.aggr_map = NULL;
	stat_config.cpus_aggr_map = NULL;
}

static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1)
		id.socket = env->cpu[cpu.cpu].socket_id;

	return id;
}

static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * die_id is relative to socket, so start
		 * with the socket ID and then add die to
		 * make a unique ID.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die,
		 * we need a global id. So we set
		 * socket, die id and core id
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die,
		 * we need a global id. So we set
		 * socket, die id and core id
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
		id.cpu = cpu;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = perf_env__numa_node(data, cpu);
	return id;
}

static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
							    void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* it always aggregates to the cpu 0 */
	id.cpu = (struct perf_cpu){ .cpu = 0 };
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_env__get_socket_aggr_by_cpu;
	case AGGR_DIE:
		return perf_env__get_die_aggr_by_cpu;
	case AGGR_CORE:
		return perf_env__get_core_aggr_by_cpu;
	case AGGR_NODE:
		return perf_env__get_node_aggr_by_cpu;
	case AGGR_GLOBAL:
		return perf_env__get_global_aggr_by_cpu;
	case AGGR_NONE:
		return perf_env__get_cpu_aggr_by_cpu;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_file;
	case AGGR_DIE:
		return perf_stat__get_die_file;
	case AGGR_CORE:
		return perf_stat__get_core_file;
	case AGGR_NODE:
		return perf_stat__get_node_file;
	case AGGR_GLOBAL:
		return perf_stat__get_global_file;
	case AGGR_NONE:
		return perf_stat__get_cpu_file;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

	if (stat_config.aggr_mode == AGGR_THREAD) {
		int nr = perf_thread_map__nr(evsel_list->core.threads);

		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	if (!get_id)
		return 0;

	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
						 get_id, env, needs_sort);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
	return 0;
}

/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs0[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	};
	struct perf_event_attr frontend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	};
	struct perf_event_attr backend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	};
	struct perf_event_attr default_attrs1[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

	};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	struct perf_event_attr default_null_attrs[] = {};
	const char *pmu = parse_events_option_args.pmu_filter ?: "all";

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	if (transaction_run) {
		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture test for such a metric name.
		 */
		if (!metricgroup__has_metric(pmu, "transaction")) {
			pr_err("Missing transaction metrics");
			return -1;
		}
		return metricgroup__parse_groups(evsel_list, pmu, "transaction",
						 stat_config.metric_no_group,
						 stat_config.metric_no_merge,
						 stat_config.metric_no_threshold,
						 stat_config.user_requested_cpu_list,
						 stat_config.system_wide,
						 &stat_config.metric_events);
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			pr_err("freeze_on_smi is not supported.");
			return -1;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (!metricgroup__has_metric(pmu, "smi")) {
			pr_err("Missing smi metrics");
			return -1;
		}

		if (!force_metric_only)
			stat_config.metric_only = true;

		return metricgroup__parse_groups(evsel_list, pmu, "smi",
						 stat_config.metric_no_group,
						 stat_config.metric_no_merge,
						 stat_config.metric_no_threshold,
						 stat_config.user_requested_cpu_list,
						 stat_config.system_wide,
						 &stat_config.metric_events);
	}

	if (topdown_run) {
		unsigned int max_level = metricgroups__topdown_max_level();
		char str[] = "TopdownL1";

		if (!force_metric_only)
			stat_config.metric_only = true;

		if (!max_level) {
			pr_err("Topdown requested but the topdown metric groups aren't present.\n"
			       "(See 'perf list'; the metric groups have names like TopdownL1)");
			return -1;
		}
		if (stat_config.topdown_level > max_level) {
			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
			return -1;
		} else if (!stat_config.topdown_level)
			stat_config.topdown_level = 1;

		if (!stat_config.interval && !stat_config.metric_only) {
			fprintf(stat_config.output,
				"Topdown accuracy may decrease when measuring long periods.\n"
				"Please print the result regularly, e.g. -I1000\n");
		}
		str[8] = stat_config.topdown_level + '0';
		if (metricgroup__parse_groups(evsel_list,
					      pmu, str,
					      /*metric_no_group=*/false,
					      /*metric_no_merge=*/false,
					      /*metric_no_threshold=*/true,
					      stat_config.user_requested_cpu_list,
					      stat_config.system_wide,
					      &stat_config.metric_events) < 0)
			return -1;
	}

	if (!stat_config.topdown_level)
		stat_config.topdown_level = 1;

	if (!evsel_list->core.nr_entries) {
		/* No events so add defaults. */
		if (target__has_cpu(&target))
			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

		if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
			return -1;
		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
			if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
				return -1;
		}
		if (pmu_have_event("cpu", "stalled-cycles-backend")) {
			if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
				return -1;
		}
		if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
			return -1;
		/*
		 * Add TopdownL1 metrics if they exist. To minimize
		 * multiplexing, don't request threshold computation.
		 */
		if (metricgroup__has_metric(pmu, "TopdownL1")) {
			struct evlist *metric_evlist = evlist__new();
			struct evsel *metric_evsel;

			if (!metric_evlist)
				return -1;

			if (metricgroup__parse_groups(metric_evlist, pmu, "TopdownL1",
						      /*metric_no_group=*/false,
						      /*metric_no_merge=*/false,
						      /*metric_no_threshold=*/true,
						      stat_config.user_requested_cpu_list,
						      stat_config.system_wide,
						      &stat_config.metric_events) < 0)
				return -1;

			evlist__for_each_entry(metric_evlist, metric_evsel) {
				metric_evsel->skippable = true;
			}
			evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
			evlist__delete(metric_evlist);
		}

		/* Platform specific attrs */
		if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}

static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};

static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1969 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1970 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1971 } 1972 1973 static int __cmd_record(int argc, const char **argv) 1974 { 1975 struct perf_session *session; 1976 struct perf_data *data = &perf_stat.data; 1977 1978 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1979 PARSE_OPT_STOP_AT_NON_OPTION); 1980 1981 if (output_name) 1982 data->path = output_name; 1983 1984 if (stat_config.run_count != 1 || forever) { 1985 pr_err("Cannot use -r option with perf stat record.\n"); 1986 return -1; 1987 } 1988 1989 session = perf_session__new(data, NULL); 1990 if (IS_ERR(session)) { 1991 pr_err("Perf session creation failed\n"); 1992 return PTR_ERR(session); 1993 } 1994 1995 init_features(session); 1996 1997 session->evlist = evsel_list; 1998 perf_stat.session = session; 1999 perf_stat.record = true; 2000 return argc; 2001 } 2002 2003 static int process_stat_round_event(struct perf_session *session, 2004 union perf_event *event) 2005 { 2006 struct perf_record_stat_round *stat_round = &event->stat_round; 2007 struct timespec tsh, *ts = NULL; 2008 const char **argv = session->header.env.cmdline_argv; 2009 int argc = session->header.env.nr_cmdline; 2010 2011 process_counters(); 2012 2013 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2014 update_stats(&walltime_nsecs_stats, stat_round->time); 2015 2016 if (stat_config.interval && stat_round->time) { 2017 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2018 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2019 ts = &tsh; 2020 } 2021 2022 print_counters(ts, argc, argv); 2023 return 0; 2024 } 2025 2026 static 2027 int process_stat_config_event(struct perf_session *session, 2028 union perf_event *event) 2029 { 2030 struct perf_tool *tool = session->tool; 2031 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2032 2033 perf_event__read_stat_config(&stat_config, &event->stat_config); 2034 2035 if (perf_cpu_map__empty(st->cpus)) { 2036 if (st->aggr_mode != AGGR_UNSET) 2037 pr_warning("warning: processing task data, aggregation mode not set\n"); 2038 } else if (st->aggr_mode != AGGR_UNSET) { 2039 stat_config.aggr_mode = st->aggr_mode; 2040 } 2041 2042 if (perf_stat.data.is_pipe) 2043 perf_stat_init_aggr_mode(); 2044 else 2045 perf_stat_init_aggr_mode_file(st); 2046 2047 if (stat_config.aggr_map) { 2048 int nr_aggr = stat_config.aggr_map->nr; 2049 2050 if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) { 2051 pr_err("cannot allocate aggr counts\n"); 2052 return -1; 2053 } 2054 } 2055 return 0; 2056 } 2057 2058 static int set_maps(struct perf_stat *st) 2059 { 2060 if (!st->cpus || !st->threads) 2061 return 0; 2062 2063 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2064 return -EINVAL; 2065 2066 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2067 2068 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true)) 2069 return -ENOMEM; 2070 2071 st->maps_allocated = true; 2072 return 0; 2073 } 2074 2075 static 2076 int process_thread_map_event(struct perf_session *session, 2077 union perf_event *event) 2078 { 2079 struct perf_tool *tool = session->tool; 2080 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2081 2082 if (st->threads) { 2083 pr_warning("Extra thread map event, ignoring.\n"); 2084 return 0; 2085 } 2086 2087 st->threads = thread_map__new_event(&event->thread_map); 2088 if (!st->threads) 2089 return 
-ENOMEM; 2090 2091 return set_maps(st); 2092 } 2093 2094 static 2095 int process_cpu_map_event(struct perf_session *session, 2096 union perf_event *event) 2097 { 2098 struct perf_tool *tool = session->tool; 2099 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2100 struct perf_cpu_map *cpus; 2101 2102 if (st->cpus) { 2103 pr_warning("Extra cpu map event, ignoring.\n"); 2104 return 0; 2105 } 2106 2107 cpus = cpu_map__new_data(&event->cpu_map.data); 2108 if (!cpus) 2109 return -ENOMEM; 2110 2111 st->cpus = cpus; 2112 return set_maps(st); 2113 } 2114 2115 static const char * const stat_report_usage[] = { 2116 "perf stat report [<options>]", 2117 NULL, 2118 }; 2119 2120 static struct perf_stat perf_stat = { 2121 .tool = { 2122 .attr = perf_event__process_attr, 2123 .event_update = perf_event__process_event_update, 2124 .thread_map = process_thread_map_event, 2125 .cpu_map = process_cpu_map_event, 2126 .stat_config = process_stat_config_event, 2127 .stat = perf_event__process_stat_event, 2128 .stat_round = process_stat_round_event, 2129 }, 2130 .aggr_mode = AGGR_UNSET, 2131 }; 2132 2133 static int __cmd_report(int argc, const char **argv) 2134 { 2135 struct perf_session *session; 2136 const struct option options[] = { 2137 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2138 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2139 "aggregate counts per processor socket", AGGR_SOCKET), 2140 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2141 "aggregate counts per processor die", AGGR_DIE), 2142 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2143 "aggregate counts per physical processor core", AGGR_CORE), 2144 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2145 "aggregate counts per numa node", AGGR_NODE), 2146 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2147 "disable CPU count aggregation", AGGR_NONE), 2148 OPT_END() 2149 }; 2150 struct stat st; 2151 int ret; 2152 2153 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2154 2155 if (!input_name || !strlen(input_name)) { 2156 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2157 input_name = "-"; 2158 else 2159 input_name = "perf.data"; 2160 } 2161 2162 perf_stat.data.path = input_name; 2163 perf_stat.data.mode = PERF_DATA_MODE_READ; 2164 2165 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2166 if (IS_ERR(session)) 2167 return PTR_ERR(session); 2168 2169 perf_stat.session = session; 2170 stat_config.output = stderr; 2171 evsel_list = session->evlist; 2172 2173 ret = perf_session__process_events(session); 2174 if (ret) 2175 return ret; 2176 2177 perf_session__delete(session); 2178 return 0; 2179 } 2180 2181 static void setup_system_wide(int forks) 2182 { 2183 /* 2184 * Make system wide (-a) the default target if 2185 * no target was specified and one of following 2186 * conditions is met: 2187 * 2188 * - there's no workload specified 2189 * - there is workload specified but all requested 2190 * events are system wide events 2191 */ 2192 if (!target__none(&target)) 2193 return; 2194 2195 if (!forks) 2196 target.system_wide = true; 2197 else { 2198 struct evsel *counter; 2199 2200 evlist__for_each_entry(evsel_list, counter) { 2201 if (!counter->core.requires_cpu && 2202 !evsel__name_is(counter, "duration_time")) { 2203 return; 2204 } 2205 } 2206 2207 if (evsel_list->core.nr_entries) 2208 target.system_wide = true; 2209 } 2210 } 2211 2212 int cmd_stat(int argc, const char **argv) 2213 { 2214 const char * const stat_usage[] = { 2215 "perf stat [<options>] 
[<command>]", 2216 NULL 2217 }; 2218 int status = -EINVAL, run_idx, err; 2219 const char *mode; 2220 FILE *output = stderr; 2221 unsigned int interval, timeout; 2222 const char * const stat_subcommands[] = { "record", "report" }; 2223 char errbuf[BUFSIZ]; 2224 2225 setlocale(LC_ALL, ""); 2226 2227 evsel_list = evlist__new(); 2228 if (evsel_list == NULL) 2229 return -ENOMEM; 2230 2231 parse_events__shrink_config_terms(); 2232 2233 /* String-parsing callback-based options would segfault when negated */ 2234 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2235 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2236 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2237 2238 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2239 (const char **) stat_usage, 2240 PARSE_OPT_STOP_AT_NON_OPTION); 2241 2242 if (stat_config.csv_sep) { 2243 stat_config.csv_output = true; 2244 if (!strcmp(stat_config.csv_sep, "\\t")) 2245 stat_config.csv_sep = "\t"; 2246 } else 2247 stat_config.csv_sep = DEFAULT_SEPARATOR; 2248 2249 if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 2250 argc = __cmd_record(argc, argv); 2251 if (argc < 0) 2252 return -1; 2253 } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0])) 2254 return __cmd_report(argc, argv); 2255 2256 interval = stat_config.interval; 2257 timeout = stat_config.timeout; 2258 2259 /* 2260 * For record command the -o is already taken care of. 2261 */ 2262 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2263 output = NULL; 2264 2265 if (output_name && output_fd) { 2266 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2267 parse_options_usage(stat_usage, stat_options, "o", 1); 2268 parse_options_usage(NULL, stat_options, "log-fd", 0); 2269 goto out; 2270 } 2271 2272 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2273 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2274 goto out; 2275 } 2276 2277 if (stat_config.metric_only && stat_config.run_count > 1) { 2278 fprintf(stderr, "--metric-only is not supported with -r\n"); 2279 goto out; 2280 } 2281 2282 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2283 fprintf(stderr, "--table is only supported with -r\n"); 2284 parse_options_usage(stat_usage, stat_options, "r", 1); 2285 parse_options_usage(NULL, stat_options, "table", 0); 2286 goto out; 2287 } 2288 2289 if (output_fd < 0) { 2290 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2291 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2292 goto out; 2293 } 2294 2295 if (!output && !quiet) { 2296 struct timespec tm; 2297 mode = append_file ? "a" : "w"; 2298 2299 output = fopen(output_name, mode); 2300 if (!output) { 2301 perror("failed to create output file"); 2302 return -1; 2303 } 2304 if (!stat_config.json_output) { 2305 clock_gettime(CLOCK_REALTIME, &tm); 2306 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2307 } 2308 } else if (output_fd > 0) { 2309 mode = append_file ? 
"a" : "w"; 2310 output = fdopen(output_fd, mode); 2311 if (!output) { 2312 perror("Failed opening logfd"); 2313 return -errno; 2314 } 2315 } 2316 2317 if (stat_config.interval_clear && !isatty(fileno(output))) { 2318 fprintf(stderr, "--interval-clear does not work with output\n"); 2319 parse_options_usage(stat_usage, stat_options, "o", 1); 2320 parse_options_usage(NULL, stat_options, "log-fd", 0); 2321 parse_options_usage(NULL, stat_options, "interval-clear", 0); 2322 return -1; 2323 } 2324 2325 stat_config.output = output; 2326 2327 /* 2328 * let the spreadsheet do the pretty-printing 2329 */ 2330 if (stat_config.csv_output) { 2331 /* User explicitly passed -B? */ 2332 if (big_num_opt == 1) { 2333 fprintf(stderr, "-B option not supported with -x\n"); 2334 parse_options_usage(stat_usage, stat_options, "B", 1); 2335 parse_options_usage(NULL, stat_options, "x", 1); 2336 goto out; 2337 } else /* Nope, so disable big number formatting */ 2338 stat_config.big_num = false; 2339 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2340 stat_config.big_num = false; 2341 2342 err = target__validate(&target); 2343 if (err) { 2344 target__strerror(&target, err, errbuf, BUFSIZ); 2345 pr_warning("%s\n", errbuf); 2346 } 2347 2348 setup_system_wide(argc); 2349 2350 /* 2351 * Display user/system times only for single 2352 * run and when there's specified tracee. 2353 */ 2354 if ((stat_config.run_count == 1) && target__none(&target)) 2355 stat_config.ru_display = true; 2356 2357 if (stat_config.run_count < 0) { 2358 pr_err("Run count must be a positive number\n"); 2359 parse_options_usage(stat_usage, stat_options, "r", 1); 2360 goto out; 2361 } else if (stat_config.run_count == 0) { 2362 forever = true; 2363 stat_config.run_count = 1; 2364 } 2365 2366 if (stat_config.walltime_run_table) { 2367 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2368 if (!stat_config.walltime_run) { 2369 pr_err("failed to setup -r option"); 2370 goto out; 2371 } 2372 } 2373 2374 if ((stat_config.aggr_mode == AGGR_THREAD) && 2375 !target__has_task(&target)) { 2376 if (!target.system_wide || target.cpu_list) { 2377 fprintf(stderr, "The --per-thread option is only " 2378 "available when monitoring via -p -t -a " 2379 "options or only --per-thread.\n"); 2380 parse_options_usage(NULL, stat_options, "p", 1); 2381 parse_options_usage(NULL, stat_options, "t", 1); 2382 goto out; 2383 } 2384 } 2385 2386 /* 2387 * no_aggr, cgroup are for system-wide only 2388 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2389 */ 2390 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2391 stat_config.aggr_mode != AGGR_THREAD) || 2392 (nr_cgroups || stat_config.cgroup_list)) && 2393 !target__has_cpu(&target)) { 2394 fprintf(stderr, "both cgroup and no-aggregation " 2395 "modes only available in system-wide mode\n"); 2396 2397 parse_options_usage(stat_usage, stat_options, "G", 1); 2398 parse_options_usage(NULL, stat_options, "A", 1); 2399 parse_options_usage(NULL, stat_options, "a", 1); 2400 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2401 goto out; 2402 } 2403 2404 if (stat_config.iostat_run) { 2405 status = iostat_prepare(evsel_list, &stat_config); 2406 if (status) 2407 goto out; 2408 if (iostat_mode == IOSTAT_LIST) { 2409 iostat_list(evsel_list, &stat_config); 2410 goto out; 2411 } else if (verbose > 0) 2412 iostat_list(evsel_list, &stat_config); 2413 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2414 target.system_wide = true; 2415 } 2416 2417 if 
((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2418 target.per_thread = true; 2419 2420 stat_config.system_wide = target.system_wide; 2421 if (target.cpu_list) { 2422 stat_config.user_requested_cpu_list = strdup(target.cpu_list); 2423 if (!stat_config.user_requested_cpu_list) { 2424 status = -ENOMEM; 2425 goto out; 2426 } 2427 } 2428 2429 /* 2430 * Metric parsing needs to be delayed as metrics may optimize events 2431 * knowing the target is system-wide. 2432 */ 2433 if (metrics) { 2434 const char *pmu = parse_events_option_args.pmu_filter ?: "all"; 2435 2436 metricgroup__parse_groups(evsel_list, pmu, metrics, 2437 stat_config.metric_no_group, 2438 stat_config.metric_no_merge, 2439 stat_config.metric_no_threshold, 2440 stat_config.user_requested_cpu_list, 2441 stat_config.system_wide, 2442 &stat_config.metric_events); 2443 zfree(&metrics); 2444 } 2445 2446 if (add_default_attributes()) 2447 goto out; 2448 2449 if (stat_config.cgroup_list) { 2450 if (nr_cgroups > 0) { 2451 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2452 parse_options_usage(stat_usage, stat_options, "G", 1); 2453 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2454 goto out; 2455 } 2456 2457 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2458 &stat_config.metric_events, true) < 0) { 2459 parse_options_usage(stat_usage, stat_options, 2460 "for-each-cgroup", 0); 2461 goto out; 2462 } 2463 } 2464 2465 if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { 2466 pr_err("failed to use cpu list %s\n", target.cpu_list); 2467 goto out; 2468 } 2469 2470 target.hybrid = perf_pmu__has_hybrid(); 2471 if (evlist__create_maps(evsel_list, &target) < 0) { 2472 if (target__has_task(&target)) { 2473 pr_err("Problems finding threads of monitor\n"); 2474 parse_options_usage(stat_usage, stat_options, "p", 1); 2475 parse_options_usage(NULL, stat_options, "t", 1); 2476 } else if (target__has_cpu(&target)) { 2477 perror("failed to parse CPUs map"); 2478 parse_options_usage(stat_usage, stat_options, "C", 1); 2479 parse_options_usage(NULL, stat_options, "a", 1); 2480 } 2481 goto out; 2482 } 2483 2484 evlist__check_cpu_maps(evsel_list); 2485 2486 /* 2487 * Initialize thread_map with comm names, 2488 * so we could print it out on output. 2489 */ 2490 if (stat_config.aggr_mode == AGGR_THREAD) { 2491 thread_map__read_comms(evsel_list->core.threads); 2492 } 2493 2494 if (stat_config.aggr_mode == AGGR_NODE) 2495 cpu__setup_cpunode_map(); 2496 2497 if (stat_config.times && interval) 2498 interval_count = true; 2499 else if (stat_config.times && !interval) { 2500 pr_err("interval-count option should be used together with " 2501 "interval-print.\n"); 2502 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2503 parse_options_usage(stat_usage, stat_options, "I", 1); 2504 goto out; 2505 } 2506 2507 if (timeout && timeout < 100) { 2508 if (timeout < 10) { 2509 pr_err("timeout must be >= 10ms.\n"); 2510 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2511 goto out; 2512 } else 2513 pr_warning("timeout < 100ms. " 2514 "The overhead percentage could be high in some cases. 
" 2515 "Please proceed with caution.\n"); 2516 } 2517 if (timeout && interval) { 2518 pr_err("timeout option is not supported with interval-print.\n"); 2519 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2520 parse_options_usage(stat_usage, stat_options, "I", 1); 2521 goto out; 2522 } 2523 2524 if (perf_stat_init_aggr_mode()) 2525 goto out; 2526 2527 if (evlist__alloc_stats(&stat_config, evsel_list, interval)) 2528 goto out; 2529 2530 /* 2531 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2532 * while avoiding that older tools show confusing messages. 2533 * 2534 * However for pipe sessions we need to keep it zero, 2535 * because script's perf_evsel__check_attr is triggered 2536 * by attr->sample_type != 0, and we can't run it on 2537 * stat sessions. 2538 */ 2539 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2540 2541 /* 2542 * We dont want to block the signals - that would cause 2543 * child tasks to inherit that and Ctrl-C would not work. 2544 * What we want is for Ctrl-C to work in the exec()-ed 2545 * task, but being ignored by perf stat itself: 2546 */ 2547 atexit(sig_atexit); 2548 if (!forever) 2549 signal(SIGINT, skip_signal); 2550 signal(SIGCHLD, skip_signal); 2551 signal(SIGALRM, skip_signal); 2552 signal(SIGABRT, skip_signal); 2553 2554 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2555 goto out; 2556 2557 /* Enable ignoring missing threads when -p option is defined. */ 2558 evlist__first(evsel_list)->ignore_missing_thread = target.pid; 2559 status = 0; 2560 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2561 if (stat_config.run_count != 1 && verbose > 0) 2562 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2563 run_idx + 1); 2564 2565 if (run_idx != 0) 2566 evlist__reset_prev_raw_counts(evsel_list); 2567 2568 status = run_perf_stat(argc, argv, run_idx); 2569 if (forever && status != -1 && !interval) { 2570 print_counters(NULL, argc, argv); 2571 perf_stat__reset_stats(); 2572 } 2573 } 2574 2575 if (!forever && status != -1 && (!interval || stat_config.summary)) 2576 print_counters(NULL, argc, argv); 2577 2578 evlist__finalize_ctlfd(evsel_list); 2579 2580 if (STAT_RECORD) { 2581 /* 2582 * We synthesize the kernel mmap record just so that older tools 2583 * don't emit warnings about not being able to resolve symbols 2584 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2585 * a saner message about no samples being in the perf.data file. 
2586 * 2587 * This also serves to suppress a warning about f_header.data.size == 0 2588 * in header.c at the moment 'perf stat record' gets introduced, which 2589 * is not really needed once we start adding the stat specific PERF_RECORD_ 2590 * records, but the need to suppress the kptr_restrict messages in older 2591 * tools remain -acme 2592 */ 2593 int fd = perf_data__fd(&perf_stat.data); 2594 2595 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2596 process_synthesized_event, 2597 &perf_stat.session->machines.host); 2598 if (err) { 2599 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2600 "older tools may produce warnings about this file\n."); 2601 } 2602 2603 if (!interval) { 2604 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2605 pr_err("failed to write stat round event\n"); 2606 } 2607 2608 if (!perf_stat.data.is_pipe) { 2609 perf_stat.session->header.data_size += perf_stat.bytes_written; 2610 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2611 } 2612 2613 evlist__close(evsel_list); 2614 perf_session__delete(perf_stat.session); 2615 } 2616 2617 perf_stat__exit_aggr_mode(); 2618 evlist__free_stats(evsel_list); 2619 out: 2620 if (stat_config.iostat_run) 2621 iostat_release(evsel_list); 2622 2623 zfree(&stat_config.walltime_run); 2624 zfree(&stat_config.user_requested_cpu_list); 2625 2626 if (smi_cost && smi_reset) 2627 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2628 2629 evlist__delete(evsel_list); 2630 2631 metricgroup__rblist_exit(&stat_config.metric_events); 2632 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2633 2634 return status; 2635 } 2636
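/*
 * Illustrative round trip through the record/report paths above (a sketch
 * only; the event names and workload are arbitrary examples, and the
 * printed output will differ per system):
 *
 *   $ perf stat record -e cycles,instructions -- ./hackbench 10
 *   $ perf stat report --per-core
 *
 * 'record' is routed through __cmd_record() and run_perf_stat(), which
 * write the stat-specific PERF_RECORD_ events into perf.data; 'report'
 * replays that file in __cmd_report() via the perf_stat.tool callbacks
 * (process_stat_config_event, process_stat_round_event, ...).
 */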