1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 #include <api/fs/fs.h> 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <poll.h> 12 #include "cpumap.h" 13 #include "util/mmap.h" 14 #include "thread_map.h" 15 #include "target.h" 16 #include "dwarf-regs.h" 17 #include "evlist.h" 18 #include "evsel.h" 19 #include "record.h" 20 #include "debug.h" 21 #include "units.h" 22 #include "bpf_counter.h" 23 #include <internal/lib.h> // page_size 24 #include "affinity.h" 25 #include "../perf.h" 26 #include "asm/bug.h" 27 #include "bpf-event.h" 28 #include "util/event.h" 29 #include "util/string2.h" 30 #include "util/perf_api_probe.h" 31 #include "util/evsel_fprintf.h" 32 #include "util/pmu.h" 33 #include "util/sample.h" 34 #include "util/bpf-filter.h" 35 #include "util/stat.h" 36 #include "util/util.h" 37 #include "util/env.h" 38 #include "util/intel-tpebs.h" 39 #include "util/metricgroup.h" 40 #include "util/strbuf.h" 41 #include <signal.h> 42 #include <unistd.h> 43 #include <sched.h> 44 #include <stdlib.h> 45 46 #include "parse-events.h" 47 #include <subcmd/parse-options.h> 48 49 #include <fcntl.h> 50 #include <sys/ioctl.h> 51 #include <sys/mman.h> 52 #include <sys/prctl.h> 53 #include <sys/timerfd.h> 54 #include <sys/wait.h> 55 56 #include <linux/bitops.h> 57 #include <linux/hash.h> 58 #include <linux/log2.h> 59 #include <linux/err.h> 60 #include <linux/string.h> 61 #include <linux/time64.h> 62 #include <linux/zalloc.h> 63 #include <perf/evlist.h> 64 #include <perf/evsel.h> 65 #include <perf/cpumap.h> 66 #include <perf/mmap.h> 67 68 #include <internal/xyarray.h> 69 70 #ifdef LACKS_SIGQUEUE_PROTOTYPE 71 int sigqueue(pid_t pid, int sig, const union sigval value); 72 #endif 73 74 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 75 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 76 77 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, 78 struct perf_thread_map *threads) 79 { 80 perf_evlist__init(&evlist->core); 81 perf_evlist__set_maps(&evlist->core, cpus, threads); 82 evlist->workload.pid = -1; 83 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; 84 evlist->ctl_fd.fd = -1; 85 evlist->ctl_fd.ack = -1; 86 evlist->ctl_fd.pos = -1; 87 evlist->nr_br_cntr = -1; 88 metricgroup__rblist_init(&evlist->metric_events); 89 INIT_LIST_HEAD(&evlist->deferred_samples); 90 } 91 92 struct evlist *evlist__new(void) 93 { 94 struct evlist *evlist = zalloc(sizeof(*evlist)); 95 96 if (evlist != NULL) 97 evlist__init(evlist, NULL, NULL); 98 99 return evlist; 100 } 101 102 struct evlist *evlist__new_default(const struct target *target, bool sample_callchains) 103 { 104 struct evlist *evlist = evlist__new(); 105 bool can_profile_kernel; 106 struct perf_pmu *pmu = NULL; 107 struct evsel *evsel; 108 char buf[256]; 109 int err; 110 111 if (!evlist) 112 return NULL; 113 114 can_profile_kernel = perf_event_paranoid_check(1); 115 116 if (EM_HOST == EM_S390 && sample_callchains) { 117 snprintf(buf, sizeof(buf), "software/%s/%s", 118 target__has_cpu(target) ? "cpu-clock" : "task-clock", 119 can_profile_kernel ? "P" : "Pu"); 120 err = parse_event(evlist, buf); 121 if (err) 122 goto out_err; 123 } else { 124 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 125 snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name, 126 can_profile_kernel ? "P" : "Pu"); 127 err = parse_event(evlist, buf); 128 if (err) 129 goto out_err; 130 } 131 } 132 133 /* If there is only 1 event a sample identifier isn't necessary. */ 134 if (evlist->core.nr_entries > 1) { 135 evlist__for_each_entry(evlist, evsel) 136 evsel__set_sample_id(evsel, /*can_sample_identifier=*/false); 137 } 138 139 return evlist; 140 out_err: 141 evlist__delete(evlist); 142 return NULL; 143 } 144 145 struct evlist *evlist__new_dummy(void) 146 { 147 struct evlist *evlist = evlist__new(); 148 149 if (evlist && evlist__add_dummy(evlist)) { 150 evlist__delete(evlist); 151 evlist = NULL; 152 } 153 154 return evlist; 155 } 156 157 /** 158 * evlist__set_id_pos - set the positions of event ids. 159 * @evlist: selected event list 160 * 161 * Events with compatible sample types all have the same id_pos 162 * and is_pos. For convenience, put a copy on evlist. 163 */ 164 void evlist__set_id_pos(struct evlist *evlist) 165 { 166 struct evsel *first = evlist__first(evlist); 167 168 evlist->id_pos = first->id_pos; 169 evlist->is_pos = first->is_pos; 170 } 171 172 static void evlist__update_id_pos(struct evlist *evlist) 173 { 174 struct evsel *evsel; 175 176 evlist__for_each_entry(evlist, evsel) 177 evsel__calc_id_pos(evsel); 178 179 evlist__set_id_pos(evlist); 180 } 181 182 static void evlist__purge(struct evlist *evlist) 183 { 184 struct evsel *pos, *n; 185 186 evlist__for_each_entry_safe(evlist, n, pos) { 187 list_del_init(&pos->core.node); 188 pos->evlist = NULL; 189 evsel__delete(pos); 190 } 191 192 evlist->core.nr_entries = 0; 193 } 194 195 void evlist__exit(struct evlist *evlist) 196 { 197 metricgroup__rblist_exit(&evlist->metric_events); 198 event_enable_timer__exit(&evlist->eet); 199 zfree(&evlist->mmap); 200 zfree(&evlist->overwrite_mmap); 201 perf_evlist__exit(&evlist->core); 202 } 203 204 void evlist__delete(struct evlist *evlist) 205 { 206 if (evlist == NULL) 207 return; 208 209 evlist__free_stats(evlist); 210 evlist__munmap(evlist); 211 evlist__close(evlist); 212 evlist__purge(evlist); 213 evlist__exit(evlist); 214 free(evlist); 215 } 216 217 void evlist__add(struct evlist *evlist, struct evsel *entry) 218 { 219 perf_evlist__add(&evlist->core, &entry->core); 220 entry->evlist = evlist; 221 entry->tracking = !entry->core.idx; 222 223 if (evlist->core.nr_entries == 1) 224 evlist__set_id_pos(evlist); 225 } 226 227 void evlist__remove(struct evlist *evlist, struct evsel *evsel) 228 { 229 evsel->evlist = NULL; 230 perf_evlist__remove(&evlist->core, &evsel->core); 231 } 232 233 void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list) 234 { 235 while (!list_empty(list)) { 236 struct evsel *evsel, *temp, *leader = NULL; 237 238 __evlist__for_each_entry_safe(list, temp, evsel) { 239 list_del_init(&evsel->core.node); 240 evlist__add(evlist, evsel); 241 leader = evsel; 242 break; 243 } 244 245 __evlist__for_each_entry_safe(list, temp, evsel) { 246 if (evsel__has_leader(evsel, leader)) { 247 list_del_init(&evsel->core.node); 248 evlist__add(evlist, evsel); 249 } 250 } 251 } 252 } 253 254 int __evlist__set_tracepoints_handlers(struct evlist *evlist, 255 const struct evsel_str_handler *assocs, size_t nr_assocs) 256 { 257 size_t i; 258 int err; 259 260 for (i = 0; i < nr_assocs; i++) { 261 // Adding a handler for an event not in this evlist, just ignore it. 262 struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name); 263 if (evsel == NULL) 264 continue; 265 266 err = -EEXIST; 267 if (evsel->handler != NULL) 268 goto out; 269 evsel->handler = assocs[i].handler; 270 } 271 272 err = 0; 273 out: 274 return err; 275 } 276 277 static void evlist__set_leader(struct evlist *evlist) 278 { 279 perf_evlist__set_leader(&evlist->core); 280 } 281 282 static struct evsel *evlist__dummy_event(struct evlist *evlist) 283 { 284 struct perf_event_attr attr = { 285 .type = PERF_TYPE_SOFTWARE, 286 .config = PERF_COUNT_SW_DUMMY, 287 .size = sizeof(attr), /* to capture ABI version */ 288 /* Avoid frequency mode for dummy events to avoid associated timers. */ 289 .freq = 0, 290 .sample_period = 1, 291 }; 292 293 return evsel__new_idx(&attr, evlist->core.nr_entries); 294 } 295 296 int evlist__add_dummy(struct evlist *evlist) 297 { 298 struct evsel *evsel = evlist__dummy_event(evlist); 299 300 if (evsel == NULL) 301 return -ENOMEM; 302 303 evlist__add(evlist, evsel); 304 return 0; 305 } 306 307 struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide) 308 { 309 struct evsel *evsel = evlist__dummy_event(evlist); 310 311 if (!evsel) 312 return NULL; 313 314 evsel->core.attr.exclude_kernel = 1; 315 evsel->core.attr.exclude_guest = 1; 316 evsel->core.attr.exclude_hv = 1; 317 evsel->core.system_wide = system_wide; 318 evsel->no_aux_samples = true; 319 evsel->name = strdup("dummy:u"); 320 321 evlist__add(evlist, evsel); 322 return evsel; 323 } 324 325 #ifdef HAVE_LIBTRACEEVENT 326 struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide) 327 { 328 struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0, 329 /*format=*/true); 330 331 if (IS_ERR(evsel)) 332 return evsel; 333 334 evsel__set_sample_bit(evsel, CPU); 335 evsel__set_sample_bit(evsel, TIME); 336 337 evsel->core.system_wide = system_wide; 338 evsel->no_aux_samples = true; 339 340 evlist__add(evlist, evsel); 341 return evsel; 342 } 343 #endif 344 345 struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name) 346 { 347 struct evsel *evsel; 348 349 evlist__for_each_entry(evlist, evsel) { 350 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) && 351 (strcmp(evsel->name, name) == 0)) 352 return evsel; 353 } 354 355 return NULL; 356 } 357 358 #ifdef HAVE_LIBTRACEEVENT 359 int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler) 360 { 361 struct evsel *evsel = evsel__newtp(sys, name); 362 363 if (IS_ERR(evsel)) 364 return -1; 365 366 evsel->handler = handler; 367 evlist__add(evlist, evsel); 368 return 0; 369 } 370 #endif 371 372 /* 373 * Should sched_setaffinity be used with evlist__for_each_cpu? Determine if 374 * migrating the thread will avoid possibly numerous IPIs. 375 */ 376 static bool evlist__use_affinity(struct evlist *evlist) 377 { 378 struct evsel *pos; 379 struct perf_cpu_map *used_cpus = NULL; 380 bool ret = false; 381 382 if (evlist->no_affinity || !evlist->core.user_requested_cpus || 383 cpu_map__is_dummy(evlist->core.user_requested_cpus)) 384 return false; 385 386 evlist__for_each_entry(evlist, pos) { 387 struct perf_cpu_map *intersect; 388 389 if (!perf_pmu__benefits_from_affinity(pos->pmu)) 390 continue; 391 392 if (evsel__is_dummy_event(pos)) { 393 /* 394 * The dummy event is opened on all CPUs so assume >1 395 * event with shared CPUs. 396 */ 397 ret = true; 398 break; 399 } 400 if (evsel__is_retire_lat(pos)) { 401 /* 402 * Retirement latency events are similar to tool ones in 403 * their implementation, and so don't require affinity. 404 */ 405 continue; 406 } 407 if (perf_cpu_map__is_empty(used_cpus)) { 408 /* First benefitting event, we want >1 on a common CPU. */ 409 used_cpus = perf_cpu_map__get(pos->core.cpus); 410 continue; 411 } 412 if ((pos->core.attr.read_format & PERF_FORMAT_GROUP) && 413 evsel__leader(pos) != pos) { 414 /* Skip members of the same sample group. */ 415 continue; 416 } 417 intersect = perf_cpu_map__intersect(used_cpus, pos->core.cpus); 418 if (!perf_cpu_map__is_empty(intersect)) { 419 /* >1 event with shared CPUs. */ 420 perf_cpu_map__put(intersect); 421 ret = true; 422 break; 423 } 424 perf_cpu_map__put(intersect); 425 perf_cpu_map__merge(&used_cpus, pos->core.cpus); 426 } 427 perf_cpu_map__put(used_cpus); 428 return ret; 429 } 430 431 void evlist_cpu_iterator__init(struct evlist_cpu_iterator *itr, struct evlist *evlist) 432 { 433 *itr = (struct evlist_cpu_iterator){ 434 .container = evlist, 435 .evsel = NULL, 436 .cpu_map_idx = 0, 437 .evlist_cpu_map_idx = 0, 438 .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), 439 .cpu = (struct perf_cpu){ .cpu = -1}, 440 .affinity = NULL, 441 }; 442 443 if (evlist__empty(evlist)) { 444 /* Ensure the empty list doesn't iterate. */ 445 itr->evlist_cpu_map_idx = itr->evlist_cpu_map_nr; 446 return; 447 } 448 449 if (evlist__use_affinity(evlist)) { 450 if (affinity__setup(&itr->saved_affinity) == 0) 451 itr->affinity = &itr->saved_affinity; 452 } 453 itr->evsel = evlist__first(evlist); 454 itr->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); 455 if (itr->affinity) 456 affinity__set(itr->affinity, itr->cpu.cpu); 457 itr->cpu_map_idx = perf_cpu_map__idx(itr->evsel->core.cpus, itr->cpu); 458 /* 459 * If this CPU isn't in the evsel's cpu map then advance 460 * through the list. 461 */ 462 if (itr->cpu_map_idx == -1) 463 evlist_cpu_iterator__next(itr); 464 } 465 466 void evlist_cpu_iterator__exit(struct evlist_cpu_iterator *itr) 467 { 468 if (!itr->affinity) 469 return; 470 471 affinity__cleanup(itr->affinity); 472 itr->affinity = NULL; 473 } 474 475 void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr) 476 { 477 while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) { 478 evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel); 479 evlist_cpu_itr->cpu_map_idx = 480 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 481 evlist_cpu_itr->cpu); 482 if (evlist_cpu_itr->cpu_map_idx != -1) 483 return; 484 } 485 evlist_cpu_itr->evlist_cpu_map_idx++; 486 if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) { 487 evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container); 488 evlist_cpu_itr->cpu = 489 perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, 490 evlist_cpu_itr->evlist_cpu_map_idx); 491 if (evlist_cpu_itr->affinity) 492 affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu); 493 evlist_cpu_itr->cpu_map_idx = 494 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 495 evlist_cpu_itr->cpu); 496 /* 497 * If this CPU isn't in the evsel's cpu map then advance through 498 * the list. 499 */ 500 if (evlist_cpu_itr->cpu_map_idx == -1) 501 evlist_cpu_iterator__next(evlist_cpu_itr); 502 } else { 503 evlist_cpu_iterator__exit(evlist_cpu_itr); 504 } 505 } 506 507 static int evsel__strcmp(struct evsel *pos, char *evsel_name) 508 { 509 if (!evsel_name) 510 return 0; 511 if (evsel__is_dummy_event(pos)) 512 return 1; 513 return !evsel__name_is(pos, evsel_name); 514 } 515 516 static int evlist__is_enabled(struct evlist *evlist) 517 { 518 struct evsel *pos; 519 520 evlist__for_each_entry(evlist, pos) { 521 if (!evsel__is_group_leader(pos) || !pos->core.fd) 522 continue; 523 /* If at least one event is enabled, evlist is enabled. */ 524 if (!pos->disabled) 525 return true; 526 } 527 return false; 528 } 529 530 static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy) 531 { 532 struct evsel *pos; 533 struct evlist_cpu_iterator evlist_cpu_itr; 534 bool has_imm = false; 535 536 /* Disable 'immediate' events last */ 537 for (int imm = 0; imm <= 1; imm++) { 538 evlist__for_each_cpu(evlist_cpu_itr, evlist) { 539 pos = evlist_cpu_itr.evsel; 540 if (evsel__strcmp(pos, evsel_name)) 541 continue; 542 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) 543 continue; 544 if (excl_dummy && evsel__is_dummy_event(pos)) 545 continue; 546 if (pos->immediate) 547 has_imm = true; 548 if (pos->immediate != imm) 549 continue; 550 evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 551 } 552 if (!has_imm) 553 break; 554 } 555 556 evlist__for_each_entry(evlist, pos) { 557 if (evsel__strcmp(pos, evsel_name)) 558 continue; 559 if (!evsel__is_group_leader(pos) || !pos->core.fd) 560 continue; 561 if (excl_dummy && evsel__is_dummy_event(pos)) 562 continue; 563 pos->disabled = true; 564 } 565 566 /* 567 * If we disabled only single event, we need to check 568 * the enabled state of the evlist manually. 569 */ 570 if (evsel_name) 571 evlist->enabled = evlist__is_enabled(evlist); 572 else 573 evlist->enabled = false; 574 } 575 576 void evlist__disable(struct evlist *evlist) 577 { 578 __evlist__disable(evlist, NULL, false); 579 } 580 581 void evlist__disable_non_dummy(struct evlist *evlist) 582 { 583 __evlist__disable(evlist, NULL, true); 584 } 585 586 void evlist__disable_evsel(struct evlist *evlist, char *evsel_name) 587 { 588 __evlist__disable(evlist, evsel_name, false); 589 } 590 591 static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy) 592 { 593 struct evsel *pos; 594 struct evlist_cpu_iterator evlist_cpu_itr; 595 596 evlist__for_each_cpu(evlist_cpu_itr, evlist) { 597 pos = evlist_cpu_itr.evsel; 598 if (evsel__strcmp(pos, evsel_name)) 599 continue; 600 if (!evsel__is_group_leader(pos) || !pos->core.fd) 601 continue; 602 if (excl_dummy && evsel__is_dummy_event(pos)) 603 continue; 604 evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 605 } 606 evlist__for_each_entry(evlist, pos) { 607 if (evsel__strcmp(pos, evsel_name)) 608 continue; 609 if (!evsel__is_group_leader(pos) || !pos->core.fd) 610 continue; 611 if (excl_dummy && evsel__is_dummy_event(pos)) 612 continue; 613 pos->disabled = false; 614 } 615 616 /* 617 * Even single event sets the 'enabled' for evlist, 618 * so the toggle can work properly and toggle to 619 * 'disabled' state. 620 */ 621 evlist->enabled = true; 622 } 623 624 void evlist__enable(struct evlist *evlist) 625 { 626 __evlist__enable(evlist, NULL, false); 627 } 628 629 void evlist__enable_non_dummy(struct evlist *evlist) 630 { 631 __evlist__enable(evlist, NULL, true); 632 } 633 634 void evlist__enable_evsel(struct evlist *evlist, char *evsel_name) 635 { 636 __evlist__enable(evlist, evsel_name, false); 637 } 638 639 void evlist__toggle_enable(struct evlist *evlist) 640 { 641 (evlist->enabled ? evlist__disable : evlist__enable)(evlist); 642 } 643 644 int evlist__add_pollfd(struct evlist *evlist, int fd) 645 { 646 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default); 647 } 648 649 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask) 650 { 651 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask); 652 } 653 654 #ifdef HAVE_EVENTFD_SUPPORT 655 int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd) 656 { 657 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 658 fdarray_flag__nonfilterable | 659 fdarray_flag__non_perf_event); 660 } 661 #endif 662 663 int evlist__poll(struct evlist *evlist, int timeout) 664 { 665 return perf_evlist__poll(&evlist->core, timeout); 666 } 667 668 struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id) 669 { 670 struct hlist_head *head; 671 struct perf_sample_id *sid; 672 int hash; 673 674 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 675 head = &evlist->core.heads[hash]; 676 677 hlist_for_each_entry(sid, head, node) 678 if (sid->id == id) 679 return sid; 680 681 return NULL; 682 } 683 684 struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id) 685 { 686 struct perf_sample_id *sid; 687 688 if (evlist->core.nr_entries == 1 || !id) 689 return evlist__first(evlist); 690 691 sid = evlist__id2sid(evlist, id); 692 if (sid) 693 return container_of(sid->evsel, struct evsel, core); 694 695 if (!evlist__sample_id_all(evlist)) 696 return evlist__first(evlist); 697 698 return NULL; 699 } 700 701 struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id) 702 { 703 struct perf_sample_id *sid; 704 705 if (!id) 706 return NULL; 707 708 sid = evlist__id2sid(evlist, id); 709 if (sid) 710 return container_of(sid->evsel, struct evsel, core); 711 712 return NULL; 713 } 714 715 static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id) 716 { 717 const __u64 *array = event->sample.array; 718 ssize_t n; 719 720 n = (event->header.size - sizeof(event->header)) >> 3; 721 722 if (event->header.type == PERF_RECORD_SAMPLE) { 723 if (evlist->id_pos >= n) 724 return -1; 725 *id = array[evlist->id_pos]; 726 } else { 727 if (evlist->is_pos > n) 728 return -1; 729 n -= evlist->is_pos; 730 *id = array[n]; 731 } 732 return 0; 733 } 734 735 struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event) 736 { 737 struct evsel *first = evlist__first(evlist); 738 struct hlist_head *head; 739 struct perf_sample_id *sid; 740 int hash; 741 u64 id; 742 743 if (evlist->core.nr_entries == 1) 744 return first; 745 746 if (!first->core.attr.sample_id_all && 747 event->header.type != PERF_RECORD_SAMPLE) 748 return first; 749 750 if (evlist__event2id(evlist, event, &id)) 751 return NULL; 752 753 /* Synthesized events have an id of zero */ 754 if (!id) 755 return first; 756 757 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 758 head = &evlist->core.heads[hash]; 759 760 hlist_for_each_entry(sid, head, node) { 761 if (sid->id == id) 762 return container_of(sid->evsel, struct evsel, core); 763 } 764 return NULL; 765 } 766 767 static int evlist__set_paused(struct evlist *evlist, bool value) 768 { 769 int i; 770 771 if (!evlist->overwrite_mmap) 772 return 0; 773 774 for (i = 0; i < evlist->core.nr_mmaps; i++) { 775 int fd = evlist->overwrite_mmap[i].core.fd; 776 int err; 777 778 if (fd < 0) 779 continue; 780 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0); 781 if (err) 782 return err; 783 } 784 return 0; 785 } 786 787 static int evlist__pause(struct evlist *evlist) 788 { 789 return evlist__set_paused(evlist, true); 790 } 791 792 static int evlist__resume(struct evlist *evlist) 793 { 794 return evlist__set_paused(evlist, false); 795 } 796 797 static void evlist__munmap_nofree(struct evlist *evlist) 798 { 799 int i; 800 801 if (evlist->mmap) 802 for (i = 0; i < evlist->core.nr_mmaps; i++) 803 perf_mmap__munmap(&evlist->mmap[i].core); 804 805 if (evlist->overwrite_mmap) 806 for (i = 0; i < evlist->core.nr_mmaps; i++) 807 perf_mmap__munmap(&evlist->overwrite_mmap[i].core); 808 } 809 810 void evlist__munmap(struct evlist *evlist) 811 { 812 evlist__munmap_nofree(evlist); 813 zfree(&evlist->mmap); 814 zfree(&evlist->overwrite_mmap); 815 } 816 817 static void perf_mmap__unmap_cb(struct perf_mmap *map) 818 { 819 struct mmap *m = container_of(map, struct mmap, core); 820 821 mmap__munmap(m); 822 } 823 824 static struct mmap *evlist__alloc_mmap(struct evlist *evlist, 825 bool overwrite) 826 { 827 int i; 828 struct mmap *map = calloc(evlist->core.nr_mmaps, sizeof(struct mmap)); 829 830 if (!map) 831 return NULL; 832 833 for (i = 0; i < evlist->core.nr_mmaps; i++) { 834 struct perf_mmap *prev = i ? &map[i - 1].core : NULL; 835 836 /* 837 * When the perf_mmap() call is made we grab one refcount, plus 838 * one extra to let perf_mmap__consume() get the last 839 * events after all real references (perf_mmap__get()) are 840 * dropped. 841 * 842 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and 843 * thus does perf_mmap__get() on it. 844 */ 845 perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb); 846 } 847 848 return map; 849 } 850 851 static void 852 perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist, 853 struct perf_evsel *_evsel, 854 struct perf_mmap_param *_mp, 855 int idx) 856 { 857 struct evlist *evlist = container_of(_evlist, struct evlist, core); 858 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 859 struct evsel *evsel = container_of(_evsel, struct evsel, core); 860 861 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx); 862 } 863 864 static struct perf_mmap* 865 perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx) 866 { 867 struct evlist *evlist = container_of(_evlist, struct evlist, core); 868 struct mmap *maps; 869 870 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap; 871 872 if (!maps) { 873 maps = evlist__alloc_mmap(evlist, overwrite); 874 if (!maps) 875 return NULL; 876 877 if (overwrite) { 878 evlist->overwrite_mmap = maps; 879 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) 880 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); 881 } else { 882 evlist->mmap = maps; 883 } 884 } 885 886 return &maps[idx].core; 887 } 888 889 static int 890 perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp, 891 int output, struct perf_cpu cpu) 892 { 893 struct mmap *map = container_of(_map, struct mmap, core); 894 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 895 896 return mmap__mmap(map, mp, output, cpu); 897 } 898 899 unsigned long perf_event_mlock_kb_in_pages(void) 900 { 901 unsigned long pages; 902 int max; 903 904 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { 905 /* 906 * Pick a once upon a time good value, i.e. things look 907 * strange since we can't read a sysctl value, but lets not 908 * die yet... 909 */ 910 max = 512; 911 } else { 912 max -= (page_size / 1024); 913 } 914 915 pages = (max * 1024) / page_size; 916 if (!is_power_of_2(pages)) 917 pages = rounddown_pow_of_two(pages); 918 919 return pages; 920 } 921 922 size_t evlist__mmap_size(unsigned long pages) 923 { 924 if (pages == UINT_MAX) 925 pages = perf_event_mlock_kb_in_pages(); 926 else if (!is_power_of_2(pages)) 927 return 0; 928 929 return (pages + 1) * page_size; 930 } 931 932 static long parse_pages_arg(const char *str, unsigned long min, 933 unsigned long max) 934 { 935 unsigned long pages, val; 936 static struct parse_tag tags[] = { 937 { .tag = 'B', .mult = 1 }, 938 { .tag = 'K', .mult = 1 << 10 }, 939 { .tag = 'M', .mult = 1 << 20 }, 940 { .tag = 'G', .mult = 1 << 30 }, 941 { .tag = 0 }, 942 }; 943 944 if (str == NULL) 945 return -EINVAL; 946 947 val = parse_tag_value(str, tags); 948 if (val != (unsigned long) -1) { 949 /* we got file size value */ 950 pages = PERF_ALIGN(val, page_size) / page_size; 951 } else { 952 /* we got pages count value */ 953 char *eptr; 954 pages = strtoul(str, &eptr, 10); 955 if (*eptr != '\0') 956 return -EINVAL; 957 } 958 959 if (pages == 0 && min == 0) { 960 /* leave number of pages at 0 */ 961 } else if (!is_power_of_2(pages)) { 962 char buf[100]; 963 964 /* round pages up to next power of 2 */ 965 pages = roundup_pow_of_two(pages); 966 if (!pages) 967 return -EINVAL; 968 969 unit_number__scnprintf(buf, sizeof(buf), pages * page_size); 970 pr_info("rounding mmap pages size to %s (%lu pages)\n", 971 buf, pages); 972 } 973 974 if (pages > max) 975 return -EINVAL; 976 977 return pages; 978 } 979 980 int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) 981 { 982 unsigned long max = UINT_MAX; 983 long pages; 984 985 if (max > SIZE_MAX / page_size) 986 max = SIZE_MAX / page_size; 987 988 pages = parse_pages_arg(str, 1, max); 989 if (pages < 0) { 990 pr_err("Invalid argument for --mmap_pages/-m\n"); 991 return -1; 992 } 993 994 *mmap_pages = pages; 995 return 0; 996 } 997 998 int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused) 999 { 1000 return __evlist__parse_mmap_pages(opt->value, str); 1001 } 1002 1003 /** 1004 * evlist__mmap_ex - Create mmaps to receive events. 1005 * @evlist: list of events 1006 * @pages: map length in pages 1007 * @overwrite: overwrite older events? 1008 * @auxtrace_pages - auxtrace map length in pages 1009 * @auxtrace_overwrite - overwrite older auxtrace data? 1010 * 1011 * If @overwrite is %false the user needs to signal event consumption using 1012 * perf_mmap__write_tail(). Using evlist__mmap_read() does this 1013 * automatically. 1014 * 1015 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data 1016 * consumption using auxtrace_mmap__write_tail(). 1017 * 1018 * Return: %0 on success, negative error code otherwise. 1019 */ 1020 int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 1021 unsigned int auxtrace_pages, 1022 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush, 1023 int comp_level) 1024 { 1025 /* 1026 * Delay setting mp.prot: set it before calling perf_mmap__mmap. 1027 * Its value is decided by evsel's write_backward. 1028 * So &mp should not be passed through const pointer. 1029 */ 1030 struct mmap_params mp = { 1031 .nr_cblocks = nr_cblocks, 1032 .affinity = affinity, 1033 .flush = flush, 1034 .comp_level = comp_level 1035 }; 1036 struct perf_evlist_mmap_ops ops = { 1037 .idx = perf_evlist__mmap_cb_idx, 1038 .get = perf_evlist__mmap_cb_get, 1039 .mmap = perf_evlist__mmap_cb_mmap, 1040 }; 1041 1042 evlist->core.mmap_len = evlist__mmap_size(pages); 1043 pr_debug("mmap size %zuB\n", evlist->core.mmap_len); 1044 1045 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, 1046 auxtrace_pages, auxtrace_overwrite); 1047 1048 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core); 1049 } 1050 1051 int evlist__mmap(struct evlist *evlist, unsigned int pages) 1052 { 1053 return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); 1054 } 1055 1056 int evlist__create_maps(struct evlist *evlist, struct target *target) 1057 { 1058 bool all_threads = (target->per_thread && target->system_wide); 1059 struct perf_cpu_map *cpus; 1060 struct perf_thread_map *threads; 1061 1062 /* 1063 * If specify '-a' and '--per-thread' to perf record, perf record 1064 * will override '--per-thread'. target->per_thread = false and 1065 * target->system_wide = true. 1066 * 1067 * If specify '--per-thread' only to perf record, 1068 * target->per_thread = true and target->system_wide = false. 1069 * 1070 * So target->per_thread && target->system_wide is false. 1071 * For perf record, thread_map__new_str doesn't call 1072 * thread_map__new_all_cpus. That will keep perf record's 1073 * current behavior. 1074 * 1075 * For perf stat, it allows the case that target->per_thread and 1076 * target->system_wide are all true. It means to collect system-wide 1077 * per-thread data. thread_map__new_str will call 1078 * thread_map__new_all_cpus to enumerate all threads. 1079 */ 1080 threads = thread_map__new_str(target->pid, target->tid, all_threads); 1081 1082 if (!threads) 1083 return -1; 1084 1085 if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist)) 1086 cpus = perf_cpu_map__new_any_cpu(); 1087 else 1088 cpus = perf_cpu_map__new(target->cpu_list); 1089 1090 if (!cpus) 1091 goto out_delete_threads; 1092 1093 evlist->core.has_user_cpus = !!target->cpu_list; 1094 1095 perf_evlist__set_maps(&evlist->core, cpus, threads); 1096 1097 /* as evlist now has references, put count here */ 1098 perf_cpu_map__put(cpus); 1099 perf_thread_map__put(threads); 1100 1101 return 0; 1102 1103 out_delete_threads: 1104 perf_thread_map__put(threads); 1105 return -1; 1106 } 1107 1108 int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel, 1109 struct target *target) 1110 { 1111 struct evsel *evsel; 1112 int err = 0; 1113 1114 evlist__for_each_entry(evlist, evsel) { 1115 /* 1116 * filters only work for tracepoint event, which doesn't have cpu limit. 1117 * So evlist and evsel should always be same. 1118 */ 1119 if (evsel->filter) { 1120 err = perf_evsel__apply_filter(&evsel->core, evsel->filter); 1121 if (err) { 1122 *err_evsel = evsel; 1123 break; 1124 } 1125 } 1126 1127 /* 1128 * non-tracepoint events can have BPF filters. 1129 */ 1130 if (!list_empty(&evsel->bpf_filters)) { 1131 err = perf_bpf_filter__prepare(evsel, target); 1132 if (err) { 1133 *err_evsel = evsel; 1134 break; 1135 } 1136 } 1137 } 1138 1139 return err; 1140 } 1141 1142 int evlist__set_tp_filter(struct evlist *evlist, const char *filter) 1143 { 1144 struct evsel *evsel; 1145 int err = 0; 1146 1147 if (filter == NULL) 1148 return -1; 1149 1150 evlist__for_each_entry(evlist, evsel) { 1151 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1152 continue; 1153 1154 err = evsel__set_filter(evsel, filter); 1155 if (err) 1156 break; 1157 } 1158 1159 return err; 1160 } 1161 1162 int evlist__append_tp_filter(struct evlist *evlist, const char *filter) 1163 { 1164 struct evsel *evsel; 1165 int err = 0; 1166 1167 if (filter == NULL) 1168 return -1; 1169 1170 evlist__for_each_entry(evlist, evsel) { 1171 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1172 continue; 1173 1174 err = evsel__append_tp_filter(evsel, filter); 1175 if (err) 1176 break; 1177 } 1178 1179 return err; 1180 } 1181 1182 char *asprintf__tp_filter_pids(size_t npids, pid_t *pids) 1183 { 1184 char *filter; 1185 size_t i; 1186 1187 for (i = 0; i < npids; ++i) { 1188 if (i == 0) { 1189 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) 1190 return NULL; 1191 } else { 1192 char *tmp; 1193 1194 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) 1195 goto out_free; 1196 1197 free(filter); 1198 filter = tmp; 1199 } 1200 } 1201 1202 return filter; 1203 out_free: 1204 free(filter); 1205 return NULL; 1206 } 1207 1208 int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1209 { 1210 char *filter = asprintf__tp_filter_pids(npids, pids); 1211 int ret = evlist__set_tp_filter(evlist, filter); 1212 1213 free(filter); 1214 return ret; 1215 } 1216 1217 int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1218 { 1219 char *filter = asprintf__tp_filter_pids(npids, pids); 1220 int ret = evlist__append_tp_filter(evlist, filter); 1221 1222 free(filter); 1223 return ret; 1224 } 1225 1226 int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid) 1227 { 1228 return evlist__append_tp_filter_pids(evlist, 1, &pid); 1229 } 1230 1231 bool evlist__valid_sample_type(struct evlist *evlist) 1232 { 1233 struct evsel *pos; 1234 1235 if (evlist->core.nr_entries == 1) 1236 return true; 1237 1238 if (evlist->id_pos < 0 || evlist->is_pos < 0) 1239 return false; 1240 1241 evlist__for_each_entry(evlist, pos) { 1242 if (pos->id_pos != evlist->id_pos || 1243 pos->is_pos != evlist->is_pos) 1244 return false; 1245 } 1246 1247 return true; 1248 } 1249 1250 u64 __evlist__combined_sample_type(struct evlist *evlist) 1251 { 1252 struct evsel *evsel; 1253 1254 if (evlist->combined_sample_type) 1255 return evlist->combined_sample_type; 1256 1257 evlist__for_each_entry(evlist, evsel) 1258 evlist->combined_sample_type |= evsel->core.attr.sample_type; 1259 1260 return evlist->combined_sample_type; 1261 } 1262 1263 u64 evlist__combined_sample_type(struct evlist *evlist) 1264 { 1265 evlist->combined_sample_type = 0; 1266 return __evlist__combined_sample_type(evlist); 1267 } 1268 1269 u64 evlist__combined_branch_type(struct evlist *evlist) 1270 { 1271 struct evsel *evsel; 1272 u64 branch_type = 0; 1273 1274 evlist__for_each_entry(evlist, evsel) 1275 branch_type |= evsel->core.attr.branch_sample_type; 1276 return branch_type; 1277 } 1278 1279 static struct evsel * 1280 evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event) 1281 { 1282 struct evsel *pos; 1283 1284 evlist__for_each_entry(evlist, pos) { 1285 if (event == pos) 1286 break; 1287 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) && 1288 !strcmp(pos->name, event->name)) 1289 return pos; 1290 } 1291 return NULL; 1292 } 1293 1294 #define MAX_NR_ABBR_NAME (26 * 11) 1295 1296 /* 1297 * The abbr name is from A to Z9. If the number of event 1298 * which requires the branch counter > MAX_NR_ABBR_NAME, 1299 * return NA. 1300 */ 1301 static void evlist__new_abbr_name(char *name) 1302 { 1303 static int idx; 1304 int i = idx / 26; 1305 1306 if (idx >= MAX_NR_ABBR_NAME) { 1307 name[0] = 'N'; 1308 name[1] = 'A'; 1309 name[2] = '\0'; 1310 return; 1311 } 1312 1313 name[0] = 'A' + (idx % 26); 1314 1315 if (!i) 1316 name[1] = '\0'; 1317 else { 1318 name[1] = '0' + i - 1; 1319 name[2] = '\0'; 1320 } 1321 1322 idx++; 1323 } 1324 1325 void evlist__update_br_cntr(struct evlist *evlist) 1326 { 1327 struct evsel *evsel, *dup; 1328 int i = 0; 1329 1330 evlist__for_each_entry(evlist, evsel) { 1331 if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) { 1332 evsel->br_cntr_idx = i++; 1333 evsel__leader(evsel)->br_cntr_nr++; 1334 1335 dup = evlist__find_dup_event_from_prev(evlist, evsel); 1336 if (dup) 1337 memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char)); 1338 else 1339 evlist__new_abbr_name(evsel->abbr_name); 1340 } 1341 } 1342 evlist->nr_br_cntr = i; 1343 } 1344 1345 bool evlist__valid_read_format(struct evlist *evlist) 1346 { 1347 struct evsel *first = evlist__first(evlist), *pos = first; 1348 u64 read_format = first->core.attr.read_format; 1349 u64 sample_type = first->core.attr.sample_type; 1350 1351 evlist__for_each_entry(evlist, pos) { 1352 if (read_format != pos->core.attr.read_format) { 1353 pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n", 1354 read_format, (u64)pos->core.attr.read_format); 1355 } 1356 } 1357 1358 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */ 1359 if ((sample_type & PERF_SAMPLE_READ) && 1360 !(read_format & PERF_FORMAT_ID)) { 1361 return false; 1362 } 1363 1364 return true; 1365 } 1366 1367 u16 evlist__id_hdr_size(struct evlist *evlist) 1368 { 1369 struct evsel *first = evlist__first(evlist); 1370 1371 return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0; 1372 } 1373 1374 bool evlist__valid_sample_id_all(struct evlist *evlist) 1375 { 1376 struct evsel *first = evlist__first(evlist), *pos = first; 1377 1378 evlist__for_each_entry_continue(evlist, pos) { 1379 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) 1380 return false; 1381 } 1382 1383 return true; 1384 } 1385 1386 bool evlist__sample_id_all(struct evlist *evlist) 1387 { 1388 struct evsel *first = evlist__first(evlist); 1389 return first->core.attr.sample_id_all; 1390 } 1391 1392 void evlist__set_selected(struct evlist *evlist, struct evsel *evsel) 1393 { 1394 evlist->selected = evsel; 1395 } 1396 1397 void evlist__close(struct evlist *evlist) 1398 { 1399 struct evsel *evsel; 1400 struct evlist_cpu_iterator evlist_cpu_itr; 1401 1402 evlist__for_each_cpu(evlist_cpu_itr, evlist) { 1403 if (evlist_cpu_itr.cpu_map_idx == 0 && evsel__is_retire_lat(evlist_cpu_itr.evsel)) 1404 evsel__tpebs_close(evlist_cpu_itr.evsel); 1405 perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, 1406 evlist_cpu_itr.cpu_map_idx); 1407 } 1408 1409 evlist__for_each_entry_reverse(evlist, evsel) { 1410 perf_evsel__free_fd(&evsel->core); 1411 perf_evsel__free_id(&evsel->core); 1412 } 1413 perf_evlist__reset_id_hash(&evlist->core); 1414 } 1415 1416 static int evlist__create_syswide_maps(struct evlist *evlist) 1417 { 1418 struct perf_cpu_map *cpus; 1419 struct perf_thread_map *threads; 1420 1421 /* 1422 * Try reading /sys/devices/system/cpu/online to get 1423 * an all cpus map. 1424 * 1425 * FIXME: -ENOMEM is the best we can do here, the cpu_map 1426 * code needs an overhaul to properly forward the 1427 * error, and we may not want to do that fallback to a 1428 * default cpu identity map :-\ 1429 */ 1430 cpus = perf_cpu_map__new_online_cpus(); 1431 if (!cpus) 1432 return -ENOMEM; 1433 1434 threads = perf_thread_map__new_dummy(); 1435 if (!threads) { 1436 perf_cpu_map__put(cpus); 1437 return -ENOMEM; 1438 } 1439 1440 perf_evlist__set_maps(&evlist->core, cpus, threads); 1441 perf_thread_map__put(threads); 1442 perf_cpu_map__put(cpus); 1443 return 0; 1444 } 1445 1446 int evlist__open(struct evlist *evlist) 1447 { 1448 struct evsel *evsel; 1449 int err; 1450 1451 /* 1452 * Default: one fd per CPU, all threads, aka systemwide 1453 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL 1454 */ 1455 if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) { 1456 err = evlist__create_syswide_maps(evlist); 1457 if (err < 0) 1458 goto out_err; 1459 } 1460 1461 evlist__update_id_pos(evlist); 1462 1463 evlist__for_each_entry(evlist, evsel) { 1464 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); 1465 if (err < 0) 1466 goto out_err; 1467 } 1468 1469 return 0; 1470 out_err: 1471 evlist__close(evlist); 1472 errno = -err; 1473 return err; 1474 } 1475 1476 int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[], 1477 bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) 1478 { 1479 int child_ready_pipe[2], go_pipe[2]; 1480 char bf; 1481 1482 evlist->workload.cork_fd = -1; 1483 1484 if (pipe(child_ready_pipe) < 0) { 1485 perror("failed to create 'ready' pipe"); 1486 return -1; 1487 } 1488 1489 if (pipe(go_pipe) < 0) { 1490 perror("failed to create 'go' pipe"); 1491 goto out_close_ready_pipe; 1492 } 1493 1494 evlist->workload.pid = fork(); 1495 if (evlist->workload.pid < 0) { 1496 perror("failed to fork"); 1497 goto out_close_pipes; 1498 } 1499 1500 if (!evlist->workload.pid) { 1501 int ret; 1502 1503 if (pipe_output) 1504 dup2(2, 1); 1505 1506 signal(SIGTERM, SIG_DFL); 1507 1508 close(child_ready_pipe[0]); 1509 close(go_pipe[1]); 1510 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 1511 1512 /* 1513 * Change the name of this process not to confuse --exclude-perf users 1514 * that sees 'perf' in the window up to the execvp() and thinks that 1515 * perf samples are not being excluded. 1516 */ 1517 prctl(PR_SET_NAME, "perf-exec"); 1518 1519 /* 1520 * Tell the parent we're ready to go 1521 */ 1522 close(child_ready_pipe[1]); 1523 1524 /* 1525 * Wait until the parent tells us to go. 1526 */ 1527 ret = read(go_pipe[0], &bf, 1); 1528 /* 1529 * The parent will ask for the execvp() to be performed by 1530 * writing exactly one byte, in workload.cork_fd, usually via 1531 * evlist__start_workload(). 1532 * 1533 * For cancelling the workload without actually running it, 1534 * the parent will just close workload.cork_fd, without writing 1535 * anything, i.e. read will return zero and we just exit() 1536 * here (See evlist__cancel_workload()). 1537 */ 1538 if (ret != 1) { 1539 if (ret == -1) 1540 perror("unable to read pipe"); 1541 exit(ret); 1542 } 1543 1544 execvp(argv[0], (char **)argv); 1545 1546 if (exec_error) { 1547 union sigval val; 1548 1549 val.sival_int = errno; 1550 if (sigqueue(getppid(), SIGUSR1, val)) 1551 perror(argv[0]); 1552 } else 1553 perror(argv[0]); 1554 exit(-1); 1555 } 1556 1557 if (exec_error) { 1558 struct sigaction act = { 1559 .sa_flags = SA_SIGINFO, 1560 .sa_sigaction = exec_error, 1561 }; 1562 sigaction(SIGUSR1, &act, NULL); 1563 } 1564 1565 if (target__none(target)) { 1566 if (evlist->core.threads == NULL) { 1567 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", 1568 __func__, __LINE__); 1569 goto out_close_pipes; 1570 } 1571 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); 1572 } 1573 1574 close(child_ready_pipe[1]); 1575 close(go_pipe[0]); 1576 /* 1577 * wait for child to settle 1578 */ 1579 if (read(child_ready_pipe[0], &bf, 1) == -1) { 1580 perror("unable to read pipe"); 1581 goto out_close_pipes; 1582 } 1583 1584 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); 1585 evlist->workload.cork_fd = go_pipe[1]; 1586 close(child_ready_pipe[0]); 1587 return 0; 1588 1589 out_close_pipes: 1590 close(go_pipe[0]); 1591 close(go_pipe[1]); 1592 out_close_ready_pipe: 1593 close(child_ready_pipe[0]); 1594 close(child_ready_pipe[1]); 1595 return -1; 1596 } 1597 1598 int evlist__start_workload(struct evlist *evlist) 1599 { 1600 if (evlist->workload.cork_fd >= 0) { 1601 char bf = 0; 1602 int ret; 1603 /* 1604 * Remove the cork, let it rip! 1605 */ 1606 ret = write(evlist->workload.cork_fd, &bf, 1); 1607 if (ret < 0) 1608 perror("unable to write to pipe"); 1609 1610 close(evlist->workload.cork_fd); 1611 evlist->workload.cork_fd = -1; 1612 return ret; 1613 } 1614 1615 return 0; 1616 } 1617 1618 void evlist__cancel_workload(struct evlist *evlist) 1619 { 1620 int status; 1621 1622 if (evlist->workload.cork_fd >= 0) { 1623 close(evlist->workload.cork_fd); 1624 evlist->workload.cork_fd = -1; 1625 waitpid(evlist->workload.pid, &status, WNOHANG); 1626 } 1627 } 1628 1629 int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) 1630 { 1631 struct evsel *evsel = evlist__event2evsel(evlist, event); 1632 int ret; 1633 1634 if (!evsel) { 1635 /* Ensure the sample is okay for perf_sample__exit. */ 1636 perf_sample__init(sample, /*all=*/false); 1637 return -EFAULT; 1638 } 1639 ret = evsel__parse_sample(evsel, event, sample); 1640 if (ret) 1641 return ret; 1642 if (perf_guest && sample->id) { 1643 struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id); 1644 1645 if (sid) { 1646 sample->machine_pid = sid->machine_pid; 1647 sample->vcpu = sid->vcpu.cpu; 1648 } 1649 } 1650 return 0; 1651 } 1652 1653 int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp) 1654 { 1655 struct evsel *evsel = evlist__event2evsel(evlist, event); 1656 1657 if (!evsel) 1658 return -EFAULT; 1659 return evsel__parse_sample_timestamp(evsel, event, timestamp); 1660 } 1661 1662 int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size) 1663 { 1664 int printed, value; 1665 1666 switch (err) { 1667 case EACCES: 1668 case EPERM: 1669 errno = err; 1670 printed = scnprintf(buf, size, 1671 "Error:\t%m.\n" 1672 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting."); 1673 1674 value = perf_event_paranoid(); 1675 1676 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1677 1678 if (value >= 2) { 1679 printed += scnprintf(buf + printed, size - printed, 1680 "For your workloads it needs to be <= 1\nHint:\t"); 1681 } 1682 printed += scnprintf(buf + printed, size - printed, 1683 "For system wide tracing it needs to be set to -1.\n"); 1684 1685 printed += scnprintf(buf + printed, size - printed, 1686 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1687 "Hint:\tThe current value is %d.", value); 1688 break; 1689 case EINVAL: { 1690 struct evsel *first = evlist__first(evlist); 1691 int max_freq; 1692 1693 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) 1694 goto out_default; 1695 1696 if (first->core.attr.sample_freq < (u64)max_freq) 1697 goto out_default; 1698 1699 errno = err; 1700 printed = scnprintf(buf, size, 1701 "Error:\t%m.\n" 1702 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" 1703 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", 1704 max_freq, first->core.attr.sample_freq); 1705 break; 1706 } 1707 default: 1708 out_default: 1709 errno = err; 1710 scnprintf(buf, size, "%m"); 1711 break; 1712 } 1713 1714 return 0; 1715 } 1716 1717 int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size) 1718 { 1719 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; 1720 1721 switch (err) { 1722 case EPERM: 1723 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1724 errno = err; 1725 printed += scnprintf(buf + printed, size - printed, 1726 "Error:\t%m.\n" 1727 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1728 "Hint:\tTried using %zd kB.\n", 1729 pages_max_per_user, pages_attempted); 1730 1731 if (pages_attempted >= pages_max_per_user) { 1732 printed += scnprintf(buf + printed, size - printed, 1733 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1734 pages_max_per_user + pages_attempted); 1735 } 1736 1737 printed += scnprintf(buf + printed, size - printed, 1738 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1739 break; 1740 default: 1741 errno = err; 1742 scnprintf(buf, size, "%m"); 1743 break; 1744 } 1745 1746 return 0; 1747 } 1748 1749 void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel) 1750 { 1751 struct evsel *evsel, *n; 1752 LIST_HEAD(move); 1753 1754 if (move_evsel == evlist__first(evlist)) 1755 return; 1756 1757 evlist__for_each_entry_safe(evlist, n, evsel) { 1758 if (evsel__leader(evsel) == evsel__leader(move_evsel)) 1759 list_move_tail(&evsel->core.node, &move); 1760 } 1761 1762 list_splice(&move, &evlist->core.entries); 1763 } 1764 1765 struct evsel *evlist__get_tracking_event(struct evlist *evlist) 1766 { 1767 struct evsel *evsel; 1768 1769 evlist__for_each_entry(evlist, evsel) { 1770 if (evsel->tracking) 1771 return evsel; 1772 } 1773 1774 return evlist__first(evlist); 1775 } 1776 1777 void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel) 1778 { 1779 struct evsel *evsel; 1780 1781 if (tracking_evsel->tracking) 1782 return; 1783 1784 evlist__for_each_entry(evlist, evsel) { 1785 if (evsel != tracking_evsel) 1786 evsel->tracking = false; 1787 } 1788 1789 tracking_evsel->tracking = true; 1790 } 1791 1792 struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide) 1793 { 1794 struct evsel *evsel; 1795 1796 evsel = evlist__get_tracking_event(evlist); 1797 if (!evsel__is_dummy_event(evsel)) { 1798 evsel = evlist__add_aux_dummy(evlist, system_wide); 1799 if (!evsel) 1800 return NULL; 1801 1802 evlist__set_tracking_event(evlist, evsel); 1803 } else if (system_wide) { 1804 perf_evlist__go_system_wide(&evlist->core, &evsel->core); 1805 } 1806 1807 return evsel; 1808 } 1809 1810 struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str) 1811 { 1812 struct evsel *evsel; 1813 1814 evlist__for_each_entry(evlist, evsel) { 1815 if (!evsel->name) 1816 continue; 1817 if (evsel__name_is(evsel, str)) 1818 return evsel; 1819 } 1820 1821 return NULL; 1822 } 1823 1824 void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state) 1825 { 1826 enum bkw_mmap_state old_state = evlist->bkw_mmap_state; 1827 enum action { 1828 NONE, 1829 PAUSE, 1830 RESUME, 1831 } action = NONE; 1832 1833 if (!evlist->overwrite_mmap) 1834 return; 1835 1836 switch (old_state) { 1837 case BKW_MMAP_NOTREADY: { 1838 if (state != BKW_MMAP_RUNNING) 1839 goto state_err; 1840 break; 1841 } 1842 case BKW_MMAP_RUNNING: { 1843 if (state != BKW_MMAP_DATA_PENDING) 1844 goto state_err; 1845 action = PAUSE; 1846 break; 1847 } 1848 case BKW_MMAP_DATA_PENDING: { 1849 if (state != BKW_MMAP_EMPTY) 1850 goto state_err; 1851 break; 1852 } 1853 case BKW_MMAP_EMPTY: { 1854 if (state != BKW_MMAP_RUNNING) 1855 goto state_err; 1856 action = RESUME; 1857 break; 1858 } 1859 default: 1860 WARN_ONCE(1, "Shouldn't get there\n"); 1861 } 1862 1863 evlist->bkw_mmap_state = state; 1864 1865 switch (action) { 1866 case PAUSE: 1867 evlist__pause(evlist); 1868 break; 1869 case RESUME: 1870 evlist__resume(evlist); 1871 break; 1872 case NONE: 1873 default: 1874 break; 1875 } 1876 1877 state_err: 1878 return; 1879 } 1880 1881 bool evlist__exclude_kernel(struct evlist *evlist) 1882 { 1883 struct evsel *evsel; 1884 1885 evlist__for_each_entry(evlist, evsel) { 1886 if (!evsel->core.attr.exclude_kernel) 1887 return false; 1888 } 1889 1890 return true; 1891 } 1892 1893 /* 1894 * Events in data file are not collect in groups, but we still want 1895 * the group display. Set the artificial group and set the leader's 1896 * forced_leader flag to notify the display code. 1897 */ 1898 void evlist__force_leader(struct evlist *evlist) 1899 { 1900 if (evlist__nr_groups(evlist) == 0) { 1901 struct evsel *leader = evlist__first(evlist); 1902 1903 evlist__set_leader(evlist); 1904 leader->forced_leader = true; 1905 } 1906 } 1907 1908 struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close) 1909 { 1910 struct evsel *c2, *leader; 1911 bool is_open = true; 1912 1913 leader = evsel__leader(evsel); 1914 1915 pr_debug("Weak group for %s/%d failed\n", 1916 leader->name, leader->core.nr_members); 1917 1918 /* 1919 * for_each_group_member doesn't work here because it doesn't 1920 * include the first entry. 1921 */ 1922 evlist__for_each_entry(evsel_list, c2) { 1923 if (c2 == evsel) 1924 is_open = false; 1925 if (evsel__has_leader(c2, leader)) { 1926 if (is_open && close) 1927 perf_evsel__close(&c2->core); 1928 /* 1929 * We want to close all members of the group and reopen 1930 * them. Some events, like Intel topdown, require being 1931 * in a group and so keep these in the group. 1932 */ 1933 evsel__remove_from_group(c2, leader); 1934 1935 /* 1936 * Set this for all former members of the group 1937 * to indicate they get reopened. 1938 */ 1939 c2->reset_group = true; 1940 } 1941 } 1942 /* Reset the leader count if all entries were removed. */ 1943 if (leader->core.nr_members == 1) 1944 leader->core.nr_members = 0; 1945 return leader; 1946 } 1947 1948 static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1949 { 1950 char *s, *p; 1951 int ret = 0, fd; 1952 1953 if (strncmp(str, "fifo:", 5)) 1954 return -EINVAL; 1955 1956 str += 5; 1957 if (!*str || *str == ',') 1958 return -EINVAL; 1959 1960 s = strdup(str); 1961 if (!s) 1962 return -ENOMEM; 1963 1964 p = strchr(s, ','); 1965 if (p) 1966 *p = '\0'; 1967 1968 /* 1969 * O_RDWR avoids POLLHUPs which is necessary to allow the other 1970 * end of a FIFO to be repeatedly opened and closed. 1971 */ 1972 fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1973 if (fd < 0) { 1974 ret = -errno; 1975 pr_err("Failed to open '%s': %m\n", s); 1976 goto out_free; 1977 } 1978 *ctl_fd = fd; 1979 *ctl_fd_close = true; 1980 1981 if (p && *++p) { 1982 /* O_RDWR | O_NONBLOCK means the other end need not be open */ 1983 fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1984 if (fd < 0) { 1985 pr_err("Failed to open '%s': %m\n", p); 1986 ret = -errno; 1987 goto out_free; 1988 } 1989 *ctl_fd_ack = fd; 1990 } 1991 1992 out_free: 1993 free(s); 1994 return ret; 1995 } 1996 1997 int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1998 { 1999 const char *comma = NULL; 2000 char *endptr = NULL; 2001 2002 *ctl_fd_close = false; 2003 2004 if (strncmp(str, "fd:", 3)) 2005 return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close); 2006 2007 *ctl_fd = strtoul(&str[3], &endptr, 0); 2008 if (endptr == &str[3]) 2009 return -EINVAL; 2010 2011 comma = strchr(str, ','); 2012 if (comma) { 2013 if (endptr != comma) 2014 return -EINVAL; 2015 2016 *ctl_fd_ack = strtoul(comma + 1, &endptr, 0); 2017 if (endptr == comma + 1 || *endptr != '\0') 2018 return -EINVAL; 2019 } 2020 2021 return 0; 2022 } 2023 2024 void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close) 2025 { 2026 if (*ctl_fd_close) { 2027 *ctl_fd_close = false; 2028 close(ctl_fd); 2029 if (ctl_fd_ack >= 0) 2030 close(ctl_fd_ack); 2031 } 2032 } 2033 2034 int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack) 2035 { 2036 if (fd == -1) { 2037 pr_debug("Control descriptor is not initialized\n"); 2038 return 0; 2039 } 2040 2041 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 2042 fdarray_flag__nonfilterable | 2043 fdarray_flag__non_perf_event); 2044 if (evlist->ctl_fd.pos < 0) { 2045 evlist->ctl_fd.pos = -1; 2046 pr_err("Failed to add ctl fd entry: %m\n"); 2047 return -1; 2048 } 2049 2050 evlist->ctl_fd.fd = fd; 2051 evlist->ctl_fd.ack = ack; 2052 2053 return 0; 2054 } 2055 2056 bool evlist__ctlfd_initialized(struct evlist *evlist) 2057 { 2058 return evlist->ctl_fd.pos >= 0; 2059 } 2060 2061 int evlist__finalize_ctlfd(struct evlist *evlist) 2062 { 2063 struct pollfd *entries = evlist->core.pollfd.entries; 2064 2065 if (!evlist__ctlfd_initialized(evlist)) 2066 return 0; 2067 2068 entries[evlist->ctl_fd.pos].fd = -1; 2069 entries[evlist->ctl_fd.pos].events = 0; 2070 entries[evlist->ctl_fd.pos].revents = 0; 2071 2072 evlist->ctl_fd.pos = -1; 2073 evlist->ctl_fd.ack = -1; 2074 evlist->ctl_fd.fd = -1; 2075 2076 return 0; 2077 } 2078 2079 static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd, 2080 char *cmd_data, size_t data_size) 2081 { 2082 int err; 2083 char c; 2084 size_t bytes_read = 0; 2085 2086 *cmd = EVLIST_CTL_CMD_UNSUPPORTED; 2087 memset(cmd_data, 0, data_size); 2088 data_size--; 2089 2090 do { 2091 err = read(evlist->ctl_fd.fd, &c, 1); 2092 if (err > 0) { 2093 if (c == '\n' || c == '\0') 2094 break; 2095 cmd_data[bytes_read++] = c; 2096 if (bytes_read == data_size) 2097 break; 2098 continue; 2099 } else if (err == -1) { 2100 if (errno == EINTR) 2101 continue; 2102 if (errno == EAGAIN || errno == EWOULDBLOCK) 2103 err = 0; 2104 else 2105 pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); 2106 } 2107 break; 2108 } while (1); 2109 2110 pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data, 2111 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0"); 2112 2113 if (bytes_read > 0) { 2114 if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG, 2115 (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { 2116 *cmd = EVLIST_CTL_CMD_ENABLE; 2117 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG, 2118 (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { 2119 *cmd = EVLIST_CTL_CMD_DISABLE; 2120 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG, 2121 (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { 2122 *cmd = EVLIST_CTL_CMD_SNAPSHOT; 2123 pr_debug("is snapshot\n"); 2124 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG, 2125 (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) { 2126 *cmd = EVLIST_CTL_CMD_EVLIST; 2127 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG, 2128 (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) { 2129 *cmd = EVLIST_CTL_CMD_STOP; 2130 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG, 2131 (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) { 2132 *cmd = EVLIST_CTL_CMD_PING; 2133 } 2134 } 2135 2136 return bytes_read ? (int)bytes_read : err; 2137 } 2138 2139 int evlist__ctlfd_ack(struct evlist *evlist) 2140 { 2141 int err; 2142 2143 if (evlist->ctl_fd.ack == -1) 2144 return 0; 2145 2146 err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, 2147 sizeof(EVLIST_CTL_CMD_ACK_TAG)); 2148 if (err == -1) 2149 pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); 2150 2151 return err; 2152 } 2153 2154 static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg) 2155 { 2156 char *data = cmd_data + cmd_size; 2157 2158 /* no argument */ 2159 if (!*data) 2160 return 0; 2161 2162 /* there's argument */ 2163 if (*data == ' ') { 2164 *arg = data + 1; 2165 return 1; 2166 } 2167 2168 /* malformed */ 2169 return -1; 2170 } 2171 2172 static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable) 2173 { 2174 struct evsel *evsel; 2175 char *name; 2176 int err; 2177 2178 err = get_cmd_arg(cmd_data, 2179 enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 : 2180 sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1, 2181 &name); 2182 if (err < 0) { 2183 pr_info("failed: wrong command\n"); 2184 return -1; 2185 } 2186 2187 if (err) { 2188 evsel = evlist__find_evsel_by_str(evlist, name); 2189 if (evsel) { 2190 if (enable) 2191 evlist__enable_evsel(evlist, name); 2192 else 2193 evlist__disable_evsel(evlist, name); 2194 pr_info("Event %s %s\n", evsel->name, 2195 enable ? "enabled" : "disabled"); 2196 } else { 2197 pr_info("failed: can't find '%s' event\n", name); 2198 } 2199 } else { 2200 if (enable) { 2201 evlist__enable(evlist); 2202 pr_info(EVLIST_ENABLED_MSG); 2203 } else { 2204 evlist__disable(evlist); 2205 pr_info(EVLIST_DISABLED_MSG); 2206 } 2207 } 2208 2209 return 0; 2210 } 2211 2212 static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data) 2213 { 2214 struct perf_attr_details details = { .verbose = false, }; 2215 struct evsel *evsel; 2216 char *arg; 2217 int err; 2218 2219 err = get_cmd_arg(cmd_data, 2220 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, 2221 &arg); 2222 if (err < 0) { 2223 pr_info("failed: wrong command\n"); 2224 return -1; 2225 } 2226 2227 if (err) { 2228 if (!strcmp(arg, "-v")) { 2229 details.verbose = true; 2230 } else if (!strcmp(arg, "-g")) { 2231 details.event_group = true; 2232 } else if (!strcmp(arg, "-F")) { 2233 details.freq = true; 2234 } else { 2235 pr_info("failed: wrong command\n"); 2236 return -1; 2237 } 2238 } 2239 2240 evlist__for_each_entry(evlist, evsel) 2241 evsel__fprintf(evsel, &details, stderr); 2242 2243 return 0; 2244 } 2245 2246 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) 2247 { 2248 int err = 0; 2249 char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; 2250 int ctlfd_pos = evlist->ctl_fd.pos; 2251 struct pollfd *entries = evlist->core.pollfd.entries; 2252 2253 if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) 2254 return 0; 2255 2256 if (entries[ctlfd_pos].revents & POLLIN) { 2257 err = evlist__ctlfd_recv(evlist, cmd, cmd_data, 2258 EVLIST_CTL_CMD_MAX_LEN); 2259 if (err > 0) { 2260 switch (*cmd) { 2261 case EVLIST_CTL_CMD_ENABLE: 2262 case EVLIST_CTL_CMD_DISABLE: 2263 err = evlist__ctlfd_enable(evlist, cmd_data, 2264 *cmd == EVLIST_CTL_CMD_ENABLE); 2265 break; 2266 case EVLIST_CTL_CMD_EVLIST: 2267 err = evlist__ctlfd_list(evlist, cmd_data); 2268 break; 2269 case EVLIST_CTL_CMD_SNAPSHOT: 2270 case EVLIST_CTL_CMD_STOP: 2271 case EVLIST_CTL_CMD_PING: 2272 break; 2273 case EVLIST_CTL_CMD_ACK: 2274 case EVLIST_CTL_CMD_UNSUPPORTED: 2275 default: 2276 pr_debug("ctlfd: unsupported %d\n", *cmd); 2277 break; 2278 } 2279 if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || 2280 *cmd == EVLIST_CTL_CMD_SNAPSHOT)) 2281 evlist__ctlfd_ack(evlist); 2282 } 2283 } 2284 2285 if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) 2286 evlist__finalize_ctlfd(evlist); 2287 else 2288 entries[ctlfd_pos].revents = 0; 2289 2290 return err; 2291 } 2292 2293 /** 2294 * struct event_enable_time - perf record -D/--delay single time range. 2295 * @start: start of time range to enable events in milliseconds 2296 * @end: end of time range to enable events in milliseconds 2297 * 2298 * N.B. this structure is also accessed as an array of int. 2299 */ 2300 struct event_enable_time { 2301 int start; 2302 int end; 2303 }; 2304 2305 static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first) 2306 { 2307 const char *fmt = first ? "%u - %u %n" : " , %u - %u %n"; 2308 int ret, start, end, n; 2309 2310 ret = sscanf(str, fmt, &start, &end, &n); 2311 if (ret != 2 || end <= start) 2312 return -EINVAL; 2313 if (range) { 2314 range->start = start; 2315 range->end = end; 2316 } 2317 return n; 2318 } 2319 2320 static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range) 2321 { 2322 int incr = !!range; 2323 bool first = true; 2324 ssize_t ret, cnt; 2325 2326 for (cnt = 0; *str; cnt++) { 2327 ret = parse_event_enable_time(str, range, first); 2328 if (ret < 0) 2329 return ret; 2330 /* Check no overlap */ 2331 if (!first && range && range->start <= range[-1].end) 2332 return -EINVAL; 2333 str += ret; 2334 range += incr; 2335 first = false; 2336 } 2337 return cnt; 2338 } 2339 2340 /** 2341 * struct event_enable_timer - control structure for perf record -D/--delay. 2342 * @evlist: event list 2343 * @times: time ranges that events are enabled (N.B. this is also accessed as an 2344 * array of int) 2345 * @times_cnt: number of time ranges 2346 * @timerfd: timer file descriptor 2347 * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray) 2348 * @times_step: current position in (int *)@times)[], 2349 * refer event_enable_timer__process() 2350 * 2351 * Note, this structure is only used when there are time ranges, not when there 2352 * is only an initial delay. 2353 */ 2354 struct event_enable_timer { 2355 struct evlist *evlist; 2356 struct event_enable_time *times; 2357 size_t times_cnt; 2358 int timerfd; 2359 int pollfd_pos; 2360 size_t times_step; 2361 }; 2362 2363 static int str_to_delay(const char *str) 2364 { 2365 char *endptr; 2366 long d; 2367 2368 d = strtol(str, &endptr, 10); 2369 if (*endptr || d > INT_MAX || d < -1) 2370 return 0; 2371 return d; 2372 } 2373 2374 int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts, 2375 const char *str, int unset) 2376 { 2377 enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event; 2378 struct event_enable_timer *eet; 2379 ssize_t times_cnt; 2380 ssize_t ret; 2381 int err; 2382 2383 if (unset) 2384 return 0; 2385 2386 opts->target.initial_delay = str_to_delay(str); 2387 if (opts->target.initial_delay) 2388 return 0; 2389 2390 ret = parse_event_enable_times(str, NULL); 2391 if (ret < 0) 2392 return ret; 2393 2394 times_cnt = ret; 2395 if (times_cnt == 0) 2396 return -EINVAL; 2397 2398 eet = zalloc(sizeof(*eet)); 2399 if (!eet) 2400 return -ENOMEM; 2401 2402 eet->times = calloc(times_cnt, sizeof(*eet->times)); 2403 if (!eet->times) { 2404 err = -ENOMEM; 2405 goto free_eet; 2406 } 2407 2408 if (parse_event_enable_times(str, eet->times) != times_cnt) { 2409 err = -EINVAL; 2410 goto free_eet_times; 2411 } 2412 2413 eet->times_cnt = times_cnt; 2414 2415 eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); 2416 if (eet->timerfd == -1) { 2417 err = -errno; 2418 pr_err("timerfd_create failed: %m\n"); 2419 goto free_eet_times; 2420 } 2421 2422 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); 2423 if (eet->pollfd_pos < 0) { 2424 err = eet->pollfd_pos; 2425 goto close_timerfd; 2426 } 2427 2428 eet->evlist = evlist; 2429 evlist->eet = eet; 2430 opts->target.initial_delay = eet->times[0].start; 2431 2432 return 0; 2433 2434 close_timerfd: 2435 close(eet->timerfd); 2436 free_eet_times: 2437 zfree(&eet->times); 2438 free_eet: 2439 free(eet); 2440 return err; 2441 } 2442 2443 static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms) 2444 { 2445 struct itimerspec its = { 2446 .it_value.tv_sec = ms / MSEC_PER_SEC, 2447 .it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC, 2448 }; 2449 int err = 0; 2450 2451 if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) { 2452 err = -errno; 2453 pr_err("timerfd_settime failed: %m\n"); 2454 } 2455 return err; 2456 } 2457 2458 int event_enable_timer__start(struct event_enable_timer *eet) 2459 { 2460 int ms; 2461 2462 if (!eet) 2463 return 0; 2464 2465 ms = eet->times[0].end - eet->times[0].start; 2466 eet->times_step = 1; 2467 2468 return event_enable_timer__set_timer(eet, ms); 2469 } 2470 2471 int event_enable_timer__process(struct event_enable_timer *eet) 2472 { 2473 struct pollfd *entries; 2474 short revents; 2475 2476 if (!eet) 2477 return 0; 2478 2479 entries = eet->evlist->core.pollfd.entries; 2480 revents = entries[eet->pollfd_pos].revents; 2481 entries[eet->pollfd_pos].revents = 0; 2482 2483 if (revents & POLLIN) { 2484 size_t step = eet->times_step; 2485 size_t pos = step / 2; 2486 2487 if (step & 1) { 2488 evlist__disable_non_dummy(eet->evlist); 2489 pr_info(EVLIST_DISABLED_MSG); 2490 if (pos >= eet->times_cnt - 1) { 2491 /* Disarm timer */ 2492 event_enable_timer__set_timer(eet, 0); 2493 return 1; /* Stop */ 2494 } 2495 } else { 2496 evlist__enable_non_dummy(eet->evlist); 2497 pr_info(EVLIST_ENABLED_MSG); 2498 } 2499 2500 step += 1; 2501 pos = step / 2; 2502 2503 if (pos < eet->times_cnt) { 2504 int *times = (int *)eet->times; /* Accessing 'times' as array of int */ 2505 int ms = times[step] - times[step - 1]; 2506 2507 eet->times_step = step; 2508 return event_enable_timer__set_timer(eet, ms); 2509 } 2510 } 2511 2512 return 0; 2513 } 2514 2515 void event_enable_timer__exit(struct event_enable_timer **ep) 2516 { 2517 if (!ep || !*ep) 2518 return; 2519 zfree(&(*ep)->times); 2520 zfree(ep); 2521 } 2522 2523 struct evsel *evlist__find_evsel(struct evlist *evlist, int idx) 2524 { 2525 struct evsel *evsel; 2526 2527 evlist__for_each_entry(evlist, evsel) { 2528 if (evsel->core.idx == idx) 2529 return evsel; 2530 } 2531 return NULL; 2532 } 2533 2534 void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length) 2535 { 2536 struct evsel *evsel, *leader = NULL; 2537 bool first = true; 2538 2539 evlist__for_each_entry(evlist, evsel) { 2540 struct evsel *new_leader = evsel__leader(evsel); 2541 2542 if (evsel__is_dummy_event(evsel)) 2543 continue; 2544 2545 if (leader != new_leader && leader && leader->core.nr_members > 1) 2546 strbuf_addch(sb, '}'); 2547 2548 if (!first) 2549 strbuf_addch(sb, ','); 2550 2551 if (sb->len > max_length) { 2552 strbuf_addstr(sb, "..."); 2553 return; 2554 } 2555 if (leader != new_leader && new_leader->core.nr_members > 1) 2556 strbuf_addch(sb, '{'); 2557 2558 strbuf_addstr(sb, evsel__name(evsel)); 2559 first = false; 2560 leader = new_leader; 2561 } 2562 if (leader && leader->core.nr_members > 1) 2563 strbuf_addch(sb, '}'); 2564 } 2565 2566 void evlist__check_mem_load_aux(struct evlist *evlist) 2567 { 2568 struct evsel *leader, *evsel, *pos; 2569 2570 /* 2571 * For some platforms, the 'mem-loads' event is required to use 2572 * together with 'mem-loads-aux' within a group and 'mem-loads-aux' 2573 * must be the group leader. Now we disable this group before reporting 2574 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry 2575 * any valid memory load information. 2576 */ 2577 evlist__for_each_entry(evlist, evsel) { 2578 leader = evsel__leader(evsel); 2579 if (leader == evsel) 2580 continue; 2581 2582 if (leader->name && strstr(leader->name, "mem-loads-aux")) { 2583 for_each_group_evsel(pos, leader) { 2584 evsel__set_leader(pos, pos); 2585 pos->core.nr_members = 0; 2586 } 2587 } 2588 } 2589 } 2590 2591 /** 2592 * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs 2593 * and warn if the user CPU list is inapplicable for the event's PMU's 2594 * CPUs. Not core PMUs list a CPU in sysfs, but this may be overwritten by a 2595 * user requested CPU and so any online CPU is applicable. Core PMUs handle 2596 * events on the CPUs in their list and otherwise the event isn't supported. 2597 * @evlist: The list of events being checked. 2598 * @cpu_list: The user provided list of CPUs. 2599 */ 2600 void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list) 2601 { 2602 struct perf_cpu_map *user_requested_cpus; 2603 struct evsel *pos; 2604 2605 if (!cpu_list) 2606 return; 2607 2608 user_requested_cpus = perf_cpu_map__new(cpu_list); 2609 if (!user_requested_cpus) 2610 return; 2611 2612 evlist__for_each_entry(evlist, pos) { 2613 evsel__warn_user_requested_cpus(pos, user_requested_cpus); 2614 } 2615 perf_cpu_map__put(user_requested_cpus); 2616 } 2617 2618 /* Should uniquify be disabled for the evlist? */ 2619 static bool evlist__disable_uniquify(const struct evlist *evlist) 2620 { 2621 struct evsel *counter; 2622 struct perf_pmu *last_pmu = NULL; 2623 bool first = true; 2624 2625 evlist__for_each_entry(evlist, counter) { 2626 /* If PMUs vary then uniquify can be useful. */ 2627 if (!first && counter->pmu != last_pmu) 2628 return false; 2629 first = false; 2630 if (counter->pmu) { 2631 /* Allow uniquify for uncore PMUs. */ 2632 if (!counter->pmu->is_core) 2633 return false; 2634 /* Keep hybrid event names uniquified for clarity. */ 2635 if (perf_pmus__num_core_pmus() > 1) 2636 return false; 2637 } 2638 last_pmu = counter->pmu; 2639 } 2640 return true; 2641 } 2642 2643 static bool evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config) 2644 { 2645 struct evsel *counter; 2646 bool needs_uniquify = false; 2647 2648 if (evlist__disable_uniquify(evlist)) { 2649 evlist__for_each_entry(evlist, counter) 2650 counter->uniquified_name = true; 2651 return false; 2652 } 2653 2654 evlist__for_each_entry(evlist, counter) { 2655 if (evsel__set_needs_uniquify(counter, config)) 2656 needs_uniquify = true; 2657 } 2658 return needs_uniquify; 2659 } 2660 2661 void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config) 2662 { 2663 if (evlist__set_needs_uniquify(evlist, config)) { 2664 struct evsel *pos; 2665 2666 evlist__for_each_entry(evlist, pos) 2667 evsel__uniquify_counter(pos); 2668 } 2669 } 2670 2671 bool evlist__has_bpf_output(struct evlist *evlist) 2672 { 2673 struct evsel *evsel; 2674 2675 evlist__for_each_entry(evlist, evsel) { 2676 if (evsel__is_bpf_output(evsel)) 2677 return true; 2678 } 2679 2680 return false; 2681 } 2682 2683 bool evlist__needs_bpf_sb_event(struct evlist *evlist) 2684 { 2685 struct evsel *evsel; 2686 2687 evlist__for_each_entry(evlist, evsel) { 2688 if (evsel__is_dummy_event(evsel)) 2689 continue; 2690 if (!evsel->core.attr.exclude_kernel) 2691 return true; 2692 } 2693 2694 return false; 2695 } 2696