1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 #include <api/fs/fs.h> 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <poll.h> 12 #include "cpumap.h" 13 #include "util/mmap.h" 14 #include "thread_map.h" 15 #include "target.h" 16 #include "evlist.h" 17 #include "evsel.h" 18 #include "record.h" 19 #include "debug.h" 20 #include "units.h" 21 #include "bpf_counter.h" 22 #include <internal/lib.h> // page_size 23 #include "affinity.h" 24 #include "../perf.h" 25 #include "asm/bug.h" 26 #include "bpf-event.h" 27 #include "util/event.h" 28 #include "util/string2.h" 29 #include "util/perf_api_probe.h" 30 #include "util/evsel_fprintf.h" 31 #include "util/pmu.h" 32 #include "util/sample.h" 33 #include "util/bpf-filter.h" 34 #include "util/stat.h" 35 #include "util/util.h" 36 #include "util/env.h" 37 #include "util/intel-tpebs.h" 38 #include "util/metricgroup.h" 39 #include "util/strbuf.h" 40 #include <signal.h> 41 #include <unistd.h> 42 #include <sched.h> 43 #include <stdlib.h> 44 45 #include "parse-events.h" 46 #include <subcmd/parse-options.h> 47 48 #include <fcntl.h> 49 #include <sys/ioctl.h> 50 #include <sys/mman.h> 51 #include <sys/prctl.h> 52 #include <sys/timerfd.h> 53 #include <sys/wait.h> 54 55 #include <linux/bitops.h> 56 #include <linux/hash.h> 57 #include <linux/log2.h> 58 #include <linux/err.h> 59 #include <linux/string.h> 60 #include <linux/time64.h> 61 #include <linux/zalloc.h> 62 #include <perf/evlist.h> 63 #include <perf/evsel.h> 64 #include <perf/cpumap.h> 65 #include <perf/mmap.h> 66 67 #include <internal/xyarray.h> 68 69 #ifdef LACKS_SIGQUEUE_PROTOTYPE 70 int sigqueue(pid_t pid, int sig, const union sigval value); 71 #endif 72 73 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 74 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 75 76 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, 77 struct perf_thread_map *threads) 78 { 79 perf_evlist__init(&evlist->core); 80 perf_evlist__set_maps(&evlist->core, cpus, threads); 81 evlist->workload.pid = -1; 82 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; 83 evlist->ctl_fd.fd = -1; 84 evlist->ctl_fd.ack = -1; 85 evlist->ctl_fd.pos = -1; 86 evlist->nr_br_cntr = -1; 87 metricgroup__rblist_init(&evlist->metric_events); 88 } 89 90 struct evlist *evlist__new(void) 91 { 92 struct evlist *evlist = zalloc(sizeof(*evlist)); 93 94 if (evlist != NULL) 95 evlist__init(evlist, NULL, NULL); 96 97 return evlist; 98 } 99 100 struct evlist *evlist__new_default(void) 101 { 102 struct evlist *evlist = evlist__new(); 103 bool can_profile_kernel; 104 struct perf_pmu *pmu = NULL; 105 106 if (!evlist) 107 return NULL; 108 109 can_profile_kernel = perf_event_paranoid_check(1); 110 111 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 112 char buf[256]; 113 int err; 114 115 snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name, 116 can_profile_kernel ? 
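		/*
		 * "P" asks for the highest available precision; when kernel
		 * profiling is not permitted, "u" additionally restricts the
		 * event to user space.
		 */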
"P" : "Pu"); 117 err = parse_event(evlist, buf); 118 if (err) { 119 evlist__delete(evlist); 120 return NULL; 121 } 122 } 123 124 if (evlist->core.nr_entries > 1) { 125 struct evsel *evsel; 126 127 evlist__for_each_entry(evlist, evsel) 128 evsel__set_sample_id(evsel, /*can_sample_identifier=*/false); 129 } 130 131 return evlist; 132 } 133 134 struct evlist *evlist__new_dummy(void) 135 { 136 struct evlist *evlist = evlist__new(); 137 138 if (evlist && evlist__add_dummy(evlist)) { 139 evlist__delete(evlist); 140 evlist = NULL; 141 } 142 143 return evlist; 144 } 145 146 /** 147 * evlist__set_id_pos - set the positions of event ids. 148 * @evlist: selected event list 149 * 150 * Events with compatible sample types all have the same id_pos 151 * and is_pos. For convenience, put a copy on evlist. 152 */ 153 void evlist__set_id_pos(struct evlist *evlist) 154 { 155 struct evsel *first = evlist__first(evlist); 156 157 evlist->id_pos = first->id_pos; 158 evlist->is_pos = first->is_pos; 159 } 160 161 static void evlist__update_id_pos(struct evlist *evlist) 162 { 163 struct evsel *evsel; 164 165 evlist__for_each_entry(evlist, evsel) 166 evsel__calc_id_pos(evsel); 167 168 evlist__set_id_pos(evlist); 169 } 170 171 static void evlist__purge(struct evlist *evlist) 172 { 173 struct evsel *pos, *n; 174 175 evlist__for_each_entry_safe(evlist, n, pos) { 176 list_del_init(&pos->core.node); 177 pos->evlist = NULL; 178 evsel__delete(pos); 179 } 180 181 evlist->core.nr_entries = 0; 182 } 183 184 void evlist__exit(struct evlist *evlist) 185 { 186 metricgroup__rblist_exit(&evlist->metric_events); 187 event_enable_timer__exit(&evlist->eet); 188 zfree(&evlist->mmap); 189 zfree(&evlist->overwrite_mmap); 190 perf_evlist__exit(&evlist->core); 191 } 192 193 void evlist__delete(struct evlist *evlist) 194 { 195 if (evlist == NULL) 196 return; 197 198 evlist__free_stats(evlist); 199 evlist__munmap(evlist); 200 evlist__close(evlist); 201 evlist__purge(evlist); 202 evlist__exit(evlist); 203 free(evlist); 204 } 205 206 void evlist__add(struct evlist *evlist, struct evsel *entry) 207 { 208 perf_evlist__add(&evlist->core, &entry->core); 209 entry->evlist = evlist; 210 entry->tracking = !entry->core.idx; 211 212 if (evlist->core.nr_entries == 1) 213 evlist__set_id_pos(evlist); 214 } 215 216 void evlist__remove(struct evlist *evlist, struct evsel *evsel) 217 { 218 evsel->evlist = NULL; 219 perf_evlist__remove(&evlist->core, &evsel->core); 220 } 221 222 void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list) 223 { 224 while (!list_empty(list)) { 225 struct evsel *evsel, *temp, *leader = NULL; 226 227 __evlist__for_each_entry_safe(list, temp, evsel) { 228 list_del_init(&evsel->core.node); 229 evlist__add(evlist, evsel); 230 leader = evsel; 231 break; 232 } 233 234 __evlist__for_each_entry_safe(list, temp, evsel) { 235 if (evsel__has_leader(evsel, leader)) { 236 list_del_init(&evsel->core.node); 237 evlist__add(evlist, evsel); 238 } 239 } 240 } 241 } 242 243 int __evlist__set_tracepoints_handlers(struct evlist *evlist, 244 const struct evsel_str_handler *assocs, size_t nr_assocs) 245 { 246 size_t i; 247 int err; 248 249 for (i = 0; i < nr_assocs; i++) { 250 // Adding a handler for an event not in this evlist, just ignore it. 
251 struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name); 252 if (evsel == NULL) 253 continue; 254 255 err = -EEXIST; 256 if (evsel->handler != NULL) 257 goto out; 258 evsel->handler = assocs[i].handler; 259 } 260 261 err = 0; 262 out: 263 return err; 264 } 265 266 static void evlist__set_leader(struct evlist *evlist) 267 { 268 perf_evlist__set_leader(&evlist->core); 269 } 270 271 static struct evsel *evlist__dummy_event(struct evlist *evlist) 272 { 273 struct perf_event_attr attr = { 274 .type = PERF_TYPE_SOFTWARE, 275 .config = PERF_COUNT_SW_DUMMY, 276 .size = sizeof(attr), /* to capture ABI version */ 277 /* Avoid frequency mode for dummy events to avoid associated timers. */ 278 .freq = 0, 279 .sample_period = 1, 280 }; 281 282 return evsel__new_idx(&attr, evlist->core.nr_entries); 283 } 284 285 int evlist__add_dummy(struct evlist *evlist) 286 { 287 struct evsel *evsel = evlist__dummy_event(evlist); 288 289 if (evsel == NULL) 290 return -ENOMEM; 291 292 evlist__add(evlist, evsel); 293 return 0; 294 } 295 296 struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide) 297 { 298 struct evsel *evsel = evlist__dummy_event(evlist); 299 300 if (!evsel) 301 return NULL; 302 303 evsel->core.attr.exclude_kernel = 1; 304 evsel->core.attr.exclude_guest = 1; 305 evsel->core.attr.exclude_hv = 1; 306 evsel->core.system_wide = system_wide; 307 evsel->no_aux_samples = true; 308 evsel->name = strdup("dummy:u"); 309 310 evlist__add(evlist, evsel); 311 return evsel; 312 } 313 314 #ifdef HAVE_LIBTRACEEVENT 315 struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide) 316 { 317 struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0, 318 /*format=*/true); 319 320 if (IS_ERR(evsel)) 321 return evsel; 322 323 evsel__set_sample_bit(evsel, CPU); 324 evsel__set_sample_bit(evsel, TIME); 325 326 evsel->core.system_wide = system_wide; 327 evsel->no_aux_samples = true; 328 329 evlist__add(evlist, evsel); 330 return evsel; 331 } 332 #endif 333 334 struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name) 335 { 336 struct evsel *evsel; 337 338 evlist__for_each_entry(evlist, evsel) { 339 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) && 340 (strcmp(evsel->name, name) == 0)) 341 return evsel; 342 } 343 344 return NULL; 345 } 346 347 #ifdef HAVE_LIBTRACEEVENT 348 int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler) 349 { 350 struct evsel *evsel = evsel__newtp(sys, name); 351 352 if (IS_ERR(evsel)) 353 return -1; 354 355 evsel->handler = handler; 356 evlist__add(evlist, evsel); 357 return 0; 358 } 359 #endif 360 361 struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity) 362 { 363 struct evlist_cpu_iterator itr = { 364 .container = evlist, 365 .evsel = NULL, 366 .cpu_map_idx = 0, 367 .evlist_cpu_map_idx = 0, 368 .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), 369 .cpu = (struct perf_cpu){ .cpu = -1}, 370 .affinity = affinity, 371 }; 372 373 if (evlist__empty(evlist)) { 374 /* Ensure the empty list doesn't iterate. */ 375 itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr; 376 } else { 377 itr.evsel = evlist__first(evlist); 378 if (itr.affinity) { 379 itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); 380 affinity__set(itr.affinity, itr.cpu.cpu); 381 itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); 382 /* 383 * If this CPU isn't in the evsel's cpu map then advance 384 * through the list. 
385 */ 386 if (itr.cpu_map_idx == -1) 387 evlist_cpu_iterator__next(&itr); 388 } 389 } 390 return itr; 391 } 392 393 void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr) 394 { 395 while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) { 396 evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel); 397 evlist_cpu_itr->cpu_map_idx = 398 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 399 evlist_cpu_itr->cpu); 400 if (evlist_cpu_itr->cpu_map_idx != -1) 401 return; 402 } 403 evlist_cpu_itr->evlist_cpu_map_idx++; 404 if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) { 405 evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container); 406 evlist_cpu_itr->cpu = 407 perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, 408 evlist_cpu_itr->evlist_cpu_map_idx); 409 if (evlist_cpu_itr->affinity) 410 affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu); 411 evlist_cpu_itr->cpu_map_idx = 412 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 413 evlist_cpu_itr->cpu); 414 /* 415 * If this CPU isn't in the evsel's cpu map then advance through 416 * the list. 417 */ 418 if (evlist_cpu_itr->cpu_map_idx == -1) 419 evlist_cpu_iterator__next(evlist_cpu_itr); 420 } 421 } 422 423 bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr) 424 { 425 return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr; 426 } 427 428 static int evsel__strcmp(struct evsel *pos, char *evsel_name) 429 { 430 if (!evsel_name) 431 return 0; 432 if (evsel__is_dummy_event(pos)) 433 return 1; 434 return !evsel__name_is(pos, evsel_name); 435 } 436 437 static int evlist__is_enabled(struct evlist *evlist) 438 { 439 struct evsel *pos; 440 441 evlist__for_each_entry(evlist, pos) { 442 if (!evsel__is_group_leader(pos) || !pos->core.fd) 443 continue; 444 /* If at least one event is enabled, evlist is enabled. */ 445 if (!pos->disabled) 446 return true; 447 } 448 return false; 449 } 450 451 static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy) 452 { 453 struct evsel *pos; 454 struct evlist_cpu_iterator evlist_cpu_itr; 455 struct affinity saved_affinity, *affinity = NULL; 456 bool has_imm = false; 457 458 // See explanation in evlist__close() 459 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { 460 if (affinity__setup(&saved_affinity) < 0) 461 return; 462 affinity = &saved_affinity; 463 } 464 465 /* Disable 'immediate' events last */ 466 for (int imm = 0; imm <= 1; imm++) { 467 evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) { 468 pos = evlist_cpu_itr.evsel; 469 if (evsel__strcmp(pos, evsel_name)) 470 continue; 471 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) 472 continue; 473 if (excl_dummy && evsel__is_dummy_event(pos)) 474 continue; 475 if (pos->immediate) 476 has_imm = true; 477 if (pos->immediate != imm) 478 continue; 479 evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 480 } 481 if (!has_imm) 482 break; 483 } 484 485 affinity__cleanup(affinity); 486 evlist__for_each_entry(evlist, pos) { 487 if (evsel__strcmp(pos, evsel_name)) 488 continue; 489 if (!evsel__is_group_leader(pos) || !pos->core.fd) 490 continue; 491 if (excl_dummy && evsel__is_dummy_event(pos)) 492 continue; 493 pos->disabled = true; 494 } 495 496 /* 497 * If we disabled only single event, we need to check 498 * the enabled state of the evlist manually. 
499 */ 500 if (evsel_name) 501 evlist->enabled = evlist__is_enabled(evlist); 502 else 503 evlist->enabled = false; 504 } 505 506 void evlist__disable(struct evlist *evlist) 507 { 508 __evlist__disable(evlist, NULL, false); 509 } 510 511 void evlist__disable_non_dummy(struct evlist *evlist) 512 { 513 __evlist__disable(evlist, NULL, true); 514 } 515 516 void evlist__disable_evsel(struct evlist *evlist, char *evsel_name) 517 { 518 __evlist__disable(evlist, evsel_name, false); 519 } 520 521 static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy) 522 { 523 struct evsel *pos; 524 struct evlist_cpu_iterator evlist_cpu_itr; 525 struct affinity saved_affinity, *affinity = NULL; 526 527 // See explanation in evlist__close() 528 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { 529 if (affinity__setup(&saved_affinity) < 0) 530 return; 531 affinity = &saved_affinity; 532 } 533 534 evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) { 535 pos = evlist_cpu_itr.evsel; 536 if (evsel__strcmp(pos, evsel_name)) 537 continue; 538 if (!evsel__is_group_leader(pos) || !pos->core.fd) 539 continue; 540 if (excl_dummy && evsel__is_dummy_event(pos)) 541 continue; 542 evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 543 } 544 affinity__cleanup(affinity); 545 evlist__for_each_entry(evlist, pos) { 546 if (evsel__strcmp(pos, evsel_name)) 547 continue; 548 if (!evsel__is_group_leader(pos) || !pos->core.fd) 549 continue; 550 if (excl_dummy && evsel__is_dummy_event(pos)) 551 continue; 552 pos->disabled = false; 553 } 554 555 /* 556 * Even single event sets the 'enabled' for evlist, 557 * so the toggle can work properly and toggle to 558 * 'disabled' state. 559 */ 560 evlist->enabled = true; 561 } 562 563 void evlist__enable(struct evlist *evlist) 564 { 565 __evlist__enable(evlist, NULL, false); 566 } 567 568 void evlist__enable_non_dummy(struct evlist *evlist) 569 { 570 __evlist__enable(evlist, NULL, true); 571 } 572 573 void evlist__enable_evsel(struct evlist *evlist, char *evsel_name) 574 { 575 __evlist__enable(evlist, evsel_name, false); 576 } 577 578 void evlist__toggle_enable(struct evlist *evlist) 579 { 580 (evlist->enabled ? 
evlist__disable : evlist__enable)(evlist); 581 } 582 583 int evlist__add_pollfd(struct evlist *evlist, int fd) 584 { 585 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default); 586 } 587 588 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask) 589 { 590 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask); 591 } 592 593 #ifdef HAVE_EVENTFD_SUPPORT 594 int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd) 595 { 596 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 597 fdarray_flag__nonfilterable | 598 fdarray_flag__non_perf_event); 599 } 600 #endif 601 602 int evlist__poll(struct evlist *evlist, int timeout) 603 { 604 return perf_evlist__poll(&evlist->core, timeout); 605 } 606 607 struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id) 608 { 609 struct hlist_head *head; 610 struct perf_sample_id *sid; 611 int hash; 612 613 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 614 head = &evlist->core.heads[hash]; 615 616 hlist_for_each_entry(sid, head, node) 617 if (sid->id == id) 618 return sid; 619 620 return NULL; 621 } 622 623 struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id) 624 { 625 struct perf_sample_id *sid; 626 627 if (evlist->core.nr_entries == 1 || !id) 628 return evlist__first(evlist); 629 630 sid = evlist__id2sid(evlist, id); 631 if (sid) 632 return container_of(sid->evsel, struct evsel, core); 633 634 if (!evlist__sample_id_all(evlist)) 635 return evlist__first(evlist); 636 637 return NULL; 638 } 639 640 struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id) 641 { 642 struct perf_sample_id *sid; 643 644 if (!id) 645 return NULL; 646 647 sid = evlist__id2sid(evlist, id); 648 if (sid) 649 return container_of(sid->evsel, struct evsel, core); 650 651 return NULL; 652 } 653 654 static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id) 655 { 656 const __u64 *array = event->sample.array; 657 ssize_t n; 658 659 n = (event->header.size - sizeof(event->header)) >> 3; 660 661 if (event->header.type == PERF_RECORD_SAMPLE) { 662 if (evlist->id_pos >= n) 663 return -1; 664 *id = array[evlist->id_pos]; 665 } else { 666 if (evlist->is_pos > n) 667 return -1; 668 n -= evlist->is_pos; 669 *id = array[n]; 670 } 671 return 0; 672 } 673 674 struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event) 675 { 676 struct evsel *first = evlist__first(evlist); 677 struct hlist_head *head; 678 struct perf_sample_id *sid; 679 int hash; 680 u64 id; 681 682 if (evlist->core.nr_entries == 1) 683 return first; 684 685 if (!first->core.attr.sample_id_all && 686 event->header.type != PERF_RECORD_SAMPLE) 687 return first; 688 689 if (evlist__event2id(evlist, event, &id)) 690 return NULL; 691 692 /* Synthesized events have an id of zero */ 693 if (!id) 694 return first; 695 696 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 697 head = &evlist->core.heads[hash]; 698 699 hlist_for_each_entry(sid, head, node) { 700 if (sid->id == id) 701 return container_of(sid->evsel, struct evsel, core); 702 } 703 return NULL; 704 } 705 706 static int evlist__set_paused(struct evlist *evlist, bool value) 707 { 708 int i; 709 710 if (!evlist->overwrite_mmap) 711 return 0; 712 713 for (i = 0; i < evlist->core.nr_mmaps; i++) { 714 int fd = evlist->overwrite_mmap[i].core.fd; 715 int err; 716 717 if (fd < 0) 718 continue; 719 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 
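			/*
			 * PERF_EVENT_IOC_PAUSE_OUTPUT: a non-zero argument pauses
			 * writing into the overwritable ring buffer, zero resumes it.
			 */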
1 : 0); 720 if (err) 721 return err; 722 } 723 return 0; 724 } 725 726 static int evlist__pause(struct evlist *evlist) 727 { 728 return evlist__set_paused(evlist, true); 729 } 730 731 static int evlist__resume(struct evlist *evlist) 732 { 733 return evlist__set_paused(evlist, false); 734 } 735 736 static void evlist__munmap_nofree(struct evlist *evlist) 737 { 738 int i; 739 740 if (evlist->mmap) 741 for (i = 0; i < evlist->core.nr_mmaps; i++) 742 perf_mmap__munmap(&evlist->mmap[i].core); 743 744 if (evlist->overwrite_mmap) 745 for (i = 0; i < evlist->core.nr_mmaps; i++) 746 perf_mmap__munmap(&evlist->overwrite_mmap[i].core); 747 } 748 749 void evlist__munmap(struct evlist *evlist) 750 { 751 evlist__munmap_nofree(evlist); 752 zfree(&evlist->mmap); 753 zfree(&evlist->overwrite_mmap); 754 } 755 756 static void perf_mmap__unmap_cb(struct perf_mmap *map) 757 { 758 struct mmap *m = container_of(map, struct mmap, core); 759 760 mmap__munmap(m); 761 } 762 763 static struct mmap *evlist__alloc_mmap(struct evlist *evlist, 764 bool overwrite) 765 { 766 int i; 767 struct mmap *map; 768 769 map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); 770 if (!map) 771 return NULL; 772 773 for (i = 0; i < evlist->core.nr_mmaps; i++) { 774 struct perf_mmap *prev = i ? &map[i - 1].core : NULL; 775 776 /* 777 * When the perf_mmap() call is made we grab one refcount, plus 778 * one extra to let perf_mmap__consume() get the last 779 * events after all real references (perf_mmap__get()) are 780 * dropped. 781 * 782 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and 783 * thus does perf_mmap__get() on it. 784 */ 785 perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb); 786 } 787 788 return map; 789 } 790 791 static void 792 perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist, 793 struct perf_evsel *_evsel, 794 struct perf_mmap_param *_mp, 795 int idx) 796 { 797 struct evlist *evlist = container_of(_evlist, struct evlist, core); 798 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 799 struct evsel *evsel = container_of(_evsel, struct evsel, core); 800 801 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx); 802 } 803 804 static struct perf_mmap* 805 perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx) 806 { 807 struct evlist *evlist = container_of(_evlist, struct evlist, core); 808 struct mmap *maps; 809 810 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap; 811 812 if (!maps) { 813 maps = evlist__alloc_mmap(evlist, overwrite); 814 if (!maps) 815 return NULL; 816 817 if (overwrite) { 818 evlist->overwrite_mmap = maps; 819 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) 820 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); 821 } else { 822 evlist->mmap = maps; 823 } 824 } 825 826 return &maps[idx].core; 827 } 828 829 static int 830 perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp, 831 int output, struct perf_cpu cpu) 832 { 833 struct mmap *map = container_of(_map, struct mmap, core); 834 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 835 836 return mmap__mmap(map, mp, output, cpu); 837 } 838 839 unsigned long perf_event_mlock_kb_in_pages(void) 840 { 841 unsigned long pages; 842 int max; 843 844 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { 845 /* 846 * Pick a once upon a time good value, i.e. things look 847 * strange since we can't read a sysctl value, but lets not 848 * die yet... 
849 */ 850 max = 512; 851 } else { 852 max -= (page_size / 1024); 853 } 854 855 pages = (max * 1024) / page_size; 856 if (!is_power_of_2(pages)) 857 pages = rounddown_pow_of_two(pages); 858 859 return pages; 860 } 861 862 size_t evlist__mmap_size(unsigned long pages) 863 { 864 if (pages == UINT_MAX) 865 pages = perf_event_mlock_kb_in_pages(); 866 else if (!is_power_of_2(pages)) 867 return 0; 868 869 return (pages + 1) * page_size; 870 } 871 872 static long parse_pages_arg(const char *str, unsigned long min, 873 unsigned long max) 874 { 875 unsigned long pages, val; 876 static struct parse_tag tags[] = { 877 { .tag = 'B', .mult = 1 }, 878 { .tag = 'K', .mult = 1 << 10 }, 879 { .tag = 'M', .mult = 1 << 20 }, 880 { .tag = 'G', .mult = 1 << 30 }, 881 { .tag = 0 }, 882 }; 883 884 if (str == NULL) 885 return -EINVAL; 886 887 val = parse_tag_value(str, tags); 888 if (val != (unsigned long) -1) { 889 /* we got file size value */ 890 pages = PERF_ALIGN(val, page_size) / page_size; 891 } else { 892 /* we got pages count value */ 893 char *eptr; 894 pages = strtoul(str, &eptr, 10); 895 if (*eptr != '\0') 896 return -EINVAL; 897 } 898 899 if (pages == 0 && min == 0) { 900 /* leave number of pages at 0 */ 901 } else if (!is_power_of_2(pages)) { 902 char buf[100]; 903 904 /* round pages up to next power of 2 */ 905 pages = roundup_pow_of_two(pages); 906 if (!pages) 907 return -EINVAL; 908 909 unit_number__scnprintf(buf, sizeof(buf), pages * page_size); 910 pr_info("rounding mmap pages size to %s (%lu pages)\n", 911 buf, pages); 912 } 913 914 if (pages > max) 915 return -EINVAL; 916 917 return pages; 918 } 919 920 int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) 921 { 922 unsigned long max = UINT_MAX; 923 long pages; 924 925 if (max > SIZE_MAX / page_size) 926 max = SIZE_MAX / page_size; 927 928 pages = parse_pages_arg(str, 1, max); 929 if (pages < 0) { 930 pr_err("Invalid argument for --mmap_pages/-m\n"); 931 return -1; 932 } 933 934 *mmap_pages = pages; 935 return 0; 936 } 937 938 int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused) 939 { 940 return __evlist__parse_mmap_pages(opt->value, str); 941 } 942 943 /** 944 * evlist__mmap_ex - Create mmaps to receive events. 945 * @evlist: list of events 946 * @pages: map length in pages 947 * @overwrite: overwrite older events? 948 * @auxtrace_pages - auxtrace map length in pages 949 * @auxtrace_overwrite - overwrite older auxtrace data? 950 * 951 * If @overwrite is %false the user needs to signal event consumption using 952 * perf_mmap__write_tail(). Using evlist__mmap_read() does this 953 * automatically. 954 * 955 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data 956 * consumption using auxtrace_mmap__write_tail(). 957 * 958 * Return: %0 on success, negative error code otherwise. 959 */ 960 int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 961 unsigned int auxtrace_pages, 962 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush, 963 int comp_level) 964 { 965 /* 966 * Delay setting mp.prot: set it before calling perf_mmap__mmap. 967 * Its value is decided by evsel's write_backward. 968 * So &mp should not be passed through const pointer. 
969 */ 970 struct mmap_params mp = { 971 .nr_cblocks = nr_cblocks, 972 .affinity = affinity, 973 .flush = flush, 974 .comp_level = comp_level 975 }; 976 struct perf_evlist_mmap_ops ops = { 977 .idx = perf_evlist__mmap_cb_idx, 978 .get = perf_evlist__mmap_cb_get, 979 .mmap = perf_evlist__mmap_cb_mmap, 980 }; 981 982 evlist->core.mmap_len = evlist__mmap_size(pages); 983 pr_debug("mmap size %zuB\n", evlist->core.mmap_len); 984 985 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, 986 auxtrace_pages, auxtrace_overwrite); 987 988 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core); 989 } 990 991 int evlist__mmap(struct evlist *evlist, unsigned int pages) 992 { 993 return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); 994 } 995 996 int evlist__create_maps(struct evlist *evlist, struct target *target) 997 { 998 bool all_threads = (target->per_thread && target->system_wide); 999 struct perf_cpu_map *cpus; 1000 struct perf_thread_map *threads; 1001 1002 /* 1003 * If specify '-a' and '--per-thread' to perf record, perf record 1004 * will override '--per-thread'. target->per_thread = false and 1005 * target->system_wide = true. 1006 * 1007 * If specify '--per-thread' only to perf record, 1008 * target->per_thread = true and target->system_wide = false. 1009 * 1010 * So target->per_thread && target->system_wide is false. 1011 * For perf record, thread_map__new_str doesn't call 1012 * thread_map__new_all_cpus. That will keep perf record's 1013 * current behavior. 1014 * 1015 * For perf stat, it allows the case that target->per_thread and 1016 * target->system_wide are all true. It means to collect system-wide 1017 * per-thread data. thread_map__new_str will call 1018 * thread_map__new_all_cpus to enumerate all threads. 1019 */ 1020 threads = thread_map__new_str(target->pid, target->tid, all_threads); 1021 1022 if (!threads) 1023 return -1; 1024 1025 if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist)) 1026 cpus = perf_cpu_map__new_any_cpu(); 1027 else 1028 cpus = perf_cpu_map__new(target->cpu_list); 1029 1030 if (!cpus) 1031 goto out_delete_threads; 1032 1033 evlist->core.has_user_cpus = !!target->cpu_list; 1034 1035 perf_evlist__set_maps(&evlist->core, cpus, threads); 1036 1037 /* as evlist now has references, put count here */ 1038 perf_cpu_map__put(cpus); 1039 perf_thread_map__put(threads); 1040 1041 return 0; 1042 1043 out_delete_threads: 1044 perf_thread_map__put(threads); 1045 return -1; 1046 } 1047 1048 int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel, 1049 struct target *target) 1050 { 1051 struct evsel *evsel; 1052 int err = 0; 1053 1054 evlist__for_each_entry(evlist, evsel) { 1055 /* 1056 * filters only work for tracepoint event, which doesn't have cpu limit. 1057 * So evlist and evsel should always be same. 1058 */ 1059 if (evsel->filter) { 1060 err = perf_evsel__apply_filter(&evsel->core, evsel->filter); 1061 if (err) { 1062 *err_evsel = evsel; 1063 break; 1064 } 1065 } 1066 1067 /* 1068 * non-tracepoint events can have BPF filters. 
1069 */ 1070 if (!list_empty(&evsel->bpf_filters)) { 1071 err = perf_bpf_filter__prepare(evsel, target); 1072 if (err) { 1073 *err_evsel = evsel; 1074 break; 1075 } 1076 } 1077 } 1078 1079 return err; 1080 } 1081 1082 int evlist__set_tp_filter(struct evlist *evlist, const char *filter) 1083 { 1084 struct evsel *evsel; 1085 int err = 0; 1086 1087 if (filter == NULL) 1088 return -1; 1089 1090 evlist__for_each_entry(evlist, evsel) { 1091 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1092 continue; 1093 1094 err = evsel__set_filter(evsel, filter); 1095 if (err) 1096 break; 1097 } 1098 1099 return err; 1100 } 1101 1102 int evlist__append_tp_filter(struct evlist *evlist, const char *filter) 1103 { 1104 struct evsel *evsel; 1105 int err = 0; 1106 1107 if (filter == NULL) 1108 return -1; 1109 1110 evlist__for_each_entry(evlist, evsel) { 1111 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1112 continue; 1113 1114 err = evsel__append_tp_filter(evsel, filter); 1115 if (err) 1116 break; 1117 } 1118 1119 return err; 1120 } 1121 1122 char *asprintf__tp_filter_pids(size_t npids, pid_t *pids) 1123 { 1124 char *filter; 1125 size_t i; 1126 1127 for (i = 0; i < npids; ++i) { 1128 if (i == 0) { 1129 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) 1130 return NULL; 1131 } else { 1132 char *tmp; 1133 1134 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) 1135 goto out_free; 1136 1137 free(filter); 1138 filter = tmp; 1139 } 1140 } 1141 1142 return filter; 1143 out_free: 1144 free(filter); 1145 return NULL; 1146 } 1147 1148 int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1149 { 1150 char *filter = asprintf__tp_filter_pids(npids, pids); 1151 int ret = evlist__set_tp_filter(evlist, filter); 1152 1153 free(filter); 1154 return ret; 1155 } 1156 1157 int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1158 { 1159 char *filter = asprintf__tp_filter_pids(npids, pids); 1160 int ret = evlist__append_tp_filter(evlist, filter); 1161 1162 free(filter); 1163 return ret; 1164 } 1165 1166 int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid) 1167 { 1168 return evlist__append_tp_filter_pids(evlist, 1, &pid); 1169 } 1170 1171 bool evlist__valid_sample_type(struct evlist *evlist) 1172 { 1173 struct evsel *pos; 1174 1175 if (evlist->core.nr_entries == 1) 1176 return true; 1177 1178 if (evlist->id_pos < 0 || evlist->is_pos < 0) 1179 return false; 1180 1181 evlist__for_each_entry(evlist, pos) { 1182 if (pos->id_pos != evlist->id_pos || 1183 pos->is_pos != evlist->is_pos) 1184 return false; 1185 } 1186 1187 return true; 1188 } 1189 1190 u64 __evlist__combined_sample_type(struct evlist *evlist) 1191 { 1192 struct evsel *evsel; 1193 1194 if (evlist->combined_sample_type) 1195 return evlist->combined_sample_type; 1196 1197 evlist__for_each_entry(evlist, evsel) 1198 evlist->combined_sample_type |= evsel->core.attr.sample_type; 1199 1200 return evlist->combined_sample_type; 1201 } 1202 1203 u64 evlist__combined_sample_type(struct evlist *evlist) 1204 { 1205 evlist->combined_sample_type = 0; 1206 return __evlist__combined_sample_type(evlist); 1207 } 1208 1209 u64 evlist__combined_branch_type(struct evlist *evlist) 1210 { 1211 struct evsel *evsel; 1212 u64 branch_type = 0; 1213 1214 evlist__for_each_entry(evlist, evsel) 1215 branch_type |= evsel->core.attr.branch_sample_type; 1216 return branch_type; 1217 } 1218 1219 static struct evsel * 1220 evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event) 1221 { 1222 
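	/*
	 * Look for an event earlier in the list that also samples branch
	 * counters (PERF_SAMPLE_BRANCH_COUNTERS) and has the same name, so
	 * that evlist__update_br_cntr() can reuse its abbreviated name
	 * instead of consuming a new one.
	 */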
struct evsel *pos; 1223 1224 evlist__for_each_entry(evlist, pos) { 1225 if (event == pos) 1226 break; 1227 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) && 1228 !strcmp(pos->name, event->name)) 1229 return pos; 1230 } 1231 return NULL; 1232 } 1233 1234 #define MAX_NR_ABBR_NAME (26 * 11) 1235 1236 /* 1237 * The abbr name is from A to Z9. If the number of event 1238 * which requires the branch counter > MAX_NR_ABBR_NAME, 1239 * return NA. 1240 */ 1241 static void evlist__new_abbr_name(char *name) 1242 { 1243 static int idx; 1244 int i = idx / 26; 1245 1246 if (idx >= MAX_NR_ABBR_NAME) { 1247 name[0] = 'N'; 1248 name[1] = 'A'; 1249 name[2] = '\0'; 1250 return; 1251 } 1252 1253 name[0] = 'A' + (idx % 26); 1254 1255 if (!i) 1256 name[1] = '\0'; 1257 else { 1258 name[1] = '0' + i - 1; 1259 name[2] = '\0'; 1260 } 1261 1262 idx++; 1263 } 1264 1265 void evlist__update_br_cntr(struct evlist *evlist) 1266 { 1267 struct evsel *evsel, *dup; 1268 int i = 0; 1269 1270 evlist__for_each_entry(evlist, evsel) { 1271 if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) { 1272 evsel->br_cntr_idx = i++; 1273 evsel__leader(evsel)->br_cntr_nr++; 1274 1275 dup = evlist__find_dup_event_from_prev(evlist, evsel); 1276 if (dup) 1277 memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char)); 1278 else 1279 evlist__new_abbr_name(evsel->abbr_name); 1280 } 1281 } 1282 evlist->nr_br_cntr = i; 1283 } 1284 1285 bool evlist__valid_read_format(struct evlist *evlist) 1286 { 1287 struct evsel *first = evlist__first(evlist), *pos = first; 1288 u64 read_format = first->core.attr.read_format; 1289 u64 sample_type = first->core.attr.sample_type; 1290 1291 evlist__for_each_entry(evlist, pos) { 1292 if (read_format != pos->core.attr.read_format) { 1293 pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n", 1294 read_format, (u64)pos->core.attr.read_format); 1295 } 1296 } 1297 1298 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */ 1299 if ((sample_type & PERF_SAMPLE_READ) && 1300 !(read_format & PERF_FORMAT_ID)) { 1301 return false; 1302 } 1303 1304 return true; 1305 } 1306 1307 u16 evlist__id_hdr_size(struct evlist *evlist) 1308 { 1309 struct evsel *first = evlist__first(evlist); 1310 1311 return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0; 1312 } 1313 1314 bool evlist__valid_sample_id_all(struct evlist *evlist) 1315 { 1316 struct evsel *first = evlist__first(evlist), *pos = first; 1317 1318 evlist__for_each_entry_continue(evlist, pos) { 1319 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) 1320 return false; 1321 } 1322 1323 return true; 1324 } 1325 1326 bool evlist__sample_id_all(struct evlist *evlist) 1327 { 1328 struct evsel *first = evlist__first(evlist); 1329 return first->core.attr.sample_id_all; 1330 } 1331 1332 void evlist__set_selected(struct evlist *evlist, struct evsel *evsel) 1333 { 1334 evlist->selected = evsel; 1335 } 1336 1337 void evlist__close(struct evlist *evlist) 1338 { 1339 struct evsel *evsel; 1340 struct evlist_cpu_iterator evlist_cpu_itr; 1341 struct affinity affinity; 1342 1343 /* 1344 * With perf record core.user_requested_cpus is usually NULL. 1345 * Use the old method to handle this for now. 
1346 */ 1347 if (!evlist->core.user_requested_cpus || 1348 cpu_map__is_dummy(evlist->core.user_requested_cpus)) { 1349 evlist__for_each_entry_reverse(evlist, evsel) 1350 evsel__close(evsel); 1351 return; 1352 } 1353 1354 if (affinity__setup(&affinity) < 0) 1355 return; 1356 1357 evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { 1358 perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, 1359 evlist_cpu_itr.cpu_map_idx); 1360 } 1361 1362 affinity__cleanup(&affinity); 1363 evlist__for_each_entry_reverse(evlist, evsel) { 1364 perf_evsel__free_fd(&evsel->core); 1365 perf_evsel__free_id(&evsel->core); 1366 } 1367 perf_evlist__reset_id_hash(&evlist->core); 1368 } 1369 1370 static int evlist__create_syswide_maps(struct evlist *evlist) 1371 { 1372 struct perf_cpu_map *cpus; 1373 struct perf_thread_map *threads; 1374 1375 /* 1376 * Try reading /sys/devices/system/cpu/online to get 1377 * an all cpus map. 1378 * 1379 * FIXME: -ENOMEM is the best we can do here, the cpu_map 1380 * code needs an overhaul to properly forward the 1381 * error, and we may not want to do that fallback to a 1382 * default cpu identity map :-\ 1383 */ 1384 cpus = perf_cpu_map__new_online_cpus(); 1385 if (!cpus) 1386 return -ENOMEM; 1387 1388 threads = perf_thread_map__new_dummy(); 1389 if (!threads) { 1390 perf_cpu_map__put(cpus); 1391 return -ENOMEM; 1392 } 1393 1394 perf_evlist__set_maps(&evlist->core, cpus, threads); 1395 perf_thread_map__put(threads); 1396 perf_cpu_map__put(cpus); 1397 return 0; 1398 } 1399 1400 int evlist__open(struct evlist *evlist) 1401 { 1402 struct evsel *evsel; 1403 int err; 1404 1405 /* 1406 * Default: one fd per CPU, all threads, aka systemwide 1407 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL 1408 */ 1409 if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) { 1410 err = evlist__create_syswide_maps(evlist); 1411 if (err < 0) 1412 goto out_err; 1413 } 1414 1415 evlist__update_id_pos(evlist); 1416 1417 evlist__for_each_entry(evlist, evsel) { 1418 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); 1419 if (err < 0) 1420 goto out_err; 1421 } 1422 1423 return 0; 1424 out_err: 1425 evlist__close(evlist); 1426 errno = -err; 1427 return err; 1428 } 1429 1430 int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[], 1431 bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) 1432 { 1433 int child_ready_pipe[2], go_pipe[2]; 1434 char bf; 1435 1436 evlist->workload.cork_fd = -1; 1437 1438 if (pipe(child_ready_pipe) < 0) { 1439 perror("failed to create 'ready' pipe"); 1440 return -1; 1441 } 1442 1443 if (pipe(go_pipe) < 0) { 1444 perror("failed to create 'go' pipe"); 1445 goto out_close_ready_pipe; 1446 } 1447 1448 evlist->workload.pid = fork(); 1449 if (evlist->workload.pid < 0) { 1450 perror("failed to fork"); 1451 goto out_close_pipes; 1452 } 1453 1454 if (!evlist->workload.pid) { 1455 int ret; 1456 1457 if (pipe_output) 1458 dup2(2, 1); 1459 1460 signal(SIGTERM, SIG_DFL); 1461 1462 close(child_ready_pipe[0]); 1463 close(go_pipe[1]); 1464 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 1465 1466 /* 1467 * Change the name of this process not to confuse --exclude-perf users 1468 * that sees 'perf' in the window up to the execvp() and thinks that 1469 * perf samples are not being excluded. 1470 */ 1471 prctl(PR_SET_NAME, "perf-exec"); 1472 1473 /* 1474 * Tell the parent we're ready to go 1475 */ 1476 close(child_ready_pipe[1]); 1477 1478 /* 1479 * Wait until the parent tells us to go. 
1480 */ 1481 ret = read(go_pipe[0], &bf, 1); 1482 /* 1483 * The parent will ask for the execvp() to be performed by 1484 * writing exactly one byte, in workload.cork_fd, usually via 1485 * evlist__start_workload(). 1486 * 1487 * For cancelling the workload without actually running it, 1488 * the parent will just close workload.cork_fd, without writing 1489 * anything, i.e. read will return zero and we just exit() 1490 * here (See evlist__cancel_workload()). 1491 */ 1492 if (ret != 1) { 1493 if (ret == -1) 1494 perror("unable to read pipe"); 1495 exit(ret); 1496 } 1497 1498 execvp(argv[0], (char **)argv); 1499 1500 if (exec_error) { 1501 union sigval val; 1502 1503 val.sival_int = errno; 1504 if (sigqueue(getppid(), SIGUSR1, val)) 1505 perror(argv[0]); 1506 } else 1507 perror(argv[0]); 1508 exit(-1); 1509 } 1510 1511 if (exec_error) { 1512 struct sigaction act = { 1513 .sa_flags = SA_SIGINFO, 1514 .sa_sigaction = exec_error, 1515 }; 1516 sigaction(SIGUSR1, &act, NULL); 1517 } 1518 1519 if (target__none(target)) { 1520 if (evlist->core.threads == NULL) { 1521 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", 1522 __func__, __LINE__); 1523 goto out_close_pipes; 1524 } 1525 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); 1526 } 1527 1528 close(child_ready_pipe[1]); 1529 close(go_pipe[0]); 1530 /* 1531 * wait for child to settle 1532 */ 1533 if (read(child_ready_pipe[0], &bf, 1) == -1) { 1534 perror("unable to read pipe"); 1535 goto out_close_pipes; 1536 } 1537 1538 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); 1539 evlist->workload.cork_fd = go_pipe[1]; 1540 close(child_ready_pipe[0]); 1541 return 0; 1542 1543 out_close_pipes: 1544 close(go_pipe[0]); 1545 close(go_pipe[1]); 1546 out_close_ready_pipe: 1547 close(child_ready_pipe[0]); 1548 close(child_ready_pipe[1]); 1549 return -1; 1550 } 1551 1552 int evlist__start_workload(struct evlist *evlist) 1553 { 1554 if (evlist->workload.cork_fd >= 0) { 1555 char bf = 0; 1556 int ret; 1557 /* 1558 * Remove the cork, let it rip! 
1559 */ 1560 ret = write(evlist->workload.cork_fd, &bf, 1); 1561 if (ret < 0) 1562 perror("unable to write to pipe"); 1563 1564 close(evlist->workload.cork_fd); 1565 evlist->workload.cork_fd = -1; 1566 return ret; 1567 } 1568 1569 return 0; 1570 } 1571 1572 void evlist__cancel_workload(struct evlist *evlist) 1573 { 1574 int status; 1575 1576 if (evlist->workload.cork_fd >= 0) { 1577 close(evlist->workload.cork_fd); 1578 evlist->workload.cork_fd = -1; 1579 waitpid(evlist->workload.pid, &status, WNOHANG); 1580 } 1581 } 1582 1583 int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) 1584 { 1585 struct evsel *evsel = evlist__event2evsel(evlist, event); 1586 int ret; 1587 1588 if (!evsel) 1589 return -EFAULT; 1590 ret = evsel__parse_sample(evsel, event, sample); 1591 if (ret) 1592 return ret; 1593 if (perf_guest && sample->id) { 1594 struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id); 1595 1596 if (sid) { 1597 sample->machine_pid = sid->machine_pid; 1598 sample->vcpu = sid->vcpu.cpu; 1599 } 1600 } 1601 return 0; 1602 } 1603 1604 int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp) 1605 { 1606 struct evsel *evsel = evlist__event2evsel(evlist, event); 1607 1608 if (!evsel) 1609 return -EFAULT; 1610 return evsel__parse_sample_timestamp(evsel, event, timestamp); 1611 } 1612 1613 int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size) 1614 { 1615 int printed, value; 1616 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1617 1618 switch (err) { 1619 case EACCES: 1620 case EPERM: 1621 printed = scnprintf(buf, size, 1622 "Error:\t%s.\n" 1623 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); 1624 1625 value = perf_event_paranoid(); 1626 1627 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1628 1629 if (value >= 2) { 1630 printed += scnprintf(buf + printed, size - printed, 1631 "For your workloads it needs to be <= 1\nHint:\t"); 1632 } 1633 printed += scnprintf(buf + printed, size - printed, 1634 "For system wide tracing it needs to be set to -1.\n"); 1635 1636 printed += scnprintf(buf + printed, size - printed, 1637 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1638 "Hint:\tThe current value is %d.", value); 1639 break; 1640 case EINVAL: { 1641 struct evsel *first = evlist__first(evlist); 1642 int max_freq; 1643 1644 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) 1645 goto out_default; 1646 1647 if (first->core.attr.sample_freq < (u64)max_freq) 1648 goto out_default; 1649 1650 printed = scnprintf(buf, size, 1651 "Error:\t%s.\n" 1652 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" 1653 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", 1654 emsg, max_freq, first->core.attr.sample_freq); 1655 break; 1656 } 1657 default: 1658 out_default: 1659 scnprintf(buf, size, "%s", emsg); 1660 break; 1661 } 1662 1663 return 0; 1664 } 1665 1666 int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size) 1667 { 1668 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1669 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; 1670 1671 switch (err) { 1672 case EPERM: 1673 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1674 printed += scnprintf(buf + printed, size - printed, 1675 "Error:\t%s.\n" 1676 "Hint:\tCheck 
/proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1677 "Hint:\tTried using %zd kB.\n", 1678 emsg, pages_max_per_user, pages_attempted); 1679 1680 if (pages_attempted >= pages_max_per_user) { 1681 printed += scnprintf(buf + printed, size - printed, 1682 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1683 pages_max_per_user + pages_attempted); 1684 } 1685 1686 printed += scnprintf(buf + printed, size - printed, 1687 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1688 break; 1689 default: 1690 scnprintf(buf, size, "%s", emsg); 1691 break; 1692 } 1693 1694 return 0; 1695 } 1696 1697 void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel) 1698 { 1699 struct evsel *evsel, *n; 1700 LIST_HEAD(move); 1701 1702 if (move_evsel == evlist__first(evlist)) 1703 return; 1704 1705 evlist__for_each_entry_safe(evlist, n, evsel) { 1706 if (evsel__leader(evsel) == evsel__leader(move_evsel)) 1707 list_move_tail(&evsel->core.node, &move); 1708 } 1709 1710 list_splice(&move, &evlist->core.entries); 1711 } 1712 1713 struct evsel *evlist__get_tracking_event(struct evlist *evlist) 1714 { 1715 struct evsel *evsel; 1716 1717 evlist__for_each_entry(evlist, evsel) { 1718 if (evsel->tracking) 1719 return evsel; 1720 } 1721 1722 return evlist__first(evlist); 1723 } 1724 1725 void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel) 1726 { 1727 struct evsel *evsel; 1728 1729 if (tracking_evsel->tracking) 1730 return; 1731 1732 evlist__for_each_entry(evlist, evsel) { 1733 if (evsel != tracking_evsel) 1734 evsel->tracking = false; 1735 } 1736 1737 tracking_evsel->tracking = true; 1738 } 1739 1740 struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide) 1741 { 1742 struct evsel *evsel; 1743 1744 evsel = evlist__get_tracking_event(evlist); 1745 if (!evsel__is_dummy_event(evsel)) { 1746 evsel = evlist__add_aux_dummy(evlist, system_wide); 1747 if (!evsel) 1748 return NULL; 1749 1750 evlist__set_tracking_event(evlist, evsel); 1751 } else if (system_wide) { 1752 perf_evlist__go_system_wide(&evlist->core, &evsel->core); 1753 } 1754 1755 return evsel; 1756 } 1757 1758 struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str) 1759 { 1760 struct evsel *evsel; 1761 1762 evlist__for_each_entry(evlist, evsel) { 1763 if (!evsel->name) 1764 continue; 1765 if (evsel__name_is(evsel, str)) 1766 return evsel; 1767 } 1768 1769 return NULL; 1770 } 1771 1772 void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state) 1773 { 1774 enum bkw_mmap_state old_state = evlist->bkw_mmap_state; 1775 enum action { 1776 NONE, 1777 PAUSE, 1778 RESUME, 1779 } action = NONE; 1780 1781 if (!evlist->overwrite_mmap) 1782 return; 1783 1784 switch (old_state) { 1785 case BKW_MMAP_NOTREADY: { 1786 if (state != BKW_MMAP_RUNNING) 1787 goto state_err; 1788 break; 1789 } 1790 case BKW_MMAP_RUNNING: { 1791 if (state != BKW_MMAP_DATA_PENDING) 1792 goto state_err; 1793 action = PAUSE; 1794 break; 1795 } 1796 case BKW_MMAP_DATA_PENDING: { 1797 if (state != BKW_MMAP_EMPTY) 1798 goto state_err; 1799 break; 1800 } 1801 case BKW_MMAP_EMPTY: { 1802 if (state != BKW_MMAP_RUNNING) 1803 goto state_err; 1804 action = RESUME; 1805 break; 1806 } 1807 default: 1808 WARN_ONCE(1, "Shouldn't get there\n"); 1809 } 1810 1811 evlist->bkw_mmap_state = state; 1812 1813 switch (action) { 1814 case PAUSE: 1815 evlist__pause(evlist); 1816 break; 1817 case RESUME: 1818 evlist__resume(evlist); 1819 break; 1820 case NONE: 1821 default: 1822 
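		/* NONE: no ring buffer pause or resume is needed for this transition. */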
break; 1823 } 1824 1825 state_err: 1826 return; 1827 } 1828 1829 bool evlist__exclude_kernel(struct evlist *evlist) 1830 { 1831 struct evsel *evsel; 1832 1833 evlist__for_each_entry(evlist, evsel) { 1834 if (!evsel->core.attr.exclude_kernel) 1835 return false; 1836 } 1837 1838 return true; 1839 } 1840 1841 /* 1842 * Events in data file are not collect in groups, but we still want 1843 * the group display. Set the artificial group and set the leader's 1844 * forced_leader flag to notify the display code. 1845 */ 1846 void evlist__force_leader(struct evlist *evlist) 1847 { 1848 if (evlist__nr_groups(evlist) == 0) { 1849 struct evsel *leader = evlist__first(evlist); 1850 1851 evlist__set_leader(evlist); 1852 leader->forced_leader = true; 1853 } 1854 } 1855 1856 struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close) 1857 { 1858 struct evsel *c2, *leader; 1859 bool is_open = true; 1860 1861 leader = evsel__leader(evsel); 1862 1863 pr_debug("Weak group for %s/%d failed\n", 1864 leader->name, leader->core.nr_members); 1865 1866 /* 1867 * for_each_group_member doesn't work here because it doesn't 1868 * include the first entry. 1869 */ 1870 evlist__for_each_entry(evsel_list, c2) { 1871 if (c2 == evsel) 1872 is_open = false; 1873 if (evsel__has_leader(c2, leader)) { 1874 if (is_open && close) 1875 perf_evsel__close(&c2->core); 1876 /* 1877 * We want to close all members of the group and reopen 1878 * them. Some events, like Intel topdown, require being 1879 * in a group and so keep these in the group. 1880 */ 1881 evsel__remove_from_group(c2, leader); 1882 1883 /* 1884 * Set this for all former members of the group 1885 * to indicate they get reopened. 1886 */ 1887 c2->reset_group = true; 1888 } 1889 } 1890 /* Reset the leader count if all entries were removed. */ 1891 if (leader->core.nr_members == 1) 1892 leader->core.nr_members = 0; 1893 return leader; 1894 } 1895 1896 static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1897 { 1898 char *s, *p; 1899 int ret = 0, fd; 1900 1901 if (strncmp(str, "fifo:", 5)) 1902 return -EINVAL; 1903 1904 str += 5; 1905 if (!*str || *str == ',') 1906 return -EINVAL; 1907 1908 s = strdup(str); 1909 if (!s) 1910 return -ENOMEM; 1911 1912 p = strchr(s, ','); 1913 if (p) 1914 *p = '\0'; 1915 1916 /* 1917 * O_RDWR avoids POLLHUPs which is necessary to allow the other 1918 * end of a FIFO to be repeatedly opened and closed. 
1919 */ 1920 fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1921 if (fd < 0) { 1922 pr_err("Failed to open '%s'\n", s); 1923 ret = -errno; 1924 goto out_free; 1925 } 1926 *ctl_fd = fd; 1927 *ctl_fd_close = true; 1928 1929 if (p && *++p) { 1930 /* O_RDWR | O_NONBLOCK means the other end need not be open */ 1931 fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1932 if (fd < 0) { 1933 pr_err("Failed to open '%s'\n", p); 1934 ret = -errno; 1935 goto out_free; 1936 } 1937 *ctl_fd_ack = fd; 1938 } 1939 1940 out_free: 1941 free(s); 1942 return ret; 1943 } 1944 1945 int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1946 { 1947 char *comma = NULL, *endptr = NULL; 1948 1949 *ctl_fd_close = false; 1950 1951 if (strncmp(str, "fd:", 3)) 1952 return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close); 1953 1954 *ctl_fd = strtoul(&str[3], &endptr, 0); 1955 if (endptr == &str[3]) 1956 return -EINVAL; 1957 1958 comma = strchr(str, ','); 1959 if (comma) { 1960 if (endptr != comma) 1961 return -EINVAL; 1962 1963 *ctl_fd_ack = strtoul(comma + 1, &endptr, 0); 1964 if (endptr == comma + 1 || *endptr != '\0') 1965 return -EINVAL; 1966 } 1967 1968 return 0; 1969 } 1970 1971 void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close) 1972 { 1973 if (*ctl_fd_close) { 1974 *ctl_fd_close = false; 1975 close(ctl_fd); 1976 if (ctl_fd_ack >= 0) 1977 close(ctl_fd_ack); 1978 } 1979 } 1980 1981 int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack) 1982 { 1983 if (fd == -1) { 1984 pr_debug("Control descriptor is not initialized\n"); 1985 return 0; 1986 } 1987 1988 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 1989 fdarray_flag__nonfilterable | 1990 fdarray_flag__non_perf_event); 1991 if (evlist->ctl_fd.pos < 0) { 1992 evlist->ctl_fd.pos = -1; 1993 pr_err("Failed to add ctl fd entry: %m\n"); 1994 return -1; 1995 } 1996 1997 evlist->ctl_fd.fd = fd; 1998 evlist->ctl_fd.ack = ack; 1999 2000 return 0; 2001 } 2002 2003 bool evlist__ctlfd_initialized(struct evlist *evlist) 2004 { 2005 return evlist->ctl_fd.pos >= 0; 2006 } 2007 2008 int evlist__finalize_ctlfd(struct evlist *evlist) 2009 { 2010 struct pollfd *entries = evlist->core.pollfd.entries; 2011 2012 if (!evlist__ctlfd_initialized(evlist)) 2013 return 0; 2014 2015 entries[evlist->ctl_fd.pos].fd = -1; 2016 entries[evlist->ctl_fd.pos].events = 0; 2017 entries[evlist->ctl_fd.pos].revents = 0; 2018 2019 evlist->ctl_fd.pos = -1; 2020 evlist->ctl_fd.ack = -1; 2021 evlist->ctl_fd.fd = -1; 2022 2023 return 0; 2024 } 2025 2026 static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd, 2027 char *cmd_data, size_t data_size) 2028 { 2029 int err; 2030 char c; 2031 size_t bytes_read = 0; 2032 2033 *cmd = EVLIST_CTL_CMD_UNSUPPORTED; 2034 memset(cmd_data, 0, data_size); 2035 data_size--; 2036 2037 do { 2038 err = read(evlist->ctl_fd.fd, &c, 1); 2039 if (err > 0) { 2040 if (c == '\n' || c == '\0') 2041 break; 2042 cmd_data[bytes_read++] = c; 2043 if (bytes_read == data_size) 2044 break; 2045 continue; 2046 } else if (err == -1) { 2047 if (errno == EINTR) 2048 continue; 2049 if (errno == EAGAIN || errno == EWOULDBLOCK) 2050 err = 0; 2051 else 2052 pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); 2053 } 2054 break; 2055 } while (1); 2056 2057 pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data, 2058 bytes_read == data_size ? "" : c == '\n' ? 
"\\n" : "\\0"); 2059 2060 if (bytes_read > 0) { 2061 if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG, 2062 (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { 2063 *cmd = EVLIST_CTL_CMD_ENABLE; 2064 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG, 2065 (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { 2066 *cmd = EVLIST_CTL_CMD_DISABLE; 2067 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG, 2068 (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { 2069 *cmd = EVLIST_CTL_CMD_SNAPSHOT; 2070 pr_debug("is snapshot\n"); 2071 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG, 2072 (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) { 2073 *cmd = EVLIST_CTL_CMD_EVLIST; 2074 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG, 2075 (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) { 2076 *cmd = EVLIST_CTL_CMD_STOP; 2077 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG, 2078 (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) { 2079 *cmd = EVLIST_CTL_CMD_PING; 2080 } 2081 } 2082 2083 return bytes_read ? (int)bytes_read : err; 2084 } 2085 2086 int evlist__ctlfd_ack(struct evlist *evlist) 2087 { 2088 int err; 2089 2090 if (evlist->ctl_fd.ack == -1) 2091 return 0; 2092 2093 err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, 2094 sizeof(EVLIST_CTL_CMD_ACK_TAG)); 2095 if (err == -1) 2096 pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); 2097 2098 return err; 2099 } 2100 2101 static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg) 2102 { 2103 char *data = cmd_data + cmd_size; 2104 2105 /* no argument */ 2106 if (!*data) 2107 return 0; 2108 2109 /* there's argument */ 2110 if (*data == ' ') { 2111 *arg = data + 1; 2112 return 1; 2113 } 2114 2115 /* malformed */ 2116 return -1; 2117 } 2118 2119 static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable) 2120 { 2121 struct evsel *evsel; 2122 char *name; 2123 int err; 2124 2125 err = get_cmd_arg(cmd_data, 2126 enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 : 2127 sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1, 2128 &name); 2129 if (err < 0) { 2130 pr_info("failed: wrong command\n"); 2131 return -1; 2132 } 2133 2134 if (err) { 2135 evsel = evlist__find_evsel_by_str(evlist, name); 2136 if (evsel) { 2137 if (enable) 2138 evlist__enable_evsel(evlist, name); 2139 else 2140 evlist__disable_evsel(evlist, name); 2141 pr_info("Event %s %s\n", evsel->name, 2142 enable ? 
"enabled" : "disabled"); 2143 } else { 2144 pr_info("failed: can't find '%s' event\n", name); 2145 } 2146 } else { 2147 if (enable) { 2148 evlist__enable(evlist); 2149 pr_info(EVLIST_ENABLED_MSG); 2150 } else { 2151 evlist__disable(evlist); 2152 pr_info(EVLIST_DISABLED_MSG); 2153 } 2154 } 2155 2156 return 0; 2157 } 2158 2159 static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data) 2160 { 2161 struct perf_attr_details details = { .verbose = false, }; 2162 struct evsel *evsel; 2163 char *arg; 2164 int err; 2165 2166 err = get_cmd_arg(cmd_data, 2167 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, 2168 &arg); 2169 if (err < 0) { 2170 pr_info("failed: wrong command\n"); 2171 return -1; 2172 } 2173 2174 if (err) { 2175 if (!strcmp(arg, "-v")) { 2176 details.verbose = true; 2177 } else if (!strcmp(arg, "-g")) { 2178 details.event_group = true; 2179 } else if (!strcmp(arg, "-F")) { 2180 details.freq = true; 2181 } else { 2182 pr_info("failed: wrong command\n"); 2183 return -1; 2184 } 2185 } 2186 2187 evlist__for_each_entry(evlist, evsel) 2188 evsel__fprintf(evsel, &details, stderr); 2189 2190 return 0; 2191 } 2192 2193 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) 2194 { 2195 int err = 0; 2196 char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; 2197 int ctlfd_pos = evlist->ctl_fd.pos; 2198 struct pollfd *entries = evlist->core.pollfd.entries; 2199 2200 if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) 2201 return 0; 2202 2203 if (entries[ctlfd_pos].revents & POLLIN) { 2204 err = evlist__ctlfd_recv(evlist, cmd, cmd_data, 2205 EVLIST_CTL_CMD_MAX_LEN); 2206 if (err > 0) { 2207 switch (*cmd) { 2208 case EVLIST_CTL_CMD_ENABLE: 2209 case EVLIST_CTL_CMD_DISABLE: 2210 err = evlist__ctlfd_enable(evlist, cmd_data, 2211 *cmd == EVLIST_CTL_CMD_ENABLE); 2212 break; 2213 case EVLIST_CTL_CMD_EVLIST: 2214 err = evlist__ctlfd_list(evlist, cmd_data); 2215 break; 2216 case EVLIST_CTL_CMD_SNAPSHOT: 2217 case EVLIST_CTL_CMD_STOP: 2218 case EVLIST_CTL_CMD_PING: 2219 break; 2220 case EVLIST_CTL_CMD_ACK: 2221 case EVLIST_CTL_CMD_UNSUPPORTED: 2222 default: 2223 pr_debug("ctlfd: unsupported %d\n", *cmd); 2224 break; 2225 } 2226 if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || 2227 *cmd == EVLIST_CTL_CMD_SNAPSHOT)) 2228 evlist__ctlfd_ack(evlist); 2229 } 2230 } 2231 2232 if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) 2233 evlist__finalize_ctlfd(evlist); 2234 else 2235 entries[ctlfd_pos].revents = 0; 2236 2237 return err; 2238 } 2239 2240 /** 2241 * struct event_enable_time - perf record -D/--delay single time range. 2242 * @start: start of time range to enable events in milliseconds 2243 * @end: end of time range to enable events in milliseconds 2244 * 2245 * N.B. this structure is also accessed as an array of int. 2246 */ 2247 struct event_enable_time { 2248 int start; 2249 int end; 2250 }; 2251 2252 static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first) 2253 { 2254 const char *fmt = first ? 
"%u - %u %n" : " , %u - %u %n"; 2255 int ret, start, end, n; 2256 2257 ret = sscanf(str, fmt, &start, &end, &n); 2258 if (ret != 2 || end <= start) 2259 return -EINVAL; 2260 if (range) { 2261 range->start = start; 2262 range->end = end; 2263 } 2264 return n; 2265 } 2266 2267 static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range) 2268 { 2269 int incr = !!range; 2270 bool first = true; 2271 ssize_t ret, cnt; 2272 2273 for (cnt = 0; *str; cnt++) { 2274 ret = parse_event_enable_time(str, range, first); 2275 if (ret < 0) 2276 return ret; 2277 /* Check no overlap */ 2278 if (!first && range && range->start <= range[-1].end) 2279 return -EINVAL; 2280 str += ret; 2281 range += incr; 2282 first = false; 2283 } 2284 return cnt; 2285 } 2286 2287 /** 2288 * struct event_enable_timer - control structure for perf record -D/--delay. 2289 * @evlist: event list 2290 * @times: time ranges that events are enabled (N.B. this is also accessed as an 2291 * array of int) 2292 * @times_cnt: number of time ranges 2293 * @timerfd: timer file descriptor 2294 * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray) 2295 * @times_step: current position in (int *)@times)[], 2296 * refer event_enable_timer__process() 2297 * 2298 * Note, this structure is only used when there are time ranges, not when there 2299 * is only an initial delay. 2300 */ 2301 struct event_enable_timer { 2302 struct evlist *evlist; 2303 struct event_enable_time *times; 2304 size_t times_cnt; 2305 int timerfd; 2306 int pollfd_pos; 2307 size_t times_step; 2308 }; 2309 2310 static int str_to_delay(const char *str) 2311 { 2312 char *endptr; 2313 long d; 2314 2315 d = strtol(str, &endptr, 10); 2316 if (*endptr || d > INT_MAX || d < -1) 2317 return 0; 2318 return d; 2319 } 2320 2321 int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts, 2322 const char *str, int unset) 2323 { 2324 enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event; 2325 struct event_enable_timer *eet; 2326 ssize_t times_cnt; 2327 ssize_t ret; 2328 int err; 2329 2330 if (unset) 2331 return 0; 2332 2333 opts->target.initial_delay = str_to_delay(str); 2334 if (opts->target.initial_delay) 2335 return 0; 2336 2337 ret = parse_event_enable_times(str, NULL); 2338 if (ret < 0) 2339 return ret; 2340 2341 times_cnt = ret; 2342 if (times_cnt == 0) 2343 return -EINVAL; 2344 2345 eet = zalloc(sizeof(*eet)); 2346 if (!eet) 2347 return -ENOMEM; 2348 2349 eet->times = calloc(times_cnt, sizeof(*eet->times)); 2350 if (!eet->times) { 2351 err = -ENOMEM; 2352 goto free_eet; 2353 } 2354 2355 if (parse_event_enable_times(str, eet->times) != times_cnt) { 2356 err = -EINVAL; 2357 goto free_eet_times; 2358 } 2359 2360 eet->times_cnt = times_cnt; 2361 2362 eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); 2363 if (eet->timerfd == -1) { 2364 err = -errno; 2365 pr_err("timerfd_create failed: %s\n", strerror(errno)); 2366 goto free_eet_times; 2367 } 2368 2369 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); 2370 if (eet->pollfd_pos < 0) { 2371 err = eet->pollfd_pos; 2372 goto close_timerfd; 2373 } 2374 2375 eet->evlist = evlist; 2376 evlist->eet = eet; 2377 opts->target.initial_delay = eet->times[0].start; 2378 2379 return 0; 2380 2381 close_timerfd: 2382 close(eet->timerfd); 2383 free_eet_times: 2384 zfree(&eet->times); 2385 free_eet: 2386 free(eet); 2387 return err; 2388 } 2389 2390 static int event_enable_timer__set_timer(struct 
static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
{
	struct itimerspec its = {
		.it_value.tv_sec = ms / MSEC_PER_SEC,
		.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
	};
	int err = 0;

	if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
		err = -errno;
		pr_err("timerfd_settime failed: %s\n", strerror(errno));
	}
	return err;
}

int event_enable_timer__start(struct event_enable_timer *eet)
{
	int ms;

	if (!eet)
		return 0;

	ms = eet->times[0].end - eet->times[0].start;
	eet->times_step = 1;

	return event_enable_timer__set_timer(eet, ms);
}

int event_enable_timer__process(struct event_enable_timer *eet)
{
	struct pollfd *entries;
	short revents;

	if (!eet)
		return 0;

	entries = eet->evlist->core.pollfd.entries;
	revents = entries[eet->pollfd_pos].revents;
	entries[eet->pollfd_pos].revents = 0;

	if (revents & POLLIN) {
		size_t step = eet->times_step;
		size_t pos = step / 2;

		if (step & 1) {
			evlist__disable_non_dummy(eet->evlist);
			pr_info(EVLIST_DISABLED_MSG);
			if (pos >= eet->times_cnt - 1) {
				/* Disarm timer */
				event_enable_timer__set_timer(eet, 0);
				return 1; /* Stop */
			}
		} else {
			evlist__enable_non_dummy(eet->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}

		step += 1;
		pos = step / 2;

		if (pos < eet->times_cnt) {
			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
			int ms = times[step] - times[step - 1];

			eet->times_step = step;
			return event_enable_timer__set_timer(eet, ms);
		}
	}

	return 0;
}

void event_enable_timer__exit(struct event_enable_timer **ep)
{
	if (!ep || !*ep)
		return;
	zfree(&(*ep)->times);
	zfree(ep);
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}

void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length)
{
	struct evsel *evsel, *leader = NULL;
	bool first = true;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel *new_leader = evsel__leader(evsel);

		if (evsel__is_dummy_event(evsel))
			continue;

		if (leader != new_leader && leader && leader->core.nr_members > 1)
			strbuf_addch(sb, '}');

		if (!first)
			strbuf_addch(sb, ',');

		if (sb->len > max_length) {
			strbuf_addstr(sb, "...");
			return;
		}
		if (leader != new_leader && new_leader->core.nr_members > 1)
			strbuf_addch(sb, '{');

		strbuf_addstr(sb, evsel__name(evsel));
		first = false;
		leader = new_leader;
	}
	if (leader && leader->core.nr_members > 1)
		strbuf_addch(sb, '}');
}
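
/*
 * Illustrative example (assuming an event list built from
 * "-e '{cycles,instructions}' -e cache-misses" plus a dummy tracking event):
 * evlist__format_evsels() above skips the dummy event, wraps multi-member
 * groups in braces and separates entries with commas, producing something
 * like "{cycles,instructions},cache-misses".  Once the buffer grows past
 * max_length, the remainder is replaced by "...".
 */
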
void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * On some platforms the 'mem-loads' event must be used together with
	 * 'mem-loads-aux' within a group, with 'mem-loads-aux' as the group
	 * leader. Break this group up before reporting because 'mem-loads-aux'
	 * is just an auxiliary event: it doesn't carry any valid memory load
	 * information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}

/**
 * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
 *     and warn if the user CPU list is inapplicable for the event's PMU's
 *     CPUs. Non-core PMUs list a CPU in sysfs, but this may be overwritten by
 *     a user requested CPU and so any online CPU is applicable. Core PMUs
 *     handle events on the CPUs in their list and otherwise the event isn't
 *     supported.
 * @evlist: The list of events being checked.
 * @cpu_list: The user provided list of CPUs.
 */
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
{
	struct perf_cpu_map *user_requested_cpus;
	struct evsel *pos;

	if (!cpu_list)
		return;

	user_requested_cpus = perf_cpu_map__new(cpu_list);
	if (!user_requested_cpus)
		return;

	evlist__for_each_entry(evlist, pos) {
		evsel__warn_user_requested_cpus(pos, user_requested_cpus);
	}
	perf_cpu_map__put(user_requested_cpus);
}

/* Should uniquify be disabled for the evlist? */
static bool evlist__disable_uniquify(const struct evlist *evlist)
{
	struct evsel *counter;
	struct perf_pmu *last_pmu = NULL;
	bool first = true;

	evlist__for_each_entry(evlist, counter) {
		/* If PMUs vary then uniquify can be useful. */
		if (!first && counter->pmu != last_pmu)
			return false;
		first = false;
		if (counter->pmu) {
			/* Allow uniquify for uncore PMUs. */
			if (!counter->pmu->is_core)
				return false;
			/* Keep hybrid event names uniquified for clarity. */
			if (perf_pmus__num_core_pmus() > 1)
				return false;
		}
		last_pmu = counter->pmu;
	}
	return true;
}

static bool evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config)
{
	struct evsel *counter;
	bool needs_uniquify = false;

	if (evlist__disable_uniquify(evlist)) {
		evlist__for_each_entry(evlist, counter)
			counter->uniquified_name = true;
		return false;
	}

	evlist__for_each_entry(evlist, counter) {
		if (evsel__set_needs_uniquify(counter, config))
			needs_uniquify = true;
	}
	return needs_uniquify;
}

void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config)
{
	if (evlist__set_needs_uniquify(evlist, config)) {
		struct evsel *pos;

		evlist__for_each_entry(evlist, pos)
			evsel__uniquify_counter(pos);
	}
}

bool evlist__has_bpf_output(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_bpf_output(evsel))
			return true;
	}

	return false;
}

bool evlist__needs_bpf_sb_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (!evsel->core.attr.exclude_kernel)
			return true;
	}

	return false;
}
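
/*
 * Illustrative example for the uniquify helpers above (PMU names are only
 * indicative): a memory-bandwidth event on a server may be opened once per
 * uncore PMU instance, e.g. on every uncore_imc_N PMU.  Without
 * uniquification each resulting counter would be printed under the same bare
 * event name, so evlist__disable_uniquify() returns false (the PMUs are not
 * core PMUs) and evsel__uniquify_counter() is left to fold the PMU name into
 * the displayed counter name.  Conversely, for a plain "cycles,instructions"
 * list on a single core PMU all counters share that PMU, uniquification is
 * disabled and the names are reported as given.
 */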