// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"
#include "strbuf.h"

/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *            directory contains a "cpus" file. All PMUs belonging to
 *            core_pmus must have pmu->is_core=1. If there is more than one
 *            PMU in this list, perf interprets it as a heterogeneous
 *            platform. (FWIW, certain ARM platforms with heterogeneous cores
 *            use a homogeneous PMU, and thus they are treated as a
 *            homogeneous platform by perf because core_pmus will have only
 *            one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether a PMU is present per SMT-thread or
 *             outside of the core in the hw. For example, an instance of the
 *             AMD ibs_fetch// and ibs_op// PMUs is present in each hw SMT
 *             thread, yet they are captured under other_pmus. PMUs belonging
 *             to other_pmus must have pmu->is_core=0, but pmu->is_uncore can
 *             be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

static void pmu_read_sysfs(bool core_only);

int pmu_name_len_no_suffix(const char *str, unsigned long *num)
{
	int orig_len, len;

	orig_len = len = strlen(str);

	/* Non-uncore PMUs have their full length, for example, i915. */
	if (!strstarts(str, "uncore_"))
		return len;

	/*
	 * Count trailing digits; if a '_{num}' suffix isn't present, use
	 * the full length.
	 */
	while (len > 0 && isdigit(str[len - 1]))
		len--;

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		if (num)
			*num = strtoul(&str[len], NULL, 10);
		return len - 1;
	}
	return orig_len;
}
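
/*
 * Illustration (added example, not from the original source): given the
 * suffix handling above, pmu_name_len_no_suffix() behaves roughly as
 * follows for the usual sysfs PMU names:
 *
 *	unsigned long num;
 *
 *	pmu_name_len_no_suffix("i915", &num);		// 4 (no "uncore_" prefix)
 *	pmu_name_len_no_suffix("uncore_imc", &num);	// 10, no "_<num>" suffix
 *	pmu_name_len_no_suffix("uncore_imc_3", &num);	// 10, num == 3
 */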

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
			       /*eager_load=*/false);
	close(dirfd);

	if (!pmu) {
		/*
		 * Looking up an individual PMU failed. This may mean the name
		 * is an alias, so read the PMUs from sysfs and try to find it
		 * again.
		 */
		pmu_read_sysfs(core_pmu);
		pmu = pmu_find(name);
	}
	return pmu;
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
				/*eager_load=*/false);
}

static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	unsigned long lhs_num = 0, rhs_num = 0;
	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
	const char *lhs_pmu_name = lhs_pmu->name ?: "";
	const char *rhs_pmu_name = rhs_pmu->name ?: "";
	int lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name, &lhs_num);
	int rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name, &rhs_num);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			  lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);

	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
		return ret;

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}
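
/*
 * Example (added for illustration): because pmus_cmp() compares the numeric
 * suffix as a number rather than as text, a sorted other_pmus list ends up
 * as, e.g.,
 *
 *	uncore_cha_0, uncore_cha_1, ..., uncore_cha_9, uncore_cha_10
 *
 * whereas a plain strcmp() would place uncore_cha_10 before uncore_cha_2.
 */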

/* Add all PMUs in sysfs to the PMU list: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);
	list_sort(NULL, &other_pmus, pmus_cmp);
	if (!list_empty(&core_pmus)) {
		read_sysfs_core_pmus = true;
		if (!core_only)
			read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL when the end is reached.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}
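
/*
 * Usage sketch (hypothetical caller, mirroring the pattern used later in
 * this file): both scan routines take the previously returned PMU and hand
 * back the next one, so iteration over every PMU looks like:
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL)
 *		pr_debug("%s is_core=%d\n", pmu->name, pmu->is_core);
 */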

static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL);

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);

			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);

		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}
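
/*
 * Example (added for illustration): with duplicate skipping, PMUs whose
 * names differ only by a "_<num>" suffix are reported once. On a machine
 * exposing uncore_imc_0 through uncore_imc_3, perf_pmus__scan_skip_duplicates()
 * yields a single uncore_imc_<n> entry, while perf_pmus__scan() yields all
 * four.
 */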

const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}
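
/*
 * Example (added for illustration): thanks to the prefix handling above, a
 * filter string of "imc" matches a PMU named "uncore_imc", and on Intel
 * hybrid systems "atom" matches the "cpu_atom" PMU; an exact name such as
 * "cpu_core" also matches directly.
 */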

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	const char *name;
	const char *alias;
	const char *scale_unit;
	const char *desc;
	const char *long_desc;
	const char *encoding_desc;
	const char *topic;
	const char *pmu_name;
	bool deprecated;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	bool a_iscpu, b_iscpu;
	int ret;

	/* Put extra events last. */
	if (!!as->desc != !!bs->desc)
		return !!as->desc - !!bs->desc;

	/* Order by topics. */
	ret = strcmp(as->topic ?: "", bs->topic ?: "");
	if (ret)
		return ret;

	/* Order CPU core events to be first. */
	a_iscpu = as->pmu ? as->pmu->is_core : true;
	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
	if (a_iscpu != b_iscpu)
		return a_iscpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(as->name, bs->name);
}

static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
	/* Different names -> never duplicates. */
	if (strcmp(a->name ?: "//", b->name ?: "//"))
		return false;

	/* Don't remove duplicates for different PMUs. */
	return strcmp(a->pmu_name, b->pmu_name) == 0;
}

struct events_callback_state {
	struct sevent *aliases;
	size_t aliases_len;
	size_t index;
};

static int perf_pmus__print_pmu_events__callback(void *vstate,
						 struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}

void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates. */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		print_cb->print_event(print_state,
				      aliases[j].pmu_name,
				      aliases[j].topic,
				      aliases[j].name,
				      aliases[j].alias,
				      aliases[j].scale_unit,
				      aliases[j].deprecated,
				      "Kernel PMU event",
				      aliases[j].desc,
				      aliases[j].long_desc,
				      aliases[j].encoding_desc);
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

struct build_format_string_args {
	struct strbuf short_string;
	struct strbuf long_string;
	int num_formats;
};

static int build_format_string(void *state, const char *name, int config,
			       const unsigned long *bits)
{
	struct build_format_string_args *args = state;
	unsigned int num_bits;
	int ret1, ret2 = 0;

	(void)config;
	args->num_formats++;
	if (args->num_formats > 1) {
		strbuf_addch(&args->long_string, ',');
		if (args->num_formats < 4)
			strbuf_addch(&args->short_string, ',');
	}
	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
	if (num_bits <= 1) {
		ret1 = strbuf_addf(&args->long_string, "%s", name);
		if (args->num_formats < 4)
			ret2 = strbuf_addf(&args->short_string, "%s", name);
	} else if (num_bits > 8) {
		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	} else {
		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	}
	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
}
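
/*
 * Example (illustrative; actual fields depend on each PMU's sysfs format
 * directory): for a core PMU named "cpu" with formats event (8 bits),
 * umask (8 bits) and edge (1 bit), the caller below ends up printing
 *
 *	cpu/event=0..255,umask=0..255,edge/modifier
 *
 * for both the short and the long form; once a PMU has more than three
 * formats, the short form is cut down to "<pmu>/<first three>,.../modifier"
 * while the long form keeps every field.
 */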

void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
	struct perf_pmu *pmu = NULL;

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	while ((pmu = scan_fn(pmu)) != NULL) {
		struct build_format_string_args format_args = {
			.short_string = STRBUF_INIT,
			.long_string = STRBUF_INIT,
			.num_formats = 0,
		};
		int len = pmu_name_len_no_suffix(pmu->name, /*num=*/NULL);
		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";

		if (!pmu->is_core)
			desc = NULL;

		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
		perf_pmu__for_each_format(pmu, &format_args, build_format_string);

		if (format_args.num_formats > 3)
			strbuf_addf(&format_args.short_string, ",.../modifier");
		else
			strbuf_addf(&format_args.short_string, "/modifier");

		strbuf_addf(&format_args.long_string, "/modifier");
		print_cb->print_event(print_state,
				      /*topic=*/NULL,
				      /*pmu_name=*/NULL,
				      format_args.short_string.buf,
				      /*event_alias=*/NULL,
				      /*scale_unit=*/NULL,
				      /*deprecated=*/false,
				      "Raw event descriptor",
				      desc,
				      /*long_desc=*/NULL,
				      format_args.long_string.buf);

		strbuf_release(&format_args.short_string);
		strbuf_release(&format_args.long_string);
	}
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}

char *perf_pmus__default_pmu_name(void)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	char *result = NULL;

	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return strdup("cpu");

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return strdup("cpu");
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);
			break;
		}
	}

	closedir(dir);
	return result ?: strdup("cpu");
}

struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}

struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(NULL);
}

struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
{
	/*
	 * Some PMU functions read from the sysfs mount point, so care is
	 * needed, hence passing the eager_load flag to load things like the
	 * format files.
	 */
	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}
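
/*
 * Test usage sketch (hypothetical; the fixture name and directory layout are
 * assumptions): perf's self tests point test_sysfs_dirfd at a temporary
 * directory that mimics /sys/bus/event_source/devices/<pmu>/ and then call
 *
 *	struct perf_pmu *test_pmu = perf_pmus__add_test_pmu(dirfd, "perf_test_pmu");
 *
 * so format and event parsing can be exercised without touching real sysfs.
 */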