xref: /linux/tools/perf/util/pmus.c (revision f4db95b68ae68ebaf91d35cc0487ac1cbd04261e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/list_sort.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <subcmd/pager.h>
7 #include <sys/types.h>
8 #include <ctype.h>
9 #include <dirent.h>
10 #include <pthread.h>
11 #include <string.h>
12 #include <unistd.h>
13 #include "cpumap.h"
14 #include "debug.h"
15 #include "evsel.h"
16 #include "pmus.h"
17 #include "pmu.h"
18 #include "tool_pmu.h"
19 #include "print-events.h"
20 #include "strbuf.h"
21 
22 /*
23  * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
24  *             directory contains a "cpus" file. All PMUs belonging to
25  *             core_pmus must have pmu->is_core=1. If there is more than one
26  *             PMU in this list, perf interprets it as a heterogeneous platform.
27  *             (FWIW, certain ARM platforms with heterogeneous cores use a
28  *             homogeneous PMU, and thus perf treats them as a homogeneous
29  *             platform because core_pmus will have only one entry.)
30  * other_pmus: All other PMUs that are not part of the core_pmus list. It
31  *             doesn't matter whether the PMU is present per SMT thread or
32  *             outside the core in hardware. For example, an instance of the
33  *             AMD ibs_fetch// and ibs_op// PMUs is present in each hardware
34  *             SMT thread, yet they are captured under other_pmus. PMUs in
35  *             other_pmus must have pmu->is_core=0, but pmu->is_uncore can be 0 or 1.
36  */
37 static LIST_HEAD(core_pmus);
38 static LIST_HEAD(other_pmus);
39 static bool read_sysfs_core_pmus;
40 static bool read_sysfs_all_pmus;
41 
42 static void pmu_read_sysfs(bool core_only);
43 
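/*
 * Return the length of a PMU name ignoring a trailing "_<num>" instance
 * suffix, e.g. "uncore_cha_0" yields strlen("uncore_cha"). Hexadecimal
 * suffixes are only stripped when longer than 2 digits so that names such
 * as S390's cpum_cf keep their full length.
 */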
44 size_t pmu_name_len_no_suffix(const char *str)
45 {
46 	int orig_len, len;
47 	bool has_hex_digits = false;
48 
49 	orig_len = len = strlen(str);
50 
51 	/* Count trailing digits. */
52 	while (len > 0 && isxdigit(str[len - 1])) {
53 		if (!isdigit(str[len - 1]))
54 			has_hex_digits = true;
55 		len--;
56 	}
57 
58 	if (len > 0 && len != orig_len && str[len - 1] == '_') {
59 		/*
60 		 * There is a '_{num}' suffix. For decimal suffixes any length
61 		 * will do, for hexadecimal ensure more than 2 hex digits so
62 		 * that S390's cpum_cf PMU doesn't match.
63 		 */
64 		if (!has_hex_digits || (orig_len - len) > 2)
65 			return len - 1;
66 	}
67 	/* Use the full length. */
68 	return orig_len;
69 }
70 
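/*
 * Compare two PMU names, first by the suffix-less name and then numerically
 * by the "_<num>" suffix (parsed as hex), so that e.g. "uncore_cha_2" orders
 * before "uncore_cha_10".
 */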
71 int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
72 {
73 	unsigned long long lhs_num = 0, rhs_num = 0;
74 	size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
75 	size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
76 	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
77 			lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);
78 
79 	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
80 		return ret;
81 
82 	if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
83 		lhs_num = strtoull(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
84 	if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
85 		rhs_num = strtoull(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);
86 
87 	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
88 }
89 
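/*
 * Free every PMU on both lists and clear the sysfs-read flags so that a
 * later lookup re-reads sysfs.
 */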
90 void perf_pmus__destroy(void)
91 {
92 	struct perf_pmu *pmu, *tmp;
93 
94 	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
95 		list_del(&pmu->list);
96 
97 		perf_pmu__delete(pmu);
98 	}
99 	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
100 		list_del(&pmu->list);
101 
102 		perf_pmu__delete(pmu);
103 	}
104 	read_sysfs_core_pmus = false;
105 	read_sysfs_all_pmus = false;
106 }
107 
108 static struct perf_pmu *pmu_find(const char *name)
109 {
110 	struct perf_pmu *pmu;
111 
112 	list_for_each_entry(pmu, &core_pmus, list) {
113 		if (!strcmp(pmu->name, name) ||
114 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
115 			return pmu;
116 	}
117 	list_for_each_entry(pmu, &other_pmus, list) {
118 		if (!strcmp(pmu->name, name) ||
119 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
120 			return pmu;
121 	}
122 
123 	return NULL;
124 }
125 
126 struct perf_pmu *perf_pmus__find(const char *name)
127 {
128 	struct perf_pmu *pmu;
129 	int dirfd;
130 	bool core_pmu;
131 
132 	/*
133 	 * Once a PMU is loaded it stays in the list, so this
134 	 * avoids reading and parsing the PMU format
135 	 * definitions multiple times.
136 	 */
137 	pmu = pmu_find(name);
138 	if (pmu)
139 		return pmu;
140 
141 	if (read_sysfs_all_pmus)
142 		return NULL;
143 
144 	core_pmu = is_pmu_core(name);
145 	if (core_pmu && read_sysfs_core_pmus)
146 		return NULL;
147 
148 	dirfd = perf_pmu__event_source_devices_fd();
149 	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
150 			       /*eager_load=*/false);
151 	close(dirfd);
152 
153 	if (!pmu) {
154 		/*
155 		 * Looking up an individual PMU failed. This may mean the name is
156 		 * an alias, so read the PMUs from sysfs and try to find again.
157 		 */
158 		pmu_read_sysfs(core_pmu);
159 		pmu = pmu_find(name);
160 	}
161 	return pmu;
162 }
163 
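/*
 * Like perf_pmus__find() but reuses an already open event_source devices
 * dirfd and, on a miss, does not fall back to reading all PMUs from sysfs.
 */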
164 static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
165 {
166 	struct perf_pmu *pmu;
167 	bool core_pmu;
168 
169 	/*
170 	 * Once a PMU is loaded it stays in the list, so this
171 	 * avoids reading and parsing the PMU format
172 	 * definitions multiple times.
173 	 */
174 	pmu = pmu_find(name);
175 	if (pmu)
176 		return pmu;
177 
178 	if (read_sysfs_all_pmus)
179 		return NULL;
180 
181 	core_pmu = is_pmu_core(name);
182 	if (core_pmu && read_sysfs_core_pmus)
183 		return NULL;
184 
185 	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
186 				/*eager_load=*/false);
187 }
188 
189 static int pmus_cmp(void *priv __maybe_unused,
190 		    const struct list_head *lhs, const struct list_head *rhs)
191 {
192 	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
193 	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
194 
195 	return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
196 }
197 
198 /* Add all PMUs in sysfs to the core_pmus/other_pmus lists: */
199 static void pmu_read_sysfs(bool core_only)
200 {
201 	int fd;
202 	DIR *dir;
203 	struct dirent *dent;
204 	struct perf_pmu *tool_pmu;
205 
206 	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
207 		return;
208 
209 	fd = perf_pmu__event_source_devices_fd();
210 	if (fd < 0)
211 		return;
212 
213 	dir = fdopendir(fd);
214 	if (!dir) {
215 		close(fd);
216 		return;
217 	}
218 
219 	while ((dent = readdir(dir))) {
220 		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
221 			continue;
222 		if (core_only && !is_pmu_core(dent->d_name))
223 			continue;
224 		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
225 		perf_pmu__find2(fd, dent->d_name);
226 	}
227 
228 	closedir(dir);
229 	if (list_empty(&core_pmus)) {
230 		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
231 			pr_err("Failure to set up any core PMUs\n");
232 	}
233 	list_sort(NULL, &core_pmus, pmus_cmp);
234 	if (!core_only) {
235 		tool_pmu = perf_pmus__tool_pmu();
236 		list_add_tail(&tool_pmu->list, &other_pmus);
237 	}
238 	list_sort(NULL, &other_pmus, pmus_cmp);
239 	if (!list_empty(&core_pmus)) {
240 		read_sysfs_core_pmus = true;
241 		if (!core_only)
242 			read_sysfs_all_pmus = true;
243 	}
244 }
245 
246 static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
247 {
248 	struct perf_pmu *pmu;
249 
250 	list_for_each_entry(pmu, &core_pmus, list) {
251 		if (pmu->type == type)
252 			return pmu;
253 	}
254 
255 	list_for_each_entry(pmu, &other_pmus, list) {
256 		if (pmu->type == type)
257 			return pmu;
258 	}
259 	return NULL;
260 }
261 
262 struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
263 {
264 	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);
265 
266 	if (pmu || read_sysfs_all_pmus)
267 		return pmu;
268 
269 	pmu_read_sysfs(/*core_only=*/false);
270 	pmu = __perf_pmus__find_by_type(type);
271 	return pmu;
272 }
273 
274 /*
275  * PMU iterator: if pmu is NULL, start at the beginning, otherwise return
276  * the next PMU. Returns NULL at the end.
277  */
278 struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
279 {
280 	bool use_core_pmus = !pmu || pmu->is_core;
281 
282 	if (!pmu) {
283 		pmu_read_sysfs(/*core_only=*/false);
284 		pmu = list_prepare_entry(pmu, &core_pmus, list);
285 	}
286 	if (use_core_pmus) {
287 		list_for_each_entry_continue(pmu, &core_pmus, list)
288 			return pmu;
289 
290 		pmu = NULL;
291 		pmu = list_prepare_entry(pmu, &other_pmus, list);
292 	}
293 	list_for_each_entry_continue(pmu, &other_pmus, list)
294 		return pmu;
295 	return NULL;
296 }
297 
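/*
 * Iterate just the core PMUs; when called with a NULL pmu, only the core
 * PMUs are read from sysfs.
 */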
298 struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
299 {
300 	if (!pmu) {
301 		pmu_read_sysfs(/*core_only=*/true);
302 		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
303 	}
304 	list_for_each_entry_continue(pmu, &core_pmus, list)
305 		return pmu;
306 
307 	return NULL;
308 }
309 
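/*
 * As perf_pmus__scan() but PMUs that differ only by a "_<num>" suffix from
 * the previously returned PMU are skipped, so e.g. only one uncore_cha_<num>
 * instance is reported. Relies on the lists being sorted by pmus_cmp().
 */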
310 static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
311 {
312 	bool use_core_pmus = !pmu || pmu->is_core;
313 	int last_pmu_name_len = 0;
314 	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";
315 
316 	if (!pmu) {
317 		pmu_read_sysfs(/*core_only=*/false);
318 		pmu = list_prepare_entry(pmu, &core_pmus, list);
319 	} else
320 		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
321 
322 	if (use_core_pmus) {
323 		list_for_each_entry_continue(pmu, &core_pmus, list) {
324 			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
325 
326 			if (last_pmu_name_len == pmu_name_len &&
327 			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
328 				continue;
329 
330 			return pmu;
331 		}
332 		pmu = NULL;
333 		pmu = list_prepare_entry(pmu, &other_pmus, list);
334 	}
335 	list_for_each_entry_continue(pmu, &other_pmus, list) {
336 		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
337 
338 		if (last_pmu_name_len == pmu_name_len &&
339 		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
340 			continue;
341 
342 		return pmu;
343 	}
344 	return NULL;
345 }
346 
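/*
 * Find the PMU whose name matches str, also accepting the name without a
 * leading "uncore_" or Intel hybrid "cpu_" prefix.
 */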
347 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
348 {
349 	struct perf_pmu *pmu = NULL;
350 
351 	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
352 		if (!strcmp(pmu->name, str))
353 			return pmu;
354 		/* Ignore "uncore_" prefix. */
355 		if (!strncmp(pmu->name, "uncore_", 7)) {
356 			if (!strcmp(pmu->name + 7, str))
357 				return pmu;
358 		}
359 		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
360 		if (!strncmp(pmu->name, "cpu_", 4)) {
361 			if (!strcmp(pmu->name + 4, str))
362 				return pmu;
363 		}
364 	}
365 	return NULL;
366 }
367 
368 /** Struct for ordering events as output in perf list. */
369 struct sevent {
370 	/** PMU for event. */
371 	const struct perf_pmu *pmu;
372 	const char *name;
373 	const char *alias;
374 	const char *scale_unit;
375 	const char *desc;
376 	const char *long_desc;
377 	const char *encoding_desc;
378 	const char *topic;
379 	const char *pmu_name;
380 	const char *event_type_desc;
381 	bool deprecated;
382 };
383 
384 static int cmp_sevent(const void *a, const void *b)
385 {
386 	const struct sevent *as = a;
387 	const struct sevent *bs = b;
388 	bool a_iscpu, b_iscpu;
389 	int ret;
390 
391 	/* Put extra events last. */
392 	if (!!as->desc != !!bs->desc)
393 		return !!as->desc - !!bs->desc;
394 
395 	/* Order by topics. */
396 	ret = strcmp(as->topic ?: "", bs->topic ?: "");
397 	if (ret)
398 		return ret;
399 
400 	/* Order CPU core events to be first */
401 	a_iscpu = as->pmu ? as->pmu->is_core : true;
402 	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
403 	if (a_iscpu != b_iscpu)
404 		return a_iscpu ? -1 : 1;
405 
406 	/* Order by PMU name. */
407 	if (as->pmu != bs->pmu) {
408 		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
409 		if (ret)
410 			return ret;
411 	}
412 
413 	/* Order by event name. */
414 	return strcmp(as->name, bs->name);
415 }
416 
417 static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
418 {
419 	/* Different names -> never duplicates */
420 	if (strcmp(a->name ?: "//", b->name ?: "//"))
421 		return false;
422 
423 	/* Only treat them as duplicates when they come from the same PMU. */
424 	return strcmp(a->pmu_name, b->pmu_name) == 0;
425 }
426 
427 struct events_callback_state {
428 	struct sevent *aliases;
429 	size_t aliases_len;
430 	size_t index;
431 };
432 
433 static int perf_pmus__print_pmu_events__callback(void *vstate,
434 						struct pmu_event_info *info)
435 {
436 	struct events_callback_state *state = vstate;
437 	struct sevent *s;
438 
439 	if (state->index >= state->aliases_len) {
440 		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
441 		return 1;
442 	}
443 	assert(info->pmu != NULL || info->name != NULL);
444 	s = &state->aliases[state->index];
445 	s->pmu = info->pmu;
446 #define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
447 	COPY_STR(name);
448 	COPY_STR(alias);
449 	COPY_STR(scale_unit);
450 	COPY_STR(desc);
451 	COPY_STR(long_desc);
452 	COPY_STR(encoding_desc);
453 	COPY_STR(topic);
454 	COPY_STR(pmu_name);
455 	COPY_STR(event_type_desc);
456 #undef COPY_STR
457 	s->deprecated = info->deprecated;
458 	state->index++;
459 	return 0;
460 }
461 
462 void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
463 {
464 	struct perf_pmu *pmu;
465 	int printed = 0;
466 	int len;
467 	struct sevent *aliases;
468 	struct events_callback_state state;
469 	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
470 	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
471 
472 	if (skip_duplicate_pmus)
473 		scan_fn = perf_pmus__scan_skip_duplicates;
474 	else
475 		scan_fn = perf_pmus__scan;
476 
477 	pmu = NULL;
478 	len = 0;
479 	while ((pmu = scan_fn(pmu)) != NULL)
480 		len += perf_pmu__num_events(pmu);
481 
482 	aliases = zalloc(sizeof(struct sevent) * len);
483 	if (!aliases) {
484 		pr_err("FATAL: not enough memory to print PMU events\n");
485 		return;
486 	}
487 	pmu = NULL;
488 	state = (struct events_callback_state) {
489 		.aliases = aliases,
490 		.aliases_len = len,
491 		.index = 0,
492 	};
493 	while ((pmu = scan_fn(pmu)) != NULL) {
494 		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
495 					 perf_pmus__print_pmu_events__callback);
496 	}
497 	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
498 	for (int j = 0; j < len; j++) {
499 		/* Skip duplicates */
500 		if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
501 			goto free;
502 
503 		print_cb->print_event(print_state,
504 				aliases[j].pmu_name,
505 				aliases[j].topic,
506 				aliases[j].name,
507 				aliases[j].alias,
508 				aliases[j].scale_unit,
509 				aliases[j].deprecated,
510 				aliases[j].event_type_desc,
511 				aliases[j].desc,
512 				aliases[j].long_desc,
513 				aliases[j].encoding_desc);
514 free:
515 		zfree(&aliases[j].name);
516 		zfree(&aliases[j].alias);
517 		zfree(&aliases[j].scale_unit);
518 		zfree(&aliases[j].desc);
519 		zfree(&aliases[j].long_desc);
520 		zfree(&aliases[j].encoding_desc);
521 		zfree(&aliases[j].topic);
522 		zfree(&aliases[j].pmu_name);
523 		zfree(&aliases[j].event_type_desc);
524 	}
525 	if (printed && pager_in_use())
526 		printf("\n");
527 
528 	zfree(&aliases);
529 }
530 
531 struct build_format_string_args {
532 	struct strbuf short_string;
533 	struct strbuf long_string;
534 	int num_formats;
535 };
536 
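/*
 * perf_pmu__for_each_format() callback: append "<name>" or "<name>=0..<max>"
 * for each format term. The short string is limited to the first three
 * terms, the long string gets them all.
 */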
537 static int build_format_string(void *state, const char *name, int config,
538 			       const unsigned long *bits)
539 {
540 	struct build_format_string_args *args = state;
541 	unsigned int num_bits;
542 	int ret1, ret2 = 0;
543 
544 	(void)config;
545 	args->num_formats++;
546 	if (args->num_formats > 1) {
547 		strbuf_addch(&args->long_string, ',');
548 		if (args->num_formats < 4)
549 			strbuf_addch(&args->short_string, ',');
550 	}
551 	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
552 	if (num_bits <= 1) {
553 		ret1 = strbuf_addf(&args->long_string, "%s", name);
554 		if (args->num_formats < 4)
555 			ret2 = strbuf_addf(&args->short_string, "%s", name);
556 	} else if (num_bits > 8) {
557 		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
558 				   ULLONG_MAX >> (64 - num_bits));
559 		if (args->num_formats < 4) {
560 			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
561 					   ULLONG_MAX >> (64 - num_bits));
562 		}
563 	} else {
564 		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
565 				  ULLONG_MAX >> (64 - num_bits));
566 		if (args->num_formats < 4) {
567 			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
568 					   ULLONG_MAX >> (64 - num_bits));
569 		}
570 	}
571 	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
572 }
573 
574 void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
575 {
576 	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
577 	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
578 	struct perf_pmu *pmu = NULL;
579 
580 	if (skip_duplicate_pmus)
581 		scan_fn = perf_pmus__scan_skip_duplicates;
582 	else
583 		scan_fn = perf_pmus__scan;
584 
585 	while ((pmu = scan_fn(pmu)) != NULL) {
586 		struct build_format_string_args format_args = {
587 			.short_string = STRBUF_INIT,
588 			.long_string = STRBUF_INIT,
589 			.num_formats = 0,
590 		};
591 		int len = pmu_name_len_no_suffix(pmu->name);
592 		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";
593 
594 		if (!pmu->is_core)
595 			desc = NULL;
596 
597 		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
598 		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
599 		perf_pmu__for_each_format(pmu, &format_args, build_format_string);
600 
601 		if (format_args.num_formats > 3)
602 			strbuf_addf(&format_args.short_string, ",.../modifier");
603 		else
604 			strbuf_addf(&format_args.short_string, "/modifier");
605 
606 		strbuf_addf(&format_args.long_string, "/modifier");
607 		print_cb->print_event(print_state,
608 				/*topic=*/NULL,
609 				/*pmu_name=*/NULL,
610 				format_args.short_string.buf,
611 				/*event_alias=*/NULL,
612 				/*scale_unit=*/NULL,
613 				/*deprecated=*/false,
614 				"Raw event descriptor",
615 				desc,
616 				/*long_desc=*/NULL,
617 				format_args.long_string.buf);
618 
619 		strbuf_release(&format_args.short_string);
620 		strbuf_release(&format_args.long_string);
621 	}
622 }
623 
624 bool perf_pmus__have_event(const char *pname, const char *name)
625 {
626 	struct perf_pmu *pmu = perf_pmus__find(pname);
627 
628 	return pmu && perf_pmu__have_event(pmu, name);
629 }
630 
631 int perf_pmus__num_core_pmus(void)
632 {
633 	static int count;
634 
635 	if (!count) {
636 		struct perf_pmu *pmu = NULL;
637 
638 		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
639 			count++;
640 	}
641 	return count;
642 }
643 
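/*
 * Extended types require more than one core PMU and that every core PMU
 * accepts a PERF_TYPE_HARDWARE event with its PMU type encoded in the high
 * config bits (PERF_PMU_TYPE_SHIFT).
 */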
644 static bool __perf_pmus__supports_extended_type(void)
645 {
646 	struct perf_pmu *pmu = NULL;
647 
648 	if (perf_pmus__num_core_pmus() <= 1)
649 		return false;
650 
651 	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
652 		if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
653 			return false;
654 	}
655 
656 	return true;
657 }
658 
659 static bool perf_pmus__do_support_extended_type;
660 
661 static void perf_pmus__init_supports_extended_type(void)
662 {
663 	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
664 }
665 
666 bool perf_pmus__supports_extended_type(void)
667 {
668 	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;
669 
670 	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);
671 
672 	return perf_pmus__do_support_extended_type;
673 }
674 
675 char *perf_pmus__default_pmu_name(void)
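/*
 * Return the name of the first core PMU, or "cpu" when none can be found.
 * The returned string is allocated with strdup() and owned by the caller.
 */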
676 {
677 	int fd;
678 	DIR *dir;
679 	struct dirent *dent;
680 	char *result = NULL;
681 
682 	if (!list_empty(&core_pmus))
683 		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);
684 
685 	fd = perf_pmu__event_source_devices_fd();
686 	if (fd < 0)
687 		return strdup("cpu");
688 
689 	dir = fdopendir(fd);
690 	if (!dir) {
691 		close(fd);
692 		return strdup("cpu");
693 	}
694 
695 	while ((dent = readdir(dir))) {
696 		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
697 			continue;
698 		if (is_pmu_core(dent->d_name)) {
699 			result = strdup(dent->d_name);
700 			break;
701 		}
702 	}
703 
704 	closedir(dir);
705 	return result ?: strdup("cpu");
706 }
707 
708 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
709 {
710 	struct perf_pmu *pmu = evsel->pmu;
711 
712 	if (!pmu) {
713 		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
714 		((struct evsel *)evsel)->pmu = pmu;
715 	}
716 	return pmu;
717 }
718 
719 struct perf_pmu *perf_pmus__find_core_pmu(void)
720 {
721 	return perf_pmus__scan_core(NULL);
722 }
723 
724 struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
725 {
726 	/*
727 	 * Some PMU functions read from the sysfs mount point, so care is
728 	 * needed, hence passing the eager_load flag to load things like the
729 	 * format files.
730 	 */
731 	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
732 }
733 
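/*
 * A singleton fake PMU (PERF_PMU_TYPE_FAKE) that isn't backed by sysfs,
 * e.g. for tests that need a PMU without touching the host's PMUs.
 */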
734 struct perf_pmu *perf_pmus__fake_pmu(void)
735 {
736 	static struct perf_pmu fake = {
737 		.name = "fake",
738 		.type = PERF_PMU_TYPE_FAKE,
739 		.format = LIST_HEAD_INIT(fake.format),
740 	};
741 
742 	return &fake;
743 }
744