// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"
#include "strbuf.h"

/*
 * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *             directory contains a "cpus" file. All PMUs belonging to core_pmus
 *             must have pmu->is_core=1. If there is more than one PMU in
 *             this list, perf interprets it as a heterogeneous platform.
 *             (FWIW, certain ARM platforms with heterogeneous cores use a
 *             homogeneous PMU, and thus they are treated as a homogeneous
 *             platform by perf because core_pmus will have only one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether the PMU is present per SMT thread or
 *             outside of the core in the hw. For example, an instance of the
 *             AMD ibs_fetch// and ibs_op// PMUs is present in each hw SMT
 *             thread, however they are captured under other_pmus. PMUs
 *             belonging to other_pmus must have pmu->is_core=0 but
 *             pmu->is_uncore could be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

static void pmu_read_sysfs(bool core_only);

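/*
 * Length of the PMU name ignoring any "_<num>" suffix: decimal suffixes of
 * any length are stripped, hexadecimal ones only when longer than two digits
 * (so that S390's cpum_cf isn't treated as having a suffix). E.g. names such
 * as "uncore_cha_0" and "uncore_cha_1" yield the same length.
 */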
size_t pmu_name_len_no_suffix(const char *str)
{
	int orig_len, len;
	bool has_hex_digits = false;

	orig_len = len = strlen(str);

	/* Count trailing digits. */
	while (len > 0 && isxdigit(str[len - 1])) {
		if (!isdigit(str[len - 1]))
			has_hex_digits = true;
		len--;
	}

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		/*
		 * There is a '_{num}' suffix. For decimal suffixes any length
		 * will do, for hexadecimal ensure more than 2 hex digits so
		 * that S390's cpum_cf PMU doesn't match.
		 */
		if (!has_hex_digits || (orig_len - len) > 2)
			return len - 1;
	}
	/* Use the full length. */
	return orig_len;
}

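/*
 * Compare two PMU names, ordering primarily by the name without its "_<num>"
 * suffix and then numerically by the suffix (parsed as hexadecimal). Used by
 * pmus_cmp() to sort the core and other PMU lists.
 */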
int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
	unsigned long long lhs_num = 0, rhs_num = 0;
	size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
	size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);

	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
		return ret;

	if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
		lhs_num = strtoull(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
	if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
		rhs_num = strtoull(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}

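/* Free all PMUs on both lists and reset the sysfs-read state. */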
void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

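/*
 * Look up an already loaded PMU by name or alias name on either list. Doesn't
 * read anything from sysfs; returns NULL if the PMU hasn't been loaded yet.
 */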
static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

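/*
 * Find a PMU by name, lazily loading it from sysfs if it hasn't been read
 * yet. Falls back to reading all (or all core) PMUs from sysfs when the name
 * may be an alias.
 */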
struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, which avoids reading and
	 * parsing the pmu format definitions multiple times.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
			       /*eager_load=*/false);
	close(dirfd);

	if (!pmu) {
		/*
		 * Looking up an individual PMU failed. This may mean name is
		 * an alias, so read the PMUs from sysfs and try to find it
		 * again.
		 */
		pmu_read_sysfs(core_pmu);
		pmu = pmu_find(name);
	}
	return pmu;
}

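/*
 * As perf_pmus__find() but with an already opened event source devices
 * directory fd and without the sysfs re-read fallback.
 */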
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, which avoids reading and
	 * parsing the pmu format definitions multiple times.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
				/*eager_load=*/false);
}

static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);

	return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
}

/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);
	list_sort(NULL, &other_pmus, pmus_cmp);
	if (!list_empty(&core_pmus)) {
		read_sysfs_core_pmus = true;
		if (!core_only)
			read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

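/*
 * Find the PMU with the given type (as used in perf_event_attr.type), reading
 * all PMUs from sysfs if they haven't been loaded yet.
 */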
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * pmu iterator: If pmu is NULL, start at the beginning, otherwise return the
 * next pmu. Returns NULL at the end.
 */
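/*
 * Typical usage, as in perf_pmus__pmu_for_pmu_filter() below:
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
 *		... use pmu ...
 *	}
 */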
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

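/* As perf_pmus__scan() but only iterates the core PMUs. */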
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}

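/*
 * As perf_pmus__scan() but, relying on the lists being sorted, skips PMUs
 * whose name matches the previously returned PMU once any "_<num>" suffix is
 * ignored (e.g. successive uncore PMU instances).
 */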
static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}

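/*
 * Match a PMU against a user supplied filter string: accept an exact name
 * match, or a match once a leading "uncore_" or "cpu_" prefix on the PMU name
 * is ignored.
 */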
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	const char *name;
	const char *alias;
	const char *scale_unit;
	const char *desc;
	const char *long_desc;
	const char *encoding_desc;
	const char *topic;
	const char *pmu_name;
	const char *event_type_desc;
	bool deprecated;
};

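/*
 * qsort() comparator for perf list output: "extra" events (those with a
 * description) sort last; otherwise order by topic, core PMU events first,
 * then by PMU name and finally by event name.
 */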
static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	bool a_iscpu, b_iscpu;
	int ret;

	/* Put extra events last. */
	if (!!as->desc != !!bs->desc)
		return !!as->desc - !!bs->desc;

	/* Order by topics. */
	ret = strcmp(as->topic ?: "", bs->topic ?: "");
	if (ret)
		return ret;

	/* Order CPU core events to be first */
	a_iscpu = as->pmu ? as->pmu->is_core : true;
	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
	if (a_iscpu != b_iscpu)
		return a_iscpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(as->name, bs->name);
}

static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
	/* Different names -> never duplicates */
	if (strcmp(a->name ?: "//", b->name ?: "//"))
		return false;

	/* Don't remove duplicates for different PMUs */
	return strcmp(a->pmu_name, b->pmu_name) == 0;
}

struct events_callback_state {
	struct sevent *aliases;
	size_t aliases_len;
	size_t index;
};

static int perf_pmus__print_pmu_events__callback(void *vstate,
						struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
	COPY_STR(event_type_desc);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}

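/*
 * Gather every event from every PMU into one array, sort it with cmp_sevent()
 * and print each entry via the provided callbacks, skipping adjacent
 * duplicates.
 */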
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates */
		if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
			goto free;

		print_cb->print_event(print_state,
				aliases[j].pmu_name,
				aliases[j].topic,
				aliases[j].name,
				aliases[j].alias,
				aliases[j].scale_unit,
				aliases[j].deprecated,
				aliases[j].event_type_desc,
				aliases[j].desc,
				aliases[j].long_desc,
				aliases[j].encoding_desc);
free:
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
		zfree(&aliases[j].event_type_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

struct build_format_string_args {
	struct strbuf short_string;
	struct strbuf long_string;
	int num_formats;
};

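/*
 * perf_pmu__for_each_format() callback: appends each format field as "name"
 * or "name=0..<max>" to the long string, and only the first three fields to
 * the short string.
 */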
static int build_format_string(void *state, const char *name, int config,
			       const unsigned long *bits)
{
	struct build_format_string_args *args = state;
	unsigned int num_bits;
	int ret1, ret2 = 0;

	(void)config;
	args->num_formats++;
	if (args->num_formats > 1) {
		strbuf_addch(&args->long_string, ',');
		if (args->num_formats < 4)
			strbuf_addch(&args->short_string, ',');
	}
	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
	if (num_bits <= 1) {
		ret1 = strbuf_addf(&args->long_string, "%s", name);
		if (args->num_formats < 4)
			ret2 = strbuf_addf(&args->short_string, "%s", name);
	} else if (num_bits > 8) {
		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	} else {
		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	}
	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
}

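/*
 * Print a raw event descriptor entry for each PMU, of the form
 * "<pmu>/<format terms>/modifier", with the format terms built by
 * build_format_string() from the PMU's format attributes.
 */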
void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
	struct perf_pmu *pmu = NULL;

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	while ((pmu = scan_fn(pmu)) != NULL) {
		struct build_format_string_args format_args = {
			.short_string = STRBUF_INIT,
			.long_string = STRBUF_INIT,
			.num_formats = 0,
		};
		int len = pmu_name_len_no_suffix(pmu->name);
		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";

		if (!pmu->is_core)
			desc = NULL;

		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
		perf_pmu__for_each_format(pmu, &format_args, build_format_string);

		if (format_args.num_formats > 3)
			strbuf_addf(&format_args.short_string, ",.../modifier");
		else
			strbuf_addf(&format_args.short_string, "/modifier");

		strbuf_addf(&format_args.long_string, "/modifier");
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				format_args.short_string.buf,
				/*event_alias=*/NULL,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				"Raw event descriptor",
				desc,
				/*long_desc=*/NULL,
				format_args.long_string.buf);

		strbuf_release(&format_args.short_string);
		strbuf_release(&format_args.long_string);
	}
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

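/* Number of core PMUs, computed on first use and then cached in a static. */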
int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

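/*
 * On heterogeneous (>1 core PMU) systems, check whether the kernel accepts a
 * PERF_TYPE_HARDWARE event with the PMU type encoded in the upper bits of the
 * config (the "extended type" encoding) for every core PMU.
 */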
static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}

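/*
 * Return a strdup()-ed name of the default core PMU: the first loaded core
 * PMU if any, else the first core PMU directory found in sysfs, falling back
 * to "cpu".
 */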
char *perf_pmus__default_pmu_name(void)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	char *result = NULL;

	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return strdup("cpu");

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return strdup("cpu");
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);
			break;
		}
	}

	closedir(dir);
	return result ?: strdup("cpu");
}

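/*
 * Resolve the PMU for an evsel from its attr type, caching the result in
 * evsel->pmu.
 */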
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}

struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(NULL);
}

struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
{
	/*
	 * Some PMU functions read from the sysfs mount point, so care is
	 * needed, hence passing the eager_load flag to load things like the
	 * format files.
	 */
	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}

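/*
 * A statically allocated placeholder PMU named "fake" with type
 * PERF_PMU_TYPE_FAKE and an empty format list.
 */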
struct perf_pmu *perf_pmus__fake_pmu(void)
{
	static struct perf_pmu fake = {
		.name = "fake",
		.type = PERF_PMU_TYPE_FAKE,
		.format = LIST_HEAD_INIT(fake.format),
	};

	return &fake;
}