xref: /linux/tools/perf/util/pmus.c (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/list_sort.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <api/io_dir.h>
7 #include <subcmd/pager.h>
8 #include <sys/types.h>
9 #include <ctype.h>
10 #include <pthread.h>
11 #include <string.h>
12 #include <unistd.h>
13 #include "cpumap.h"
14 #include "debug.h"
15 #include "drm_pmu.h"
16 #include "evsel.h"
17 #include "pmus.h"
18 #include "pmu.h"
19 #include "hwmon_pmu.h"
20 #include "tool_pmu.h"
21 #include "print-events.h"
22 #include "strbuf.h"
23 #include "string2.h"
24 
/*
 * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *             directory contains a "cpus" file. All PMUs belonging to
 *             core_pmus must have pmu->is_core=1. If there is more than one
 *             PMU in this list, perf interprets it as a heterogeneous
 *             platform. (FWIW, certain ARM platforms having heterogeneous
 *             cores use a homogeneous PMU, and thus they are treated as a
 *             homogeneous platform by perf because core_pmus will have only
 *             one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether a PMU is present per SMT-thread or
 *             outside of the core in the hw. For e.g., an instance of the
 *             AMD ibs_fetch// and ibs_op// PMUs is present in each hw SMT
 *             thread, however they are captured under other_pmus. PMUs
 *             belonging to other_pmus must have pmu->is_core=0 but
 *             pmu->is_uncore could be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
/*
 * Categories of PMU that may be loaded. The *_MASK macros turn each category
 * into a bit so that callers can request, and read_pmu_types can record, any
 * combination of categories.
 */
enum perf_tool_pmu_type {
	PERF_TOOL_PMU_TYPE_PE_CORE,	/* Core perf_event PMUs read from sysfs. */
	PERF_TOOL_PMU_TYPE_PE_OTHER,	/* Non-core perf_event PMUs read from sysfs. */
	PERF_TOOL_PMU_TYPE_TOOL,	/* The in-tool "tool" PMU. */
	PERF_TOOL_PMU_TYPE_HWMON,	/* Hwmon PMUs. */
	PERF_TOOL_PMU_TYPE_DRM,		/* DRM PMUs. */

#define PERF_TOOL_PMU_TYPE_PE_CORE_MASK (1 << PERF_TOOL_PMU_TYPE_PE_CORE)
#define PERF_TOOL_PMU_TYPE_PE_OTHER_MASK (1 << PERF_TOOL_PMU_TYPE_PE_OTHER)
#define PERF_TOOL_PMU_TYPE_TOOL_MASK (1 << PERF_TOOL_PMU_TYPE_TOOL)
#define PERF_TOOL_PMU_TYPE_HWMON_MASK (1 << PERF_TOOL_PMU_TYPE_HWMON)
#define PERF_TOOL_PMU_TYPE_DRM_MASK (1 << PERF_TOOL_PMU_TYPE_DRM)

#define PERF_TOOL_PMU_TYPE_ALL_MASK (PERF_TOOL_PMU_TYPE_PE_CORE_MASK |	\
					PERF_TOOL_PMU_TYPE_PE_OTHER_MASK | \
					PERF_TOOL_PMU_TYPE_TOOL_MASK |	\
					PERF_TOOL_PMU_TYPE_HWMON_MASK | \
					PERF_TOOL_PMU_TYPE_DRM_MASK)
};
/* Mask of PERF_TOOL_PMU_TYPE_*_MASK bits already read into the lists above. */
static unsigned int read_pmu_types;

static void pmu_read_sysfs(unsigned int to_read_pmus);
64 
/*
 * Return the length of the PMU name ignoring any "_<num>" suffix, so that
 * e.g. "uncore_cbox_0" and "uncore_cbox_1" share a common length/prefix.
 * Suffixes may be decimal or (3+ digit) hexadecimal numbers.
 */
size_t pmu_name_len_no_suffix(const char *str)
{
	int orig_len, len;
	bool has_hex_digits = false;

	orig_len = len = strlen(str);

	/*
	 * Count trailing hex digits. Cast to unsigned char: passing a
	 * negative plain-char value to isxdigit()/isdigit() is undefined
	 * behavior (CERT STR37-C).
	 */
	while (len > 0 && isxdigit((unsigned char)str[len - 1])) {
		if (!isdigit((unsigned char)str[len - 1]))
			has_hex_digits = true;
		len--;
	}

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		/*
		 * There is a '_{num}' suffix. For decimal suffixes any length
		 * will do, for hexadecimal ensure more than 2 hex digits so
		 * that S390's cpum_cf PMU doesn't match.
		 */
		if (!has_hex_digits || (orig_len - len) > 2)
			return len - 1;
	}
	/* Use the full length. */
	return orig_len;
}
91 
/*
 * Compare two PMU names, ordering names that differ only by a "_<num>"
 * suffix numerically (suffixes are parsed as hex). Names whose suffix-less
 * prefixes differ, or differ in length, are ordered by strncmp() over the
 * shorter prefix.
 */
int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
	size_t lhs_len = pmu_name_len_no_suffix(lhs_pmu_name);
	size_t rhs_len = pmu_name_len_no_suffix(rhs_pmu_name);
	size_t min_len = lhs_len < rhs_len ? lhs_len : rhs_len;
	int cmp = strncmp(lhs_pmu_name, rhs_pmu_name, min_len);
	unsigned long long lhs_suffix = 0, rhs_suffix = 0;

	/* Prefixes differ, lengths mismatch, or both names are empty. */
	if (cmp != 0 || lhs_len != rhs_len || lhs_len == 0)
		return cmp;

	/* Same prefix: fall back to comparing the numeric suffixes. */
	if (strlen(lhs_pmu_name) > lhs_len + 1)
		lhs_suffix = strtoull(&lhs_pmu_name[lhs_len + 1], NULL, 16);
	if (strlen(rhs_pmu_name) > rhs_len + 1)
		rhs_suffix = strtoull(&rhs_pmu_name[rhs_len + 1], NULL, 16);

	if (lhs_suffix == rhs_suffix)
		return 0;
	return lhs_suffix < rhs_suffix ? -1 : 1;
}
110 
/* Free every loaded PMU and reset state so sysfs is re-read on demand. */
void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	/* Nothing loaded anymore; lets pmu_read_sysfs() populate again. */
	read_pmu_types = 0;
}
127 
pmu_find(const char * name)128 static struct perf_pmu *pmu_find(const char *name)
129 {
130 	struct perf_pmu *pmu;
131 
132 	list_for_each_entry(pmu, &core_pmus, list) {
133 		if (!strcmp(pmu->name, name) ||
134 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
135 			return pmu;
136 	}
137 	list_for_each_entry(pmu, &other_pmus, list) {
138 		if (!strcmp(pmu->name, name) ||
139 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
140 			return pmu;
141 	}
142 
143 	return NULL;
144 }
145 
/*
 * Find a PMU by name, lazily loading only the PMU categories needed to
 * resolve the lookup. Returns NULL if no such PMU exists.
 */
struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;
	unsigned int to_read_pmus = 0;

	/*
	 * Once PMU is loaded it stays in the list,
	 * so we keep us from multiple reading/parsing
	 * the pmu format definitions.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	/* Everything is loaded and the name wasn't found above: give up. */
	if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK)
		return NULL;

	/* A core name can't match anything new if core PMUs were all read. */
	core_pmu = is_pmu_core(name);
	if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK))
		return NULL;

	/* Try to load just the named PMU from sysfs. */
	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
			       /*eager_load=*/false);
	close(dirfd);

	if (pmu)
		return pmu;

	/* Looking up an individual perf event PMU failed, check if a tool PMU should be read. */
	if (!strncmp(name, "hwmon_", 6))
		to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
	else if (!strncmp(name, "drm_", 4))
		to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;
	else if (!strcmp(name, "tool"))
		to_read_pmus |= PERF_TOOL_PMU_TYPE_TOOL_MASK;

	if (to_read_pmus) {
		pmu_read_sysfs(to_read_pmus);
		pmu = pmu_find(name);
		if (pmu)
			return pmu;
	}
	/* Read all necessary PMUs from sysfs and see if the PMU is found. */
	to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK;
	if (!core_pmu)
		to_read_pmus |= PERF_TOOL_PMU_TYPE_PE_OTHER_MASK;
	pmu_read_sysfs(to_read_pmus);
	return pmu_find(name);
}
198 
/*
 * Like perf_pmus__find() but reuses an already-open sysfs devices dirfd and
 * doesn't fall back to loading additional PMU categories. Used while
 * iterating the sysfs devices directory in pmu_read_sysfs().
 */
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once PMU is loaded it stays in the list,
	 * so we keep us from multiple reading/parsing
	 * the pmu format definitions.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	/* Everything is loaded and the name wasn't found above: give up. */
	if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK))
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
				/*eager_load=*/false);
}
223 
/* list_sort() comparator ordering PMUs by name via pmu_name_cmp(). */
static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	const char *lhs_name = container_of(lhs, struct perf_pmu, list)->name;
	const char *rhs_name = container_of(rhs, struct perf_pmu, list)->name;

	/* A missing name sorts as the empty string. */
	return pmu_name_cmp(lhs_name ?: "", rhs_name ?: "");
}
232 
/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(unsigned int to_read_types)
{
	struct perf_pmu *tool_pmu;

	if ((read_pmu_types & to_read_types) == to_read_types) {
		/* All requested PMU types have been read. */
		return;
	}

	/* perf_event PMUs come from the sysfs event source devices dir. */
	if (to_read_types & (PERF_TOOL_PMU_TYPE_PE_CORE_MASK | PERF_TOOL_PMU_TYPE_PE_OTHER_MASK)) {
		int fd = perf_pmu__event_source_devices_fd();
		struct io_dir dir;
		struct io_dirent64 *dent;
		bool core_only = (to_read_types & PERF_TOOL_PMU_TYPE_PE_OTHER_MASK) == 0;

		if (fd < 0)
			goto skip_pe_pmus;

		io_dir__init(&dir, fd);

		while ((dent = io_dir__readdir(&dir)) != NULL) {
			if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
				continue;
			if (core_only && !is_pmu_core(dent->d_name))
				continue;
			/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
			perf_pmu__find2(fd, dent->d_name);
		}

		close(fd);
	}
skip_pe_pmus:
	/* Guarantee at least one core PMU, creating a placeholder if needed. */
	if ((to_read_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK) && list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);

	/* Create requested in-tool PMUs, but only if not already read. */
	if ((to_read_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) != 0 &&
	    (read_pmu_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) == 0) {
		tool_pmu = tool_pmu__new();
		if (tool_pmu)
			list_add_tail(&tool_pmu->list, &other_pmus);
	}
	if ((to_read_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) != 0 &&
	    (read_pmu_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) == 0)
		perf_pmus__read_hwmon_pmus(&other_pmus);

	if ((to_read_types & PERF_TOOL_PMU_TYPE_DRM_MASK) != 0 &&
	    (read_pmu_types & PERF_TOOL_PMU_TYPE_DRM_MASK) == 0)
		perf_pmus__read_drm_pmus(&other_pmus);

	list_sort(NULL, &other_pmus, pmus_cmp);

	/* Record the categories that are now loaded. */
	read_pmu_types |= to_read_types;
}
290 
__perf_pmus__find_by_type(unsigned int type)291 static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
292 {
293 	struct perf_pmu *pmu;
294 
295 	list_for_each_entry(pmu, &core_pmus, list) {
296 		if (pmu->type == type)
297 			return pmu;
298 	}
299 
300 	list_for_each_entry(pmu, &other_pmus, list) {
301 		if (pmu->type == type)
302 			return pmu;
303 	}
304 	return NULL;
305 }
306 
/*
 * Find a PMU by its perf_event_attr type number, lazily loading only the PMU
 * category whose reserved type-number range contains it.
 */
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	unsigned int to_read_pmus;
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK))
		return pmu;

	/* Map the type number onto a PMU category via the reserved ranges. */
	if (type >= PERF_PMU_TYPE_PE_START && type <= PERF_PMU_TYPE_PE_END) {
		to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
			PERF_TOOL_PMU_TYPE_PE_OTHER_MASK;
	} else if (type >= PERF_PMU_TYPE_DRM_START && type <= PERF_PMU_TYPE_DRM_END) {
		to_read_pmus = PERF_TOOL_PMU_TYPE_DRM_MASK;
	} else if (type >= PERF_PMU_TYPE_HWMON_START && type <= PERF_PMU_TYPE_HWMON_END) {
		to_read_pmus = PERF_TOOL_PMU_TYPE_HWMON_MASK;
	} else {
		to_read_pmus = PERF_TOOL_PMU_TYPE_TOOL_MASK;
	}
	pmu_read_sysfs(to_read_pmus);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}
329 
/*
 * pmu iterator: If pmu is NULL, we start at the begin, otherwise return the
 * next pmu. Returns NULL on end. Core PMUs are visited before other PMUs.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		/* Starting a scan: ensure every PMU category is loaded. */
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK);
		/* Fake cursor at the core_pmus head so _continue starts at the first entry. */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		/* Exhausted core PMUs; restart from the other_pmus head. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}
353 
/* Iterate core PMUs only; NULL starts the scan, a NULL return ends it. */
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		/* Only core PMUs need to be loaded for this scan. */
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_PE_CORE_MASK);
		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}
365 
/*
 * Like perf_pmus__scan() but, when starting, only loads the PMU categories
 * that could plausibly provide the given event name.
 */
struct perf_pmu *perf_pmus__scan_for_event(struct perf_pmu *pmu, const char *event)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		/* Hwmon filename values that aren't used. */
		enum hwmon_type type;
		int number;
		/*
		 * Core PMUs, other sysfs PMUs and tool PMU can take all event
		 * types or aren't worth optimizing for.
		 */
		unsigned int to_read_pmus =  PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
			PERF_TOOL_PMU_TYPE_PE_OTHER_MASK |
			PERF_TOOL_PMU_TYPE_TOOL_MASK;

		/* Could the event be a hwmon event? */
		if (parse_hwmon_filename(event, &type, &number, /*item=*/NULL, /*alarm=*/NULL))
			to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;

		/* Could the event be a DRM event? */
		if (strlen(event) > 4 && strncmp("drm-", event, 4) == 0)
			to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;

		pmu_read_sysfs(to_read_pmus);
		/* Fake cursor at the core_pmus head (see perf_pmus__scan()). */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		/* Exhausted core PMUs; restart from the other_pmus head. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}
404 
/*
 * Iterate only PMUs whose name matches the given wildcard pattern; when
 * starting, only loads the PMU categories whose names could match.
 */
struct perf_pmu *perf_pmus__scan_matching_wildcard(struct perf_pmu *pmu, const char *wildcard)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		/*
		 * Core PMUs, other sysfs PMUs and tool PMU can have any name or
		 * aren't worth optimizing for.
		 */
		unsigned int to_read_pmus =  PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
			PERF_TOOL_PMU_TYPE_PE_OTHER_MASK |
			PERF_TOOL_PMU_TYPE_TOOL_MASK;

		/*
		 * Hwmon PMUs have an alias from a sysfs name like hwmon0,
		 * hwmon1, etc. or have a name of hwmon_<name>. They therefore
		 * can only have a wildcard match if the wildcard begins with
		 * "hwmon". Similarly drm PMUs must start "drm_", avoid reading
		 * such events unless the PMU could match.
		 */
		if (strisglob(wildcard)) {
			to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK |
				PERF_TOOL_PMU_TYPE_DRM_MASK;
		} else if (strlen(wildcard) >= 4 && strncmp("drm_", wildcard, 4) == 0) {
			to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;
		} else if (strlen(wildcard) >= 5 && strncmp("hwmon", wildcard, 5) == 0) {
			to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
		}

		pmu_read_sysfs(to_read_pmus);
		/* Fake cursor at the core_pmus head (see perf_pmus__scan()). */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			if (perf_pmu__wildcard_match(pmu, wildcard))
				return pmu;
		}
		/* Exhausted core PMUs; restart from the other_pmus head. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		if (perf_pmu__wildcard_match(pmu, wildcard))
			return pmu;
	}
	return NULL;
}
451 
/*
 * Like perf_pmus__scan() but skips PMUs whose name matches the previously
 * returned PMU's name up to a "_<num>" suffix (e.g. uncore_cbox_0,
 * uncore_cbox_1, ... are reported once). Relies on the lists being sorted.
 */
static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK);
		/* Fake cursor at the core_pmus head (see perf_pmus__scan()). */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

			/* Skip when only the numeric suffix differs. */
			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		/* Exhausted core PMUs; restart from the other_pmus head. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

		/* Skip when only the numeric suffix differs. */
		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}
488 
perf_pmus__pmu_for_pmu_filter(const char * str)489 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
490 {
491 	struct perf_pmu *pmu = NULL;
492 
493 	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
494 		if (!strcmp(pmu->name, str))
495 			return pmu;
496 		/* Ignore "uncore_" prefix. */
497 		if (!strncmp(pmu->name, "uncore_", 7)) {
498 			if (!strcmp(pmu->name + 7, str))
499 				return pmu;
500 		}
501 		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
502 		if (!strncmp(pmu->name, "cpu_", 4)) {
503 			if (!strcmp(pmu->name + 4, str))
504 				return pmu;
505 		}
506 	}
507 	return NULL;
508 }
509 
/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/** Event name; strdup()ed by the populate callback, may be NULL. */
	const char *name;
	/** Alternative event name, may be NULL. */
	const char* alias;
	/** Scale and unit string for counter values, may be NULL. */
	const char *scale_unit;
	/** Short description, may be NULL; events without one sort first. */
	const char *desc;
	/** Long description, may be NULL. */
	const char *long_desc;
	/** Description of the raw encoding, may be NULL. */
	const char *encoding_desc;
	/** Topic grouping used as the primary sort key after desc. */
	const char *topic;
	/** Name of the owning PMU, may be NULL. */
	const char *pmu_name;
	/** Description of the event's type, may be NULL. */
	const char *event_type_desc;
	/** True if the event is deprecated. */
	bool deprecated;
};
525 
cmp_sevent(const void * a,const void * b)526 static int cmp_sevent(const void *a, const void *b)
527 {
528 	const struct sevent *as = a;
529 	const struct sevent *bs = b;
530 	bool a_iscpu, b_iscpu;
531 	int ret;
532 
533 	/* Put extra events last. */
534 	if (!!as->desc != !!bs->desc)
535 		return !!as->desc - !!bs->desc;
536 
537 	/* Order by topics. */
538 	ret = strcmp(as->topic ?: "", bs->topic ?: "");
539 	if (ret)
540 		return ret;
541 
542 	/* Order CPU core events to be first */
543 	a_iscpu = as->pmu ? as->pmu->is_core : true;
544 	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
545 	if (a_iscpu != b_iscpu)
546 		return a_iscpu ? -1 : 1;
547 
548 	/* Order by PMU name. */
549 	if (as->pmu != bs->pmu) {
550 		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
551 		if (ret)
552 			return ret;
553 	}
554 
555 	/* Order by event name. */
556 	return strcmp(as->name, bs->name);
557 }
558 
pmu_alias_is_duplicate(struct sevent * a,struct sevent * b)559 static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
560 {
561 	/* Different names -> never duplicates */
562 	if (strcmp(a->name ?: "//", b->name ?: "//"))
563 		return false;
564 
565 	/* Don't remove duplicates for different PMUs */
566 	return strcmp(a->pmu_name, b->pmu_name) == 0;
567 }
568 
/* State threaded through perf_pmus__print_pmu_events__callback(). */
struct events_callback_state {
	/* Array of events being accumulated for printing. */
	struct sevent *aliases;
	/* Capacity of the aliases array. */
	size_t aliases_len;
	/* Next free slot in aliases. */
	size_t index;
};
574 
/*
 * perf_pmu__for_each_event() callback copying one event's strings into the
 * next sevent slot. Returns non-zero (stopping iteration) when the array,
 * sized from perf_pmu__num_events(), unexpectedly overflows.
 */
static int perf_pmus__print_pmu_events__callback(void *vstate,
						struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	assert(info->pmu != NULL || info->name != NULL);
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
	/* Deep-copy each string; a NULL source stays NULL (strdup may also fail). */
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
	COPY_STR(event_type_desc);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}
603 
/*
 * Print all PMU events for 'perf list': gather every event into a sevent
 * array, sort it, skip duplicates and emit each via the print callback.
 */
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	/* NOTE(review): printed is never incremented, so the trailing newline below looks unreachable — confirm. */
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	/* First pass: count events to size the aliases array. */
	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	/* Second pass: populate the array via the per-event callback. */
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates */
		if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
			goto free;

		print_cb->print_event(print_state,
				aliases[j].topic,
				aliases[j].pmu_name,
				aliases[j].pmu->type,
				aliases[j].name,
				aliases[j].alias,
				aliases[j].scale_unit,
				aliases[j].deprecated,
				aliases[j].event_type_desc,
				aliases[j].desc,
				aliases[j].long_desc,
				aliases[j].encoding_desc);
free:
		/* Free the strdup()ed strings whether printed or skipped. */
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
		zfree(&aliases[j].event_type_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}
673 
/* State accumulated by build_format_string() across a PMU's format terms. */
struct build_format_string_args {
	/* Elided form: at most 3 format terms then ",...". */
	struct strbuf short_string;
	/* Full form listing every format term. */
	struct strbuf long_string;
	/* Number of format terms seen so far. */
	int num_formats;
};
679 
/*
 * perf_pmu__for_each_format() callback building two strings describing a
 * PMU's raw event syntax: long_string gets every format term, short_string
 * only the first three. Returns the first strbuf error encountered, else 0.
 */
static int build_format_string(void *state, const char *name, int config,
			       const unsigned long *bits)
{
	struct build_format_string_args *args = state;
	unsigned int num_bits;
	int ret1, ret2 = 0;

	(void)config;
	args->num_formats++;
	/* Separate terms with ','; the short form stops growing after 3. */
	if (args->num_formats > 1) {
		strbuf_addch(&args->long_string, ',');
		if (args->num_formats < 4)
			strbuf_addch(&args->short_string, ',');
	}
	/* How many config bits this format term occupies. */
	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
	if (num_bits <= 1) {
		/* Single-bit flags are shown by name alone. */
		ret1 = strbuf_addf(&args->long_string, "%s", name);
		if (args->num_formats < 4)
			ret2 = strbuf_addf(&args->short_string, "%s", name);
	} else if (num_bits > 8) {
		/* Wide fields show their value range in hex. */
		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	} else {
		/* Narrow fields show their value range in decimal. */
		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
				  ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	}
	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
}
716 
/*
 * Print one "raw event descriptor" line per PMU for 'perf list', e.g.
 * "cpu/event=0..0xff,umask=0..0xff,.../modifier", built from the PMU's
 * sysfs format terms.
 */
void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
	struct perf_pmu *pmu = NULL;

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	while ((pmu = scan_fn(pmu)) != NULL) {
		struct build_format_string_args format_args = {
			.short_string = STRBUF_INIT,
			.long_string = STRBUF_INIT,
			.num_formats = 0,
		};
		/* Print the name without any "_<num>" suffix. */
		int len = pmu_name_len_no_suffix(pmu->name);
		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";

		if (!pmu->is_core)
			desc = NULL;

		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
		perf_pmu__for_each_format(pmu, &format_args, build_format_string);

		/* The short form elides format terms beyond the third. */
		if (format_args.num_formats > 3)
			strbuf_addf(&format_args.short_string, ",.../modifier");
		else
			strbuf_addf(&format_args.short_string, "/modifier");

		strbuf_addf(&format_args.long_string, "/modifier");
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				pmu->type,
				format_args.short_string.buf,
				/*event_alias=*/NULL,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				"Raw event descriptor",
				desc,
				/*long_desc=*/NULL,
				format_args.long_string.buf);

		strbuf_release(&format_args.short_string);
		strbuf_release(&format_args.long_string);
	}
}
767 
/* Does the PMU named pname exist and provide the event named name? */
bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	if (!pmu)
		return false;

	return perf_pmu__have_event(pmu, name);
}
774 
/*
 * Number of core PMUs, computed once and cached in a function-local static.
 * NOTE(review): the cache is written without synchronization — assumed to be
 * first called single-threaded; confirm if used from multiple threads.
 */
int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}
787 
/*
 * Do all core PMUs accept legacy hardware events carrying the PMU type in
 * the high config bits (PERF_PMU_TYPE_SHIFT)? Only meaningful on
 * heterogeneous systems; with a single core PMU, extended types are unused.
 */
static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	/* Probe each core PMU with an extended-type cycles event. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}
802 
/* Cached result of __perf_pmus__supports_extended_type(), set via pthread_once(). */
static bool perf_pmus__do_support_extended_type;

/* pthread_once() initializer computing the cached extended-type support flag. */
static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}
809 
/*
 * Thread-safe accessor for extended-type support; the (potentially
 * expensive) probe runs exactly once via pthread_once().
 */
bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}
818 
/*
 * Resolve a perf_event_attr to its PMU: first by attr->type, then by the
 * extended type encoded in the config's high bits for legacy hardware
 * events, finally falling back to the first core PMU.
 */
struct perf_pmu *perf_pmus__find_by_attr(const struct perf_event_attr *attr)
{
	struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
	u32 type = attr->type;
	bool legacy_core_type = type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE;

	if (!pmu && legacy_core_type && perf_pmus__supports_extended_type()) {
		/* Legacy event with the PMU type in the config's high bits. */
		type = attr->config >> PERF_PMU_TYPE_SHIFT;

		pmu = perf_pmus__find_by_type(type);
	}
	if (!pmu && (legacy_core_type || type == PERF_TYPE_RAW)) {
		/*
		 * For legacy events, if there was no extended type info then
		 * assume the PMU is the first core PMU.
		 *
		 * On architectures like ARM there is no sysfs PMU with type
		 * PERF_TYPE_RAW, assume the RAW events are going to be handled
		 * by the first core PMU.
		 */
		pmu = perf_pmus__find_core_pmu();
	}
	return pmu;
}
843 
/*
 * Return the PMU for an evsel, resolving it from the attr on first use and
 * caching the result (const is cast away solely to write the cache field).
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (pmu)
		return pmu;

	pmu = perf_pmus__find_by_attr(&evsel->core.attr);
	((struct evsel *)evsel)->pmu = pmu;
	return pmu;
}
855 
/* Return the first core PMU (the sorted core_pmus list head), or NULL. */
struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(NULL);
}
860 
/* Create a PMU from a test sysfs directory and add it to other_pmus. */
struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
{
	/*
	 * Some PMU functions read from the sysfs mount point, so care is
	 * needed, hence passing the eager_load flag to load things like the
	 * format files.
	 */
	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}
870 
/* Create a hwmon PMU from a test directory and add it to other_pmus. */
struct perf_pmu *perf_pmus__add_test_hwmon_pmu(const char *hwmon_dir,
					       const char *sysfs_name,
					       const char *name)
{
	return hwmon_pmu__new(&other_pmus, hwmon_dir, sysfs_name, name);
}
877 
/*
 * Return a static fake PMU ("fake", PERF_PMU_TYPE_FAKE, no formats) used
 * for testing; it lives outside the core/other lists.
 */
struct perf_pmu *perf_pmus__fake_pmu(void)
{
	static struct perf_pmu fake = {
		.name = "fake",
		.type = PERF_PMU_TYPE_FAKE,
		.format = LIST_HEAD_INIT(fake.format),
	};

	return &fake;
}
888