// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/stat.h"
#include "util/strbuf.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"

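/*
 * L3MissOnly filter bits in the IBS fetch/op control registers. These
 * match the kernel-side IBS_FETCH_L3MISSONLY/IBS_OP_L3MISSONLY
 * definitions used by the AMD IBS driver.
 */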
#define IBS_FETCH_L3MISSONLY   (1ULL << 59)
#define IBS_OP_L3MISSONLY      (1ULL << 16)

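/*
 * x86 requests the full weight structure (PERF_SAMPLE_WEIGHT_STRUCT)
 * rather than the plain weight, so that the extra fields, e.g. the
 * instruction latency, are sampled as well.
 */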
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	struct perf_pmu *pmu;

	if (!topdown_sys_has_perf_metrics())
		return false;

	/*
	 * The PERF_TYPE_RAW type is the core PMU type, e.g., the "cpu" PMU
	 * on a non-hybrid machine or the "cpu_core" PMU on a hybrid machine.
	 * topdown_sys_has_perf_metrics() checks that the slots event exists,
	 * and the slots event is only available on a core PMU that supports
	 * the perf metrics feature. Checking both the PERF_TYPE_RAW type and
	 * the slots event should be good enough to detect the perf metrics
	 * feature.
	 */
	pmu = evsel__find_pmu(evsel);
	return pmu && pmu->type == PERF_TYPE_RAW;
}

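/*
 * Topdown slots and metrics events must not be broken out of their
 * group: the metrics events are only valid as members of a group led
 * by the slots event.
 */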
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	if (!evsel__sys_has_perf_metrics(evsel))
		return false;

	return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
}

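/*
 * Format a hardware event name. On hybrid platforms the PMU type is
 * encoded in the upper bits of attr.config (above PERF_PMU_TYPE_SHIFT);
 * when present, the event is printed in pmu/event/ syntax.
 */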
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *event_name;

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		event_name = evsel__hw_names[event];
	else
		event_name = "unknown-hardware";

	/* The PMU type is not required on a non-hybrid platform. */
	if (!pmu)
		return scnprintf(bf, size, "%s", event_name);

	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu ? evsel->pmu->name : "cpu",
			 event_name);
}

static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: The hardware internally resets the sampling period when L3 Miss Filtering is enabled\n"
"and a tagged operation does not cause an L3 miss. This skews the sampling period.\n");
}

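/*
 * On AMD CPUs, warn (once per session) when an IBS fetch/op event
 * enables the L3MissOnly filter, because of the sampling-period skew
 * described in ibs_l3miss_warn().
 */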
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
	static int warned_once;

	if (warned_once || !x86__is_amd_cpu())
		return;

	evsel_pmu = evsel__find_pmu(evsel);
	if (!evsel_pmu)
		return;

	ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
	ibs_op_pmu = perf_pmus__find("ibs_op");

	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_FETCH_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_OP_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	}
}

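/*
 * Give a more specific error message for precise_ip events on the IBS
 * PMUs: IBS has no support for privilege filtering, so any exclude_*
 * request makes the event fail to open.
 */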
static int amd_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
	struct perf_pmu *pmu;

	if (evsel->core.attr.precise_ip == 0)
		return 0;

	pmu = evsel__find_pmu(evsel);
	if (!pmu || strncmp(pmu->name, "ibs", 3))
		return 0;

	/* More verbose IBS errors. */
	if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
	    evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
	    evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
				 "again without the privilege modifiers (like 'k') at the end.");
	}
	return 0;
}

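/*
 * For EINVAL on platforms with perf metrics, explain the two common
 * topdown mistakes: a slots event that is not its group's leader, and
 * a perf metric event duplicated within a group.
 */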
static int intel_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	if (err != EINVAL)
		return 0;

	if (!topdown_sys_has_perf_metrics())
		return 0;

	if (arch_is_topdown_slots(evsel)) {
		if (!evsel__is_group_leader(evsel)) {
			evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
			evlist__format_evsels(evsel->evlist, &sb, 2048);
			ret = scnprintf(msg, size, "The topdown slots event can only be the "
					"group leader in '%s'.", sb.buf);
			strbuf_release(&sb);
			return ret;
		}
	} else if (arch_is_topdown_metrics(evsel)) {
		struct evsel *pos;

		evlist__for_each_entry(evsel->evlist, pos) {
			if (pos == evsel || !arch_is_topdown_metrics(pos))
				continue;

			if (pos->core.attr.config != evsel->core.attr.config)
				continue;

			/* Two topdown metrics events with the same config in one group. */
			evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
			evlist__format_evsels(evsel->evlist, &sb, 2048);
			ret = scnprintf(msg, size, "The perf metric event '%s' is duplicated "
					"in the same group (only one is allowed) in '%s'.",
					evsel__name(evsel), sb.buf);
			strbuf_release(&sb);
			return ret;
		}
	}
	return 0;
}

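/*
 * Delegate to the vendor-specific helper; the AMD variant does not
 * need the errno value.
 */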
int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
	return x86__is_amd_cpu()
		? amd_evsel__open_strerror(evsel, msg, size)
		: intel_evsel__open_strerror(evsel, err, msg, size);
}