1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Per core/cpu state
4 *
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
7 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
18
19 #include <asm/cpufeature.h>
20 #include <asm/debugreg.h>
21 #include <asm/hardirq.h>
22 #include <asm/intel-family.h>
23 #include <asm/intel_pt.h>
24 #include <asm/apic.h>
25 #include <asm/cpu_device_id.h>
26 #include <asm/msr.h>
27
28 #include "../perf_event.h"
29
30 /*
31 * Intel PerfMon, used on Core and later.
32 */
33 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
34 {
35 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
36 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
37 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
38 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
39 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
40 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
41 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
42 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
43 };
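/*
 * Each entry above is a raw PERFEVTSEL encoding: the low byte is the
 * event select and the next byte the unit mask.  For instance, 0x412e
 * is event 0x2E with umask 0x41 (LONGEST_LAT_CACHE.MISS) and 0x4f2e is
 * the same event with umask 0x4F (LONGEST_LAT_CACHE.REFERENCE).  The
 * 0x0300 value has no architectural encoding; it is a pseudo-encoding
 * that the constraint tables below route to the REF_TSC fixed counter.
 */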
44
45 static struct event_constraint intel_core_event_constraints[] __read_mostly =
46 {
47 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
48 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
49 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
50 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
51 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
52 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
53 EVENT_CONSTRAINT_END
54 };
55
56 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
57 {
58 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
59 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
60 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
61 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
62 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
63 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
64 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
65 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
66 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
67 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
68 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
69 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
70 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
71 EVENT_CONSTRAINT_END
72 };
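/*
 * In these tables, INTEL_EVENT_CONSTRAINT(event, cntmask) restricts an
 * event select to the general-purpose counters set in the counter
 * bitmask (0x1 = counter 0, 0x2 = counter 1, 0x3 = counters 0-1,
 * 0xf = counters 0-3); e.g. INTEL_EVENT_CONSTRAINT(0x12, 0x2) pins MUL
 * to counter 1.  FIXED_EVENT_CONSTRAINT(code, n) ties an encoding (or
 * a pseudo-encoding such as 0x0300) to fixed counter n.
 */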
73
74 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
75 {
76 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
77 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
78 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
79 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
80 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
81 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
82 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
83 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
84 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
85 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
86 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
87 EVENT_CONSTRAINT_END
88 };
89
90 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
91 {
92 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
93 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
94 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
95 EVENT_EXTRA_END
96 };
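/*
 * Extra-register entries describe events whose full configuration lives
 * in an auxiliary MSR: the OFFCORE_RESPONSE umasks (0xb7/0xbb) select
 * MSR_OFFCORE_RSP_0/1, and the PEBS load-latency entry selects the
 * load-latency threshold MSR.  The third argument is the mask of bits
 * that may be written to that MSR.  The RSP_0 entry has to come first
 * so that intel_fixup_er() can rewrite an event onto the sibling
 * OFFCORE_RSP register when the preferred one is already claimed.
 */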
97
98 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
99 {
100 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
101 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
102 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
103 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
104 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
105 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
106 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
107 EVENT_CONSTRAINT_END
108 };
109
110 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
111 {
112 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
113 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
114 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
115 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
116 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
118 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
119 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
120 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
121 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
122 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
123 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
124
125 /*
126 * When HT is off, these events can only run on the bottom 4 counters.
127 * When HT is on, they are impacted by the HT bug and require EXCL access.
128 */
129 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
130 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
131 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
132 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
133
134 EVENT_CONSTRAINT_END
135 };
136
137 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
138 {
139 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
140 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
141 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
142 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
144 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
145 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
147 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
149 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
150 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
151 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
152
153 /*
154 * When HT is off, these events can only run on the bottom 4 counters.
155 * When HT is on, they are impacted by the HT bug and require EXCL access.
156 */
157 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
158 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
159 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
160 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
161
162 EVENT_CONSTRAINT_END
163 };
164
165 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
166 {
167 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
168 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
169 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
170 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
171 EVENT_EXTRA_END
172 };
173
174 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
175 {
176 EVENT_CONSTRAINT_END
177 };
178
179 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
180 {
181 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
182 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
183 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
184 EVENT_CONSTRAINT_END
185 };
186
187 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
188 {
189 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
190 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
191 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
192 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
193 FIXED_EVENT_CONSTRAINT(0x0500, 4),
194 FIXED_EVENT_CONSTRAINT(0x0600, 5),
195 FIXED_EVENT_CONSTRAINT(0x0700, 6),
196 FIXED_EVENT_CONSTRAINT(0x0800, 7),
197 FIXED_EVENT_CONSTRAINT(0x0900, 8),
198 FIXED_EVENT_CONSTRAINT(0x0a00, 9),
199 FIXED_EVENT_CONSTRAINT(0x0b00, 10),
200 FIXED_EVENT_CONSTRAINT(0x0c00, 11),
201 FIXED_EVENT_CONSTRAINT(0x0d00, 12),
202 FIXED_EVENT_CONSTRAINT(0x0e00, 13),
203 FIXED_EVENT_CONSTRAINT(0x0f00, 14),
204 FIXED_EVENT_CONSTRAINT(0x1000, 15),
205 EVENT_CONSTRAINT_END
206 };
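/*
 * Architectural perfmon v5 can enumerate up to 16 fixed counters.  The
 * pseudo-encodings above use event select 0x00 with a umask of the
 * fixed counter index plus one (0x0400 = SLOTS on fixed counter 3,
 * 0x0500 = fixed counter 4, ... 0x1000 = fixed counter 15), matching
 * the "event=0x00,umask=0x4" slots attribute defined further down.
 */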
207
208 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
209 {
210 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
211 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
212 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
213 EVENT_CONSTRAINT_END
214 };
215
216 static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
217 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
218 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
219 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
220 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
221 EVENT_CONSTRAINT_END
222 };
223
224 static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
225 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
226 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
227 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
228 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
229 FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
230 FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
231 FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
232 EVENT_CONSTRAINT_END
233 };
234
235 static struct event_constraint intel_arw_event_constraints[] __read_mostly = {
236 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
237 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
238 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
239 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
240 FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
241 FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
242 FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
243 INTEL_UEVENT_CONSTRAINT(0x01b7, 0x1),
244 INTEL_UEVENT_CONSTRAINT(0x02b7, 0x2),
245 INTEL_UEVENT_CONSTRAINT(0x04b7, 0x4),
246 INTEL_UEVENT_CONSTRAINT(0x08b7, 0x8),
247 INTEL_UEVENT_CONSTRAINT(0x01d4, 0x1),
248 INTEL_UEVENT_CONSTRAINT(0x02d4, 0x2),
249 INTEL_UEVENT_CONSTRAINT(0x04d4, 0x4),
250 INTEL_UEVENT_CONSTRAINT(0x08d4, 0x8),
251 INTEL_UEVENT_CONSTRAINT(0x0175, 0x1),
252 INTEL_UEVENT_CONSTRAINT(0x0275, 0x2),
253 INTEL_UEVENT_CONSTRAINT(0x21d3, 0x1),
254 INTEL_UEVENT_CONSTRAINT(0x22d3, 0x1),
255 EVENT_CONSTRAINT_END
256 };
257
258 static struct event_constraint intel_skl_event_constraints[] = {
259 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
260 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
261 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
262 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
263
264 /*
265 * when HT is off, these can only run on the bottom 4 counters
266 */
267 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
268 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
269 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
270 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
271 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
272
273 EVENT_CONSTRAINT_END
274 };
275
276 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
277 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
278 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
279 EVENT_EXTRA_END
280 };
281
282 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
283 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
284 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
285 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
286 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
287 EVENT_EXTRA_END
288 };
289
290 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
291 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
292 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
293 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
294 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
295 EVENT_EXTRA_END
296 };
297
298 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
299 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
300 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
301 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
302 /*
303 * Note that the low 8 bits of the eventsel code do not form a contiguous
304 * field; they contain some bits that #GP when set. Those bits are masked out.
305 */
306 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
307 EVENT_EXTRA_END
308 };
309
310 static struct event_constraint intel_icl_event_constraints[] = {
311 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
312 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
313 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
314 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
315 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
316 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
317 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
318 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
319 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
320 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
321 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
322 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
323 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
324 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
325 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
326 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
327 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
328 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
329 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
330 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
331 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
332 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
333 INTEL_EVENT_CONSTRAINT(0xef, 0xf),
334 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
335 EVENT_CONSTRAINT_END
336 };
337
338 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
339 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
340 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
341 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
342 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
343 EVENT_EXTRA_END
344 };
345
346 static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
347 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
348 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
349 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
350 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
351 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
352 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
353 EVENT_EXTRA_END
354 };
355
356 static struct event_constraint intel_glc_event_constraints[] = {
357 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
358 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
359 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
360 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
361 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
362 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
363 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
364 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
365 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
366 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
367 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
368 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
369 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
370 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
371
372 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
373 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
374 /*
375 * Generally event codes < 0x90 are restricted to counters 0-3.
376 * The 0x2E and 0x3C events are the exceptions; they have no restriction.
377 */
378 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
379
380 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
381 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
382 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
383 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
384 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
385 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
386 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
387 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
388 /*
389 * Generally event codes >= 0x90 are likely to have no restrictions.
390 * The exceptions are defined above.
391 */
392 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
393
394 EVENT_CONSTRAINT_END
395 };
396
397 static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
398 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
399 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
400 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
401 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
402 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
403 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
404 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
405 EVENT_EXTRA_END
406 };
407
408 static struct event_constraint intel_lnc_event_constraints[] = {
409 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
410 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
411 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
412 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
413 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
414 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
415 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
416 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
417 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
418 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
419 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
420 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
421 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
422 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
423
424 INTEL_EVENT_CONSTRAINT(0x20, 0xf),
425
426 INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
427 INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
428 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
429 INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
430
431 INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
432 INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
433
434 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
435 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
436 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
437 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
438 INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
439 INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
440 INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
441 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
442
443 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
444
445 INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
446
447 EVENT_CONSTRAINT_END
448 };
449
450 static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
451 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
452 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
453 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
454 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
455 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
456 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
457 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
458 EVENT_EXTRA_END
459 };
460
461 static struct event_constraint intel_pnc_event_constraints[] = {
462 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
463 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
464 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
465 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
466 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
467 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
468 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
469 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
470 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
471 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
472 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
473 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
474 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
475 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
476
477 INTEL_EVENT_CONSTRAINT(0x20, 0xf),
478 INTEL_EVENT_CONSTRAINT(0x79, 0xf),
479
480 INTEL_UEVENT_CONSTRAINT(0x0275, 0xf),
481 INTEL_UEVENT_CONSTRAINT(0x0176, 0xf),
482 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
483 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
484 INTEL_UEVENT_CONSTRAINT(0x01cd, 0xfc),
485 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
486
487 INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
488 INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
489 INTEL_EVENT_CONSTRAINT(0xd4, 0xf),
490 INTEL_EVENT_CONSTRAINT(0xd6, 0xf),
491 INTEL_EVENT_CONSTRAINT(0xdf, 0xf),
492 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
493
494 INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
495 INTEL_UEVENT_CONSTRAINT(0x0847, 0xf),
496 INTEL_UEVENT_CONSTRAINT(0x0446, 0xf),
497 INTEL_UEVENT_CONSTRAINT(0x0846, 0xf),
498 INTEL_UEVENT_CONSTRAINT(0x0148, 0xf),
499
500 EVENT_CONSTRAINT_END
501 };
502
503 static struct extra_reg intel_pnc_extra_regs[] __read_mostly = {
504 /* must define OMR_X first, see intel_alt_er() */
505 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OMR_0, 0x40ffffff0000ffffull, OMR_0),
506 INTEL_UEVENT_EXTRA_REG(0x022a, MSR_OMR_1, 0x40ffffff0000ffffull, OMR_1),
507 INTEL_UEVENT_EXTRA_REG(0x042a, MSR_OMR_2, 0x40ffffff0000ffffull, OMR_2),
508 INTEL_UEVENT_EXTRA_REG(0x082a, MSR_OMR_3, 0x40ffffff0000ffffull, OMR_3),
509 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
510 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
511 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
512 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
513 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
514 EVENT_EXTRA_END
515 };
516
517 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
518 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
519 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
520
521 static struct attribute *nhm_mem_events_attrs[] = {
522 EVENT_PTR(mem_ld_nhm),
523 NULL,
524 };
525
526 /*
527 * topdown events for Intel Core CPUs.
528 *
529 * The events are all measured in slots; a slot is one free issue
530 * opportunity in the 4-wide pipeline. Some events are already reported
531 * in slots; for cycle events we multiply by the pipeline width (4).
532 *
533 * With Hyper Threading on, topdown metrics are either summed or averaged
534 * between the threads of a core: (count_t0 + count_t1).
535 *
536 * For the average case the metric is always scaled to the pipeline width,
537 * so we use a factor of 2 ((count_t0 + count_t1) / 2 * 4).
538 */
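/*
 * Worked example: if the two SMT siblings count 100 and 60 unhalted
 * cycles, the any=1 event reports 160 and the ".scale" of "2" below
 * turns that into 160 * 2 = 320 slots, i.e. (100 + 60) / 2 * 4.
 */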
539
540 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
541 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
542 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
543 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
544 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
545 "event=0xe,umask=0x1"); /* uops_issued.any */
546 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
547 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
548 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
549 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
550 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
551 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
552 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
553 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
554 "4", "2");
555
556 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
557 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
558 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
559 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
560 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
561 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
562 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
563 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
564 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
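/*
 * The slots/topdown-* strings above are pseudo events (event=0x00 with
 * umask 0x80..0x87 for the metrics, 0x4 for SLOTS); on parts with the
 * fixed SLOTS counter they are backed by the PERF_METRICS MSR rather
 * than by a general-purpose counter.
 */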
565
566 static struct attribute *snb_events_attrs[] = {
567 EVENT_PTR(td_slots_issued),
568 EVENT_PTR(td_slots_retired),
569 EVENT_PTR(td_fetch_bubbles),
570 EVENT_PTR(td_total_slots),
571 EVENT_PTR(td_total_slots_scale),
572 EVENT_PTR(td_recovery_bubbles),
573 EVENT_PTR(td_recovery_bubbles_scale),
574 NULL,
575 };
576
577 static struct attribute *snb_mem_events_attrs[] = {
578 EVENT_PTR(mem_ld_snb),
579 EVENT_PTR(mem_st_snb),
580 NULL,
581 };
582
583 static struct event_constraint intel_hsw_event_constraints[] = {
584 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
585 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
586 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
587 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
588 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
589 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
590 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
591 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
592 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
593 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
594 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
595 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
596
597 /*
598 * When HT is off, these events can only run on the bottom 4 counters.
599 * When HT is on, they are impacted by the HT bug and require EXCL access.
600 */
601 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
602 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
603 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
604 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
605
606 EVENT_CONSTRAINT_END
607 };
608
609 static struct event_constraint intel_bdw_event_constraints[] = {
610 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
611 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
612 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
613 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
614 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
615 /*
616 * when HT is off, these can only run on the bottom 4 counters
617 */
618 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
619 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
620 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
621 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
622 EVENT_CONSTRAINT_END
623 };
624
625 static u64 intel_pmu_event_map(int hw_event)
626 {
627 return intel_perfmon_event_map[hw_event];
628 }
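/*
 * This is used as the PMU's event_map callback, translating a generic
 * hardware event into its raw encoding, e.g.
 * intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES) == 0x412e.
 */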
629
630 static __initconst const u64 glc_hw_cache_event_ids
631 [PERF_COUNT_HW_CACHE_MAX]
632 [PERF_COUNT_HW_CACHE_OP_MAX]
633 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
634 {
635 [ C(L1D ) ] = {
636 [ C(OP_READ) ] = {
637 [ C(RESULT_ACCESS) ] = 0x81d0,
638 [ C(RESULT_MISS) ] = 0xe124,
639 },
640 [ C(OP_WRITE) ] = {
641 [ C(RESULT_ACCESS) ] = 0x82d0,
642 },
643 },
644 [ C(L1I ) ] = {
645 [ C(OP_READ) ] = {
646 [ C(RESULT_MISS) ] = 0xe424,
647 },
648 [ C(OP_WRITE) ] = {
649 [ C(RESULT_ACCESS) ] = -1,
650 [ C(RESULT_MISS) ] = -1,
651 },
652 },
653 [ C(LL ) ] = {
654 [ C(OP_READ) ] = {
655 [ C(RESULT_ACCESS) ] = 0x12a,
656 [ C(RESULT_MISS) ] = 0x12a,
657 },
658 [ C(OP_WRITE) ] = {
659 [ C(RESULT_ACCESS) ] = 0x12a,
660 [ C(RESULT_MISS) ] = 0x12a,
661 },
662 },
663 [ C(DTLB) ] = {
664 [ C(OP_READ) ] = {
665 [ C(RESULT_ACCESS) ] = 0x81d0,
666 [ C(RESULT_MISS) ] = 0xe12,
667 },
668 [ C(OP_WRITE) ] = {
669 [ C(RESULT_ACCESS) ] = 0x82d0,
670 [ C(RESULT_MISS) ] = 0xe13,
671 },
672 },
673 [ C(ITLB) ] = {
674 [ C(OP_READ) ] = {
675 [ C(RESULT_ACCESS) ] = -1,
676 [ C(RESULT_MISS) ] = 0xe11,
677 },
678 [ C(OP_WRITE) ] = {
679 [ C(RESULT_ACCESS) ] = -1,
680 [ C(RESULT_MISS) ] = -1,
681 },
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = -1,
684 [ C(RESULT_MISS) ] = -1,
685 },
686 },
687 [ C(BPU ) ] = {
688 [ C(OP_READ) ] = {
689 [ C(RESULT_ACCESS) ] = 0x4c4,
690 [ C(RESULT_MISS) ] = 0x4c5,
691 },
692 [ C(OP_WRITE) ] = {
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
695 },
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = -1,
698 [ C(RESULT_MISS) ] = -1,
699 },
700 },
701 [ C(NODE) ] = {
702 [ C(OP_READ) ] = {
703 [ C(RESULT_ACCESS) ] = 0x12a,
704 [ C(RESULT_MISS) ] = 0x12a,
705 },
706 },
707 };
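/*
 * These tables are indexed as hw_cache_event_ids[cache][op][result];
 * by convention a value of 0 means the combination is not counted and
 * -1 means it is not supported at all.  Entries like 0x12a are OCR
 * (offcore response) style events that additionally take a request
 * mask from the matching *_hw_cache_extra_regs table.
 */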
708
709 static __initconst const u64 glc_hw_cache_extra_regs
710 [PERF_COUNT_HW_CACHE_MAX]
711 [PERF_COUNT_HW_CACHE_OP_MAX]
712 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
713 {
714 [ C(LL ) ] = {
715 [ C(OP_READ) ] = {
716 [ C(RESULT_ACCESS) ] = 0x10001,
717 [ C(RESULT_MISS) ] = 0x3fbfc00001,
718 },
719 [ C(OP_WRITE) ] = {
720 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
721 [ C(RESULT_MISS) ] = 0x3f3fc00002,
722 },
723 },
724 [ C(NODE) ] = {
725 [ C(OP_READ) ] = {
726 [ C(RESULT_ACCESS) ] = 0x10c000001,
727 [ C(RESULT_MISS) ] = 0x3fb3000001,
728 },
729 },
730 };
731
732 static __initconst const u64 pnc_hw_cache_event_ids
733 [PERF_COUNT_HW_CACHE_MAX]
734 [PERF_COUNT_HW_CACHE_OP_MAX]
735 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
736 {
737 [ C(L1D ) ] = {
738 [ C(OP_READ) ] = {
739 [ C(RESULT_ACCESS) ] = 0x81d0,
740 [ C(RESULT_MISS) ] = 0xe124,
741 },
742 [ C(OP_WRITE) ] = {
743 [ C(RESULT_ACCESS) ] = 0x82d0,
744 },
745 },
746 [ C(L1I ) ] = {
747 [ C(OP_READ) ] = {
748 [ C(RESULT_MISS) ] = 0xe424,
749 },
750 [ C(OP_WRITE) ] = {
751 [ C(RESULT_ACCESS) ] = -1,
752 [ C(RESULT_MISS) ] = -1,
753 },
754 },
755 [ C(LL ) ] = {
756 [ C(OP_READ) ] = {
757 [ C(RESULT_ACCESS) ] = 0x12a,
758 [ C(RESULT_MISS) ] = 0x12a,
759 },
760 [ C(OP_WRITE) ] = {
761 [ C(RESULT_ACCESS) ] = 0x12a,
762 [ C(RESULT_MISS) ] = 0x12a,
763 },
764 },
765 [ C(DTLB) ] = {
766 [ C(OP_READ) ] = {
767 [ C(RESULT_ACCESS) ] = 0x81d0,
768 [ C(RESULT_MISS) ] = 0xe12,
769 },
770 [ C(OP_WRITE) ] = {
771 [ C(RESULT_ACCESS) ] = 0x82d0,
772 [ C(RESULT_MISS) ] = 0xe13,
773 },
774 },
775 [ C(ITLB) ] = {
776 [ C(OP_READ) ] = {
777 [ C(RESULT_ACCESS) ] = -1,
778 [ C(RESULT_MISS) ] = 0xe11,
779 },
780 [ C(OP_WRITE) ] = {
781 [ C(RESULT_ACCESS) ] = -1,
782 [ C(RESULT_MISS) ] = -1,
783 },
784 [ C(OP_PREFETCH) ] = {
785 [ C(RESULT_ACCESS) ] = -1,
786 [ C(RESULT_MISS) ] = -1,
787 },
788 },
789 [ C(BPU ) ] = {
790 [ C(OP_READ) ] = {
791 [ C(RESULT_ACCESS) ] = 0x4c4,
792 [ C(RESULT_MISS) ] = 0x4c5,
793 },
794 [ C(OP_WRITE) ] = {
795 [ C(RESULT_ACCESS) ] = -1,
796 [ C(RESULT_MISS) ] = -1,
797 },
798 [ C(OP_PREFETCH) ] = {
799 [ C(RESULT_ACCESS) ] = -1,
800 [ C(RESULT_MISS) ] = -1,
801 },
802 },
803 [ C(NODE) ] = {
804 [ C(OP_READ) ] = {
805 [ C(RESULT_ACCESS) ] = -1,
806 [ C(RESULT_MISS) ] = -1,
807 },
808 },
809 };
810
811 static __initconst const u64 pnc_hw_cache_extra_regs
812 [PERF_COUNT_HW_CACHE_MAX]
813 [PERF_COUNT_HW_CACHE_OP_MAX]
814 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
815 {
816 [ C(LL ) ] = {
817 [ C(OP_READ) ] = {
818 [ C(RESULT_ACCESS) ] = 0x4000000000000001,
819 [ C(RESULT_MISS) ] = 0xFFFFF000000001,
820 },
821 [ C(OP_WRITE) ] = {
822 [ C(RESULT_ACCESS) ] = 0x4000000000000002,
823 [ C(RESULT_MISS) ] = 0xFFFFF000000002,
824 },
825 },
826 };
827
828 /*
829 * Notes on the events:
830 * - data reads do not include code reads (comparable to earlier tables)
831 * - data counts include speculative execution (except L1 write, dtlb, bpu)
832 * - remote node access includes remote memory, remote cache, remote mmio.
833 * - prefetches are not included in the counts.
834 * - icache miss does not include decoded icache
835 */
836
837 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
838 #define SKL_DEMAND_RFO BIT_ULL(1)
839 #define SKL_ANY_RESPONSE BIT_ULL(16)
840 #define SKL_SUPPLIER_NONE BIT_ULL(17)
841 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
842 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
843 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
844 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
845 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
846 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
847 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
848 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
849 #define SKL_SPL_HIT BIT_ULL(30)
850 #define SKL_SNOOP_NONE BIT_ULL(31)
851 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
852 #define SKL_SNOOP_MISS BIT_ULL(33)
853 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
854 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
855 #define SKL_SNOOP_HITM BIT_ULL(36)
856 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
857 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
858 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
859 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
860 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
861 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
862 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
863 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
864 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
865 SKL_SNOOP_HITM|SKL_SPL_HIT)
866 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
867 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
868 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
869 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
870 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
871
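/*
 * These request/supplier/snoop bits are OR-ed together to build the
 * MSR_OFFCORE_RSP_x value for each cache op/result below, e.g. an LL
 * read access is SKL_DEMAND_READ | SKL_LLC_ACCESS | SKL_ANY_SNOOP.
 */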
872 static __initconst const u64 skl_hw_cache_event_ids
873 [PERF_COUNT_HW_CACHE_MAX]
874 [PERF_COUNT_HW_CACHE_OP_MAX]
875 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
876 {
877 [ C(L1D ) ] = {
878 [ C(OP_READ) ] = {
879 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
880 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
881 },
882 [ C(OP_WRITE) ] = {
883 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
884 [ C(RESULT_MISS) ] = 0x0,
885 },
886 [ C(OP_PREFETCH) ] = {
887 [ C(RESULT_ACCESS) ] = 0x0,
888 [ C(RESULT_MISS) ] = 0x0,
889 },
890 },
891 [ C(L1I ) ] = {
892 [ C(OP_READ) ] = {
893 [ C(RESULT_ACCESS) ] = 0x0,
894 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
895 },
896 [ C(OP_WRITE) ] = {
897 [ C(RESULT_ACCESS) ] = -1,
898 [ C(RESULT_MISS) ] = -1,
899 },
900 [ C(OP_PREFETCH) ] = {
901 [ C(RESULT_ACCESS) ] = 0x0,
902 [ C(RESULT_MISS) ] = 0x0,
903 },
904 },
905 [ C(LL ) ] = {
906 [ C(OP_READ) ] = {
907 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
908 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
909 },
910 [ C(OP_WRITE) ] = {
911 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
912 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
913 },
914 [ C(OP_PREFETCH) ] = {
915 [ C(RESULT_ACCESS) ] = 0x0,
916 [ C(RESULT_MISS) ] = 0x0,
917 },
918 },
919 [ C(DTLB) ] = {
920 [ C(OP_READ) ] = {
921 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
922 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
923 },
924 [ C(OP_WRITE) ] = {
925 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
926 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
927 },
928 [ C(OP_PREFETCH) ] = {
929 [ C(RESULT_ACCESS) ] = 0x0,
930 [ C(RESULT_MISS) ] = 0x0,
931 },
932 },
933 [ C(ITLB) ] = {
934 [ C(OP_READ) ] = {
935 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
936 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
937 },
938 [ C(OP_WRITE) ] = {
939 [ C(RESULT_ACCESS) ] = -1,
940 [ C(RESULT_MISS) ] = -1,
941 },
942 [ C(OP_PREFETCH) ] = {
943 [ C(RESULT_ACCESS) ] = -1,
944 [ C(RESULT_MISS) ] = -1,
945 },
946 },
947 [ C(BPU ) ] = {
948 [ C(OP_READ) ] = {
949 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
950 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
951 },
952 [ C(OP_WRITE) ] = {
953 [ C(RESULT_ACCESS) ] = -1,
954 [ C(RESULT_MISS) ] = -1,
955 },
956 [ C(OP_PREFETCH) ] = {
957 [ C(RESULT_ACCESS) ] = -1,
958 [ C(RESULT_MISS) ] = -1,
959 },
960 },
961 [ C(NODE) ] = {
962 [ C(OP_READ) ] = {
963 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
964 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
965 },
966 [ C(OP_WRITE) ] = {
967 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
968 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
969 },
970 [ C(OP_PREFETCH) ] = {
971 [ C(RESULT_ACCESS) ] = 0x0,
972 [ C(RESULT_MISS) ] = 0x0,
973 },
974 },
975 };
976
977 static __initconst const u64 skl_hw_cache_extra_regs
978 [PERF_COUNT_HW_CACHE_MAX]
979 [PERF_COUNT_HW_CACHE_OP_MAX]
980 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
981 {
982 [ C(LL ) ] = {
983 [ C(OP_READ) ] = {
984 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
985 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
986 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
987 SKL_L3_MISS|SKL_ANY_SNOOP|
988 SKL_SUPPLIER_NONE,
989 },
990 [ C(OP_WRITE) ] = {
991 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
992 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
993 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
994 SKL_L3_MISS|SKL_ANY_SNOOP|
995 SKL_SUPPLIER_NONE,
996 },
997 [ C(OP_PREFETCH) ] = {
998 [ C(RESULT_ACCESS) ] = 0x0,
999 [ C(RESULT_MISS) ] = 0x0,
1000 },
1001 },
1002 [ C(NODE) ] = {
1003 [ C(OP_READ) ] = {
1004 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
1005 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
1006 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
1007 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
1008 },
1009 [ C(OP_WRITE) ] = {
1010 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
1011 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
1012 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
1013 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
1014 },
1015 [ C(OP_PREFETCH) ] = {
1016 [ C(RESULT_ACCESS) ] = 0x0,
1017 [ C(RESULT_MISS) ] = 0x0,
1018 },
1019 },
1020 };
1021
1022 #define SNB_DMND_DATA_RD (1ULL << 0)
1023 #define SNB_DMND_RFO (1ULL << 1)
1024 #define SNB_DMND_IFETCH (1ULL << 2)
1025 #define SNB_DMND_WB (1ULL << 3)
1026 #define SNB_PF_DATA_RD (1ULL << 4)
1027 #define SNB_PF_RFO (1ULL << 5)
1028 #define SNB_PF_IFETCH (1ULL << 6)
1029 #define SNB_LLC_DATA_RD (1ULL << 7)
1030 #define SNB_LLC_RFO (1ULL << 8)
1031 #define SNB_LLC_IFETCH (1ULL << 9)
1032 #define SNB_BUS_LOCKS (1ULL << 10)
1033 #define SNB_STRM_ST (1ULL << 11)
1034 #define SNB_OTHER (1ULL << 15)
1035 #define SNB_RESP_ANY (1ULL << 16)
1036 #define SNB_NO_SUPP (1ULL << 17)
1037 #define SNB_LLC_HITM (1ULL << 18)
1038 #define SNB_LLC_HITE (1ULL << 19)
1039 #define SNB_LLC_HITS (1ULL << 20)
1040 #define SNB_LLC_HITF (1ULL << 21)
1041 #define SNB_LOCAL (1ULL << 22)
1042 #define SNB_REMOTE (0xffULL << 23)
1043 #define SNB_SNP_NONE (1ULL << 31)
1044 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
1045 #define SNB_SNP_MISS (1ULL << 33)
1046 #define SNB_NO_FWD (1ULL << 34)
1047 #define SNB_SNP_FWD (1ULL << 35)
1048 #define SNB_HITM (1ULL << 36)
1049 #define SNB_NON_DRAM (1ULL << 37)
1050
1051 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
1052 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
1053 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1054
1055 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
1056 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
1057 SNB_HITM)
1058
1059 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
1060 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
1061
1062 #define SNB_L3_ACCESS SNB_RESP_ANY
1063 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
1064
1065 static __initconst const u64 snb_hw_cache_extra_regs
1066 [PERF_COUNT_HW_CACHE_MAX]
1067 [PERF_COUNT_HW_CACHE_OP_MAX]
1068 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1069 {
1070 [ C(LL ) ] = {
1071 [ C(OP_READ) ] = {
1072 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
1073 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
1074 },
1075 [ C(OP_WRITE) ] = {
1076 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
1077 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
1078 },
1079 [ C(OP_PREFETCH) ] = {
1080 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
1081 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
1082 },
1083 },
1084 [ C(NODE) ] = {
1085 [ C(OP_READ) ] = {
1086 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
1087 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
1088 },
1089 [ C(OP_WRITE) ] = {
1090 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
1091 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
1092 },
1093 [ C(OP_PREFETCH) ] = {
1094 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
1095 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
1096 },
1097 },
1098 };
1099
1100 static __initconst const u64 snb_hw_cache_event_ids
1101 [PERF_COUNT_HW_CACHE_MAX]
1102 [PERF_COUNT_HW_CACHE_OP_MAX]
1103 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1104 {
1105 [ C(L1D) ] = {
1106 [ C(OP_READ) ] = {
1107 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
1108 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
1109 },
1110 [ C(OP_WRITE) ] = {
1111 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
1112 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
1113 },
1114 [ C(OP_PREFETCH) ] = {
1115 [ C(RESULT_ACCESS) ] = 0x0,
1116 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
1117 },
1118 },
1119 [ C(L1I ) ] = {
1120 [ C(OP_READ) ] = {
1121 [ C(RESULT_ACCESS) ] = 0x0,
1122 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1123 },
1124 [ C(OP_WRITE) ] = {
1125 [ C(RESULT_ACCESS) ] = -1,
1126 [ C(RESULT_MISS) ] = -1,
1127 },
1128 [ C(OP_PREFETCH) ] = {
1129 [ C(RESULT_ACCESS) ] = 0x0,
1130 [ C(RESULT_MISS) ] = 0x0,
1131 },
1132 },
1133 [ C(LL ) ] = {
1134 [ C(OP_READ) ] = {
1135 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1136 [ C(RESULT_ACCESS) ] = 0x01b7,
1137 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1138 [ C(RESULT_MISS) ] = 0x01b7,
1139 },
1140 [ C(OP_WRITE) ] = {
1141 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1142 [ C(RESULT_ACCESS) ] = 0x01b7,
1143 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1144 [ C(RESULT_MISS) ] = 0x01b7,
1145 },
1146 [ C(OP_PREFETCH) ] = {
1147 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1148 [ C(RESULT_ACCESS) ] = 0x01b7,
1149 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1150 [ C(RESULT_MISS) ] = 0x01b7,
1151 },
1152 },
1153 [ C(DTLB) ] = {
1154 [ C(OP_READ) ] = {
1155 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
1156 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
1157 },
1158 [ C(OP_WRITE) ] = {
1159 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
1160 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1161 },
1162 [ C(OP_PREFETCH) ] = {
1163 [ C(RESULT_ACCESS) ] = 0x0,
1164 [ C(RESULT_MISS) ] = 0x0,
1165 },
1166 },
1167 [ C(ITLB) ] = {
1168 [ C(OP_READ) ] = {
1169 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
1170 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
1171 },
1172 [ C(OP_WRITE) ] = {
1173 [ C(RESULT_ACCESS) ] = -1,
1174 [ C(RESULT_MISS) ] = -1,
1175 },
1176 [ C(OP_PREFETCH) ] = {
1177 [ C(RESULT_ACCESS) ] = -1,
1178 [ C(RESULT_MISS) ] = -1,
1179 },
1180 },
1181 [ C(BPU ) ] = {
1182 [ C(OP_READ) ] = {
1183 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1184 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1185 },
1186 [ C(OP_WRITE) ] = {
1187 [ C(RESULT_ACCESS) ] = -1,
1188 [ C(RESULT_MISS) ] = -1,
1189 },
1190 [ C(OP_PREFETCH) ] = {
1191 [ C(RESULT_ACCESS) ] = -1,
1192 [ C(RESULT_MISS) ] = -1,
1193 },
1194 },
1195 [ C(NODE) ] = {
1196 [ C(OP_READ) ] = {
1197 [ C(RESULT_ACCESS) ] = 0x01b7,
1198 [ C(RESULT_MISS) ] = 0x01b7,
1199 },
1200 [ C(OP_WRITE) ] = {
1201 [ C(RESULT_ACCESS) ] = 0x01b7,
1202 [ C(RESULT_MISS) ] = 0x01b7,
1203 },
1204 [ C(OP_PREFETCH) ] = {
1205 [ C(RESULT_ACCESS) ] = 0x01b7,
1206 [ C(RESULT_MISS) ] = 0x01b7,
1207 },
1208 },
1209
1210 };
1211
1212 /*
1213 * Notes on the events:
1214 * - data reads do not include code reads (comparable to earlier tables)
1215 * - data counts include speculative execution (except L1 write, dtlb, bpu)
1216 * - remote node access includes remote memory, remote cache, remote mmio.
1217 * - prefetches are not included in the counts because they are not
1218 * reliably counted.
1219 */
1220
1221 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
1222 #define HSW_DEMAND_RFO BIT_ULL(1)
1223 #define HSW_ANY_RESPONSE BIT_ULL(16)
1224 #define HSW_SUPPLIER_NONE BIT_ULL(17)
1225 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
1226 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
1227 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
1228 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
1229 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
1230 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1231 HSW_L3_MISS_REMOTE_HOP2P)
1232 #define HSW_SNOOP_NONE BIT_ULL(31)
1233 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
1234 #define HSW_SNOOP_MISS BIT_ULL(33)
1235 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
1236 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
1237 #define HSW_SNOOP_HITM BIT_ULL(36)
1238 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
1239 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
1240 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
1241 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
1242 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
1243 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
1244 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
1245 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
1246 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
1247 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
1248 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
1249
1250 #define BDW_L3_MISS_LOCAL BIT(26)
1251 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
1252 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1253 HSW_L3_MISS_REMOTE_HOP2P)
1254
1255
1256 static __initconst const u64 hsw_hw_cache_event_ids
1257 [PERF_COUNT_HW_CACHE_MAX]
1258 [PERF_COUNT_HW_CACHE_OP_MAX]
1259 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1260 {
1261 [ C(L1D ) ] = {
1262 [ C(OP_READ) ] = {
1263 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1264 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
1265 },
1266 [ C(OP_WRITE) ] = {
1267 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1268 [ C(RESULT_MISS) ] = 0x0,
1269 },
1270 [ C(OP_PREFETCH) ] = {
1271 [ C(RESULT_ACCESS) ] = 0x0,
1272 [ C(RESULT_MISS) ] = 0x0,
1273 },
1274 },
1275 [ C(L1I ) ] = {
1276 [ C(OP_READ) ] = {
1277 [ C(RESULT_ACCESS) ] = 0x0,
1278 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
1279 },
1280 [ C(OP_WRITE) ] = {
1281 [ C(RESULT_ACCESS) ] = -1,
1282 [ C(RESULT_MISS) ] = -1,
1283 },
1284 [ C(OP_PREFETCH) ] = {
1285 [ C(RESULT_ACCESS) ] = 0x0,
1286 [ C(RESULT_MISS) ] = 0x0,
1287 },
1288 },
1289 [ C(LL ) ] = {
1290 [ C(OP_READ) ] = {
1291 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1292 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1293 },
1294 [ C(OP_WRITE) ] = {
1295 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1296 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1297 },
1298 [ C(OP_PREFETCH) ] = {
1299 [ C(RESULT_ACCESS) ] = 0x0,
1300 [ C(RESULT_MISS) ] = 0x0,
1301 },
1302 },
1303 [ C(DTLB) ] = {
1304 [ C(OP_READ) ] = {
1305 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1306 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1307 },
1308 [ C(OP_WRITE) ] = {
1309 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1310 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1311 },
1312 [ C(OP_PREFETCH) ] = {
1313 [ C(RESULT_ACCESS) ] = 0x0,
1314 [ C(RESULT_MISS) ] = 0x0,
1315 },
1316 },
1317 [ C(ITLB) ] = {
1318 [ C(OP_READ) ] = {
1319 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1320 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1321 },
1322 [ C(OP_WRITE) ] = {
1323 [ C(RESULT_ACCESS) ] = -1,
1324 [ C(RESULT_MISS) ] = -1,
1325 },
1326 [ C(OP_PREFETCH) ] = {
1327 [ C(RESULT_ACCESS) ] = -1,
1328 [ C(RESULT_MISS) ] = -1,
1329 },
1330 },
1331 [ C(BPU ) ] = {
1332 [ C(OP_READ) ] = {
1333 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1334 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1335 },
1336 [ C(OP_WRITE) ] = {
1337 [ C(RESULT_ACCESS) ] = -1,
1338 [ C(RESULT_MISS) ] = -1,
1339 },
1340 [ C(OP_PREFETCH) ] = {
1341 [ C(RESULT_ACCESS) ] = -1,
1342 [ C(RESULT_MISS) ] = -1,
1343 },
1344 },
1345 [ C(NODE) ] = {
1346 [ C(OP_READ) ] = {
1347 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1348 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1349 },
1350 [ C(OP_WRITE) ] = {
1351 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1352 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1353 },
1354 [ C(OP_PREFETCH) ] = {
1355 [ C(RESULT_ACCESS) ] = 0x0,
1356 [ C(RESULT_MISS) ] = 0x0,
1357 },
1358 },
1359 };
1360
1361 static __initconst const u64 hsw_hw_cache_extra_regs
1362 [PERF_COUNT_HW_CACHE_MAX]
1363 [PERF_COUNT_HW_CACHE_OP_MAX]
1364 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1365 {
1366 [ C(LL ) ] = {
1367 [ C(OP_READ) ] = {
1368 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1369 HSW_LLC_ACCESS,
1370 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1371 HSW_L3_MISS|HSW_ANY_SNOOP,
1372 },
1373 [ C(OP_WRITE) ] = {
1374 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1375 HSW_LLC_ACCESS,
1376 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1377 HSW_L3_MISS|HSW_ANY_SNOOP,
1378 },
1379 [ C(OP_PREFETCH) ] = {
1380 [ C(RESULT_ACCESS) ] = 0x0,
1381 [ C(RESULT_MISS) ] = 0x0,
1382 },
1383 },
1384 [ C(NODE) ] = {
1385 [ C(OP_READ) ] = {
1386 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1387 HSW_L3_MISS_LOCAL_DRAM|
1388 HSW_SNOOP_DRAM,
1389 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1390 HSW_L3_MISS_REMOTE|
1391 HSW_SNOOP_DRAM,
1392 },
1393 [ C(OP_WRITE) ] = {
1394 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1395 HSW_L3_MISS_LOCAL_DRAM|
1396 HSW_SNOOP_DRAM,
1397 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1398 HSW_L3_MISS_REMOTE|
1399 HSW_SNOOP_DRAM,
1400 },
1401 [ C(OP_PREFETCH) ] = {
1402 [ C(RESULT_ACCESS) ] = 0x0,
1403 [ C(RESULT_MISS) ] = 0x0,
1404 },
1405 },
1406 };
1407
1408 static __initconst const u64 westmere_hw_cache_event_ids
1409 [PERF_COUNT_HW_CACHE_MAX]
1410 [PERF_COUNT_HW_CACHE_OP_MAX]
1411 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1412 {
1413 [ C(L1D) ] = {
1414 [ C(OP_READ) ] = {
1415 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1416 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1417 },
1418 [ C(OP_WRITE) ] = {
1419 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1420 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1421 },
1422 [ C(OP_PREFETCH) ] = {
1423 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1424 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1425 },
1426 },
1427 [ C(L1I ) ] = {
1428 [ C(OP_READ) ] = {
1429 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1430 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1431 },
1432 [ C(OP_WRITE) ] = {
1433 [ C(RESULT_ACCESS) ] = -1,
1434 [ C(RESULT_MISS) ] = -1,
1435 },
1436 [ C(OP_PREFETCH) ] = {
1437 [ C(RESULT_ACCESS) ] = 0x0,
1438 [ C(RESULT_MISS) ] = 0x0,
1439 },
1440 },
1441 [ C(LL ) ] = {
1442 [ C(OP_READ) ] = {
1443 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1444 [ C(RESULT_ACCESS) ] = 0x01b7,
1445 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1446 [ C(RESULT_MISS) ] = 0x01b7,
1447 },
1448 /*
1449 * Use RFO, not WRITEBACK, because a write miss would typically occur
1450 * on RFO.
1451 */
1452 [ C(OP_WRITE) ] = {
1453 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1454 [ C(RESULT_ACCESS) ] = 0x01b7,
1455 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1456 [ C(RESULT_MISS) ] = 0x01b7,
1457 },
1458 [ C(OP_PREFETCH) ] = {
1459 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1460 [ C(RESULT_ACCESS) ] = 0x01b7,
1461 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1462 [ C(RESULT_MISS) ] = 0x01b7,
1463 },
1464 },
1465 [ C(DTLB) ] = {
1466 [ C(OP_READ) ] = {
1467 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1468 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1469 },
1470 [ C(OP_WRITE) ] = {
1471 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1472 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1473 },
1474 [ C(OP_PREFETCH) ] = {
1475 [ C(RESULT_ACCESS) ] = 0x0,
1476 [ C(RESULT_MISS) ] = 0x0,
1477 },
1478 },
1479 [ C(ITLB) ] = {
1480 [ C(OP_READ) ] = {
1481 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1482 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1483 },
1484 [ C(OP_WRITE) ] = {
1485 [ C(RESULT_ACCESS) ] = -1,
1486 [ C(RESULT_MISS) ] = -1,
1487 },
1488 [ C(OP_PREFETCH) ] = {
1489 [ C(RESULT_ACCESS) ] = -1,
1490 [ C(RESULT_MISS) ] = -1,
1491 },
1492 },
1493 [ C(BPU ) ] = {
1494 [ C(OP_READ) ] = {
1495 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1496 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1497 },
1498 [ C(OP_WRITE) ] = {
1499 [ C(RESULT_ACCESS) ] = -1,
1500 [ C(RESULT_MISS) ] = -1,
1501 },
1502 [ C(OP_PREFETCH) ] = {
1503 [ C(RESULT_ACCESS) ] = -1,
1504 [ C(RESULT_MISS) ] = -1,
1505 },
1506 },
1507 [ C(NODE) ] = {
1508 [ C(OP_READ) ] = {
1509 [ C(RESULT_ACCESS) ] = 0x01b7,
1510 [ C(RESULT_MISS) ] = 0x01b7,
1511 },
1512 [ C(OP_WRITE) ] = {
1513 [ C(RESULT_ACCESS) ] = 0x01b7,
1514 [ C(RESULT_MISS) ] = 0x01b7,
1515 },
1516 [ C(OP_PREFETCH) ] = {
1517 [ C(RESULT_ACCESS) ] = 0x01b7,
1518 [ C(RESULT_MISS) ] = 0x01b7,
1519 },
1520 },
1521 };
1522
1523 /*
1524 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1525 * See IA32 SDM Vol 3B 30.6.1.3
1526 */
1527
1528 #define NHM_DMND_DATA_RD (1 << 0)
1529 #define NHM_DMND_RFO (1 << 1)
1530 #define NHM_DMND_IFETCH (1 << 2)
1531 #define NHM_DMND_WB (1 << 3)
1532 #define NHM_PF_DATA_RD (1 << 4)
1533 #define NHM_PF_DATA_RFO (1 << 5)
1534 #define NHM_PF_IFETCH (1 << 6)
1535 #define NHM_OFFCORE_OTHER (1 << 7)
1536 #define NHM_UNCORE_HIT (1 << 8)
1537 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1538 #define NHM_OTHER_CORE_HITM (1 << 10)
1539 /* reserved */
1540 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1541 #define NHM_REMOTE_DRAM (1 << 13)
1542 #define NHM_LOCAL_DRAM (1 << 14)
1543 #define NHM_NON_DRAM (1 << 15)
1544
1545 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1546 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1547
1548 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1549 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1550 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1551
1552 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1553 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1554 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1555
1556 static __initconst const u64 nehalem_hw_cache_extra_regs
1557 [PERF_COUNT_HW_CACHE_MAX]
1558 [PERF_COUNT_HW_CACHE_OP_MAX]
1559 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1560 {
1561 [ C(LL ) ] = {
1562 [ C(OP_READ) ] = {
1563 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1564 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1565 },
1566 [ C(OP_WRITE) ] = {
1567 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1568 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1569 },
1570 [ C(OP_PREFETCH) ] = {
1571 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1572 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1573 },
1574 },
1575 [ C(NODE) ] = {
1576 [ C(OP_READ) ] = {
1577 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1578 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1579 },
1580 [ C(OP_WRITE) ] = {
1581 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1582 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1583 },
1584 [ C(OP_PREFETCH) ] = {
1585 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1586 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1587 },
1588 },
1589 };
1590
1591 static __initconst const u64 nehalem_hw_cache_event_ids
1592 [PERF_COUNT_HW_CACHE_MAX]
1593 [PERF_COUNT_HW_CACHE_OP_MAX]
1594 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1595 {
1596 [ C(L1D) ] = {
1597 [ C(OP_READ) ] = {
1598 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1599 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1600 },
1601 [ C(OP_WRITE) ] = {
1602 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1603 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1604 },
1605 [ C(OP_PREFETCH) ] = {
1606 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1607 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1608 },
1609 },
1610 [ C(L1I ) ] = {
1611 [ C(OP_READ) ] = {
1612 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1613 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1614 },
1615 [ C(OP_WRITE) ] = {
1616 [ C(RESULT_ACCESS) ] = -1,
1617 [ C(RESULT_MISS) ] = -1,
1618 },
1619 [ C(OP_PREFETCH) ] = {
1620 [ C(RESULT_ACCESS) ] = 0x0,
1621 [ C(RESULT_MISS) ] = 0x0,
1622 },
1623 },
1624 [ C(LL ) ] = {
1625 [ C(OP_READ) ] = {
1626 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1627 [ C(RESULT_ACCESS) ] = 0x01b7,
1628 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1629 [ C(RESULT_MISS) ] = 0x01b7,
1630 },
1631 /*
1632 * Use RFO, not WRITEBACK, because a write miss would typically occur
1633 * on RFO.
1634 */
1635 [ C(OP_WRITE) ] = {
1636 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1637 [ C(RESULT_ACCESS) ] = 0x01b7,
1638 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1639 [ C(RESULT_MISS) ] = 0x01b7,
1640 },
1641 [ C(OP_PREFETCH) ] = {
1642 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1643 [ C(RESULT_ACCESS) ] = 0x01b7,
1644 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1645 [ C(RESULT_MISS) ] = 0x01b7,
1646 },
1647 },
1648 [ C(DTLB) ] = {
1649 [ C(OP_READ) ] = {
1650 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1651 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1652 },
1653 [ C(OP_WRITE) ] = {
1654 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1655 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1656 },
1657 [ C(OP_PREFETCH) ] = {
1658 [ C(RESULT_ACCESS) ] = 0x0,
1659 [ C(RESULT_MISS) ] = 0x0,
1660 },
1661 },
1662 [ C(ITLB) ] = {
1663 [ C(OP_READ) ] = {
1664 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1665 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1666 },
1667 [ C(OP_WRITE) ] = {
1668 [ C(RESULT_ACCESS) ] = -1,
1669 [ C(RESULT_MISS) ] = -1,
1670 },
1671 [ C(OP_PREFETCH) ] = {
1672 [ C(RESULT_ACCESS) ] = -1,
1673 [ C(RESULT_MISS) ] = -1,
1674 },
1675 },
1676 [ C(BPU ) ] = {
1677 [ C(OP_READ) ] = {
1678 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1679 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1680 },
1681 [ C(OP_WRITE) ] = {
1682 [ C(RESULT_ACCESS) ] = -1,
1683 [ C(RESULT_MISS) ] = -1,
1684 },
1685 [ C(OP_PREFETCH) ] = {
1686 [ C(RESULT_ACCESS) ] = -1,
1687 [ C(RESULT_MISS) ] = -1,
1688 },
1689 },
1690 [ C(NODE) ] = {
1691 [ C(OP_READ) ] = {
1692 [ C(RESULT_ACCESS) ] = 0x01b7,
1693 [ C(RESULT_MISS) ] = 0x01b7,
1694 },
1695 [ C(OP_WRITE) ] = {
1696 [ C(RESULT_ACCESS) ] = 0x01b7,
1697 [ C(RESULT_MISS) ] = 0x01b7,
1698 },
1699 [ C(OP_PREFETCH) ] = {
1700 [ C(RESULT_ACCESS) ] = 0x01b7,
1701 [ C(RESULT_MISS) ] = 0x01b7,
1702 },
1703 },
1704 };
1705
1706 static __initconst const u64 core2_hw_cache_event_ids
1707 [PERF_COUNT_HW_CACHE_MAX]
1708 [PERF_COUNT_HW_CACHE_OP_MAX]
1709 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1710 {
1711 [ C(L1D) ] = {
1712 [ C(OP_READ) ] = {
1713 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1714 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1715 },
1716 [ C(OP_WRITE) ] = {
1717 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1718 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1719 },
1720 [ C(OP_PREFETCH) ] = {
1721 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1722 [ C(RESULT_MISS) ] = 0,
1723 },
1724 },
1725 [ C(L1I ) ] = {
1726 [ C(OP_READ) ] = {
1727 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1728 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1729 },
1730 [ C(OP_WRITE) ] = {
1731 [ C(RESULT_ACCESS) ] = -1,
1732 [ C(RESULT_MISS) ] = -1,
1733 },
1734 [ C(OP_PREFETCH) ] = {
1735 [ C(RESULT_ACCESS) ] = 0,
1736 [ C(RESULT_MISS) ] = 0,
1737 },
1738 },
1739 [ C(LL ) ] = {
1740 [ C(OP_READ) ] = {
1741 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1742 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1743 },
1744 [ C(OP_WRITE) ] = {
1745 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1746 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1747 },
1748 [ C(OP_PREFETCH) ] = {
1749 [ C(RESULT_ACCESS) ] = 0,
1750 [ C(RESULT_MISS) ] = 0,
1751 },
1752 },
1753 [ C(DTLB) ] = {
1754 [ C(OP_READ) ] = {
1755 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1756 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1757 },
1758 [ C(OP_WRITE) ] = {
1759 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1760 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1761 },
1762 [ C(OP_PREFETCH) ] = {
1763 [ C(RESULT_ACCESS) ] = 0,
1764 [ C(RESULT_MISS) ] = 0,
1765 },
1766 },
1767 [ C(ITLB) ] = {
1768 [ C(OP_READ) ] = {
1769 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1770 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1771 },
1772 [ C(OP_WRITE) ] = {
1773 [ C(RESULT_ACCESS) ] = -1,
1774 [ C(RESULT_MISS) ] = -1,
1775 },
1776 [ C(OP_PREFETCH) ] = {
1777 [ C(RESULT_ACCESS) ] = -1,
1778 [ C(RESULT_MISS) ] = -1,
1779 },
1780 },
1781 [ C(BPU ) ] = {
1782 [ C(OP_READ) ] = {
1783 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1784 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1785 },
1786 [ C(OP_WRITE) ] = {
1787 [ C(RESULT_ACCESS) ] = -1,
1788 [ C(RESULT_MISS) ] = -1,
1789 },
1790 [ C(OP_PREFETCH) ] = {
1791 [ C(RESULT_ACCESS) ] = -1,
1792 [ C(RESULT_MISS) ] = -1,
1793 },
1794 },
1795 };
1796
1797 static __initconst const u64 atom_hw_cache_event_ids
1798 [PERF_COUNT_HW_CACHE_MAX]
1799 [PERF_COUNT_HW_CACHE_OP_MAX]
1800 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1801 {
1802 [ C(L1D) ] = {
1803 [ C(OP_READ) ] = {
1804 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1805 [ C(RESULT_MISS) ] = 0,
1806 },
1807 [ C(OP_WRITE) ] = {
1808 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1809 [ C(RESULT_MISS) ] = 0,
1810 },
1811 [ C(OP_PREFETCH) ] = {
1812 [ C(RESULT_ACCESS) ] = 0x0,
1813 [ C(RESULT_MISS) ] = 0,
1814 },
1815 },
1816 [ C(L1I ) ] = {
1817 [ C(OP_READ) ] = {
1818 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1819 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1820 },
1821 [ C(OP_WRITE) ] = {
1822 [ C(RESULT_ACCESS) ] = -1,
1823 [ C(RESULT_MISS) ] = -1,
1824 },
1825 [ C(OP_PREFETCH) ] = {
1826 [ C(RESULT_ACCESS) ] = 0,
1827 [ C(RESULT_MISS) ] = 0,
1828 },
1829 },
1830 [ C(LL ) ] = {
1831 [ C(OP_READ) ] = {
1832 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1833 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1834 },
1835 [ C(OP_WRITE) ] = {
1836 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1837 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1838 },
1839 [ C(OP_PREFETCH) ] = {
1840 [ C(RESULT_ACCESS) ] = 0,
1841 [ C(RESULT_MISS) ] = 0,
1842 },
1843 },
1844 [ C(DTLB) ] = {
1845 [ C(OP_READ) ] = {
1846 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1847 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1848 },
1849 [ C(OP_WRITE) ] = {
1850 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1851 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1852 },
1853 [ C(OP_PREFETCH) ] = {
1854 [ C(RESULT_ACCESS) ] = 0,
1855 [ C(RESULT_MISS) ] = 0,
1856 },
1857 },
1858 [ C(ITLB) ] = {
1859 [ C(OP_READ) ] = {
1860 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1861 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1862 },
1863 [ C(OP_WRITE) ] = {
1864 [ C(RESULT_ACCESS) ] = -1,
1865 [ C(RESULT_MISS) ] = -1,
1866 },
1867 [ C(OP_PREFETCH) ] = {
1868 [ C(RESULT_ACCESS) ] = -1,
1869 [ C(RESULT_MISS) ] = -1,
1870 },
1871 },
1872 [ C(BPU ) ] = {
1873 [ C(OP_READ) ] = {
1874 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1875 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1876 },
1877 [ C(OP_WRITE) ] = {
1878 [ C(RESULT_ACCESS) ] = -1,
1879 [ C(RESULT_MISS) ] = -1,
1880 },
1881 [ C(OP_PREFETCH) ] = {
1882 [ C(RESULT_ACCESS) ] = -1,
1883 [ C(RESULT_MISS) ] = -1,
1884 },
1885 },
1886 };
1887
1888 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1889 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1890 /* no_alloc_cycles.not_delivered */
1891 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1892 "event=0xca,umask=0x50");
1893 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1894 /* uops_retired.all */
1895 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1896 "event=0xc2,umask=0x10");
1897 /* uops_retired.all */
1898 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1899 "event=0xc2,umask=0x10");
1900
1901 static struct attribute *slm_events_attrs[] = {
1902 EVENT_PTR(td_total_slots_slm),
1903 EVENT_PTR(td_total_slots_scale_slm),
1904 EVENT_PTR(td_fetch_bubbles_slm),
1905 EVENT_PTR(td_fetch_bubbles_scale_slm),
1906 EVENT_PTR(td_slots_issued_slm),
1907 EVENT_PTR(td_slots_retired_slm),
1908 NULL
1909 };
1910
1911 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1912 {
1913 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1914 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1915 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1916 EVENT_EXTRA_END
1917 };
1918
1919 #define SLM_DMND_READ SNB_DMND_DATA_RD
1920 #define SLM_DMND_WRITE SNB_DMND_RFO
1921 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1922
1923 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1924 #define SLM_LLC_ACCESS SNB_RESP_ANY
1925 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1926
1927 static __initconst const u64 slm_hw_cache_extra_regs
1928 [PERF_COUNT_HW_CACHE_MAX]
1929 [PERF_COUNT_HW_CACHE_OP_MAX]
1930 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1931 {
1932 [ C(LL ) ] = {
1933 [ C(OP_READ) ] = {
1934 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1935 [ C(RESULT_MISS) ] = 0,
1936 },
1937 [ C(OP_WRITE) ] = {
1938 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1939 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1940 },
1941 [ C(OP_PREFETCH) ] = {
1942 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1943 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1944 },
1945 },
1946 };
1947
1948 static __initconst const u64 slm_hw_cache_event_ids
1949 [PERF_COUNT_HW_CACHE_MAX]
1950 [PERF_COUNT_HW_CACHE_OP_MAX]
1951 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1952 {
1953 [ C(L1D) ] = {
1954 [ C(OP_READ) ] = {
1955 [ C(RESULT_ACCESS) ] = 0,
1956 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1957 },
1958 [ C(OP_WRITE) ] = {
1959 [ C(RESULT_ACCESS) ] = 0,
1960 [ C(RESULT_MISS) ] = 0,
1961 },
1962 [ C(OP_PREFETCH) ] = {
1963 [ C(RESULT_ACCESS) ] = 0,
1964 [ C(RESULT_MISS) ] = 0,
1965 },
1966 },
1967 [ C(L1I ) ] = {
1968 [ C(OP_READ) ] = {
1969 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1970 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1971 },
1972 [ C(OP_WRITE) ] = {
1973 [ C(RESULT_ACCESS) ] = -1,
1974 [ C(RESULT_MISS) ] = -1,
1975 },
1976 [ C(OP_PREFETCH) ] = {
1977 [ C(RESULT_ACCESS) ] = 0,
1978 [ C(RESULT_MISS) ] = 0,
1979 },
1980 },
1981 [ C(LL ) ] = {
1982 [ C(OP_READ) ] = {
1983 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1984 [ C(RESULT_ACCESS) ] = 0x01b7,
1985 [ C(RESULT_MISS) ] = 0,
1986 },
1987 [ C(OP_WRITE) ] = {
1988 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1989 [ C(RESULT_ACCESS) ] = 0x01b7,
1990 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1991 [ C(RESULT_MISS) ] = 0x01b7,
1992 },
1993 [ C(OP_PREFETCH) ] = {
1994 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1995 [ C(RESULT_ACCESS) ] = 0x01b7,
1996 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1997 [ C(RESULT_MISS) ] = 0x01b7,
1998 },
1999 },
2000 [ C(DTLB) ] = {
2001 [ C(OP_READ) ] = {
2002 [ C(RESULT_ACCESS) ] = 0,
2003 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
2004 },
2005 [ C(OP_WRITE) ] = {
2006 [ C(RESULT_ACCESS) ] = 0,
2007 [ C(RESULT_MISS) ] = 0,
2008 },
2009 [ C(OP_PREFETCH) ] = {
2010 [ C(RESULT_ACCESS) ] = 0,
2011 [ C(RESULT_MISS) ] = 0,
2012 },
2013 },
2014 [ C(ITLB) ] = {
2015 [ C(OP_READ) ] = {
2016 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
2017 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
2018 },
2019 [ C(OP_WRITE) ] = {
2020 [ C(RESULT_ACCESS) ] = -1,
2021 [ C(RESULT_MISS) ] = -1,
2022 },
2023 [ C(OP_PREFETCH) ] = {
2024 [ C(RESULT_ACCESS) ] = -1,
2025 [ C(RESULT_MISS) ] = -1,
2026 },
2027 },
2028 [ C(BPU ) ] = {
2029 [ C(OP_READ) ] = {
2030 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
2031 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
2032 },
2033 [ C(OP_WRITE) ] = {
2034 [ C(RESULT_ACCESS) ] = -1,
2035 [ C(RESULT_MISS) ] = -1,
2036 },
2037 [ C(OP_PREFETCH) ] = {
2038 [ C(RESULT_ACCESS) ] = -1,
2039 [ C(RESULT_MISS) ] = -1,
2040 },
2041 },
2042 };
2043
2044 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
2045 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
2046 /* UOPS_NOT_DELIVERED.ANY */
2047 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
2048 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
2049 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
2050 /* UOPS_RETIRED.ANY */
2051 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
2052 /* UOPS_ISSUED.ANY */
2053 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
2054
2055 static struct attribute *glm_events_attrs[] = {
2056 EVENT_PTR(td_total_slots_glm),
2057 EVENT_PTR(td_total_slots_scale_glm),
2058 EVENT_PTR(td_fetch_bubbles_glm),
2059 EVENT_PTR(td_recovery_bubbles_glm),
2060 EVENT_PTR(td_slots_issued_glm),
2061 EVENT_PTR(td_slots_retired_glm),
2062 NULL
2063 };
2064
2065 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
2066 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2067 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
2068 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
2069 EVENT_EXTRA_END
2070 };
2071
2072 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
2073 #define GLM_DEMAND_RFO BIT_ULL(1)
2074 #define GLM_ANY_RESPONSE BIT_ULL(16)
2075 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
2076 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
2077 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
2078 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
2079 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
2080 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
2081 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
2082
2083 static __initconst const u64 glm_hw_cache_event_ids
2084 [PERF_COUNT_HW_CACHE_MAX]
2085 [PERF_COUNT_HW_CACHE_OP_MAX]
2086 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2087 [C(L1D)] = {
2088 [C(OP_READ)] = {
2089 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2090 [C(RESULT_MISS)] = 0x0,
2091 },
2092 [C(OP_WRITE)] = {
2093 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2094 [C(RESULT_MISS)] = 0x0,
2095 },
2096 [C(OP_PREFETCH)] = {
2097 [C(RESULT_ACCESS)] = 0x0,
2098 [C(RESULT_MISS)] = 0x0,
2099 },
2100 },
2101 [C(L1I)] = {
2102 [C(OP_READ)] = {
2103 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
2104 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
2105 },
2106 [C(OP_WRITE)] = {
2107 [C(RESULT_ACCESS)] = -1,
2108 [C(RESULT_MISS)] = -1,
2109 },
2110 [C(OP_PREFETCH)] = {
2111 [C(RESULT_ACCESS)] = 0x0,
2112 [C(RESULT_MISS)] = 0x0,
2113 },
2114 },
2115 [C(LL)] = {
2116 [C(OP_READ)] = {
2117 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2118 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2119 },
2120 [C(OP_WRITE)] = {
2121 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2122 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2123 },
2124 [C(OP_PREFETCH)] = {
2125 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2126 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2127 },
2128 },
2129 [C(DTLB)] = {
2130 [C(OP_READ)] = {
2131 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2132 [C(RESULT_MISS)] = 0x0,
2133 },
2134 [C(OP_WRITE)] = {
2135 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2136 [C(RESULT_MISS)] = 0x0,
2137 },
2138 [C(OP_PREFETCH)] = {
2139 [C(RESULT_ACCESS)] = 0x0,
2140 [C(RESULT_MISS)] = 0x0,
2141 },
2142 },
2143 [C(ITLB)] = {
2144 [C(OP_READ)] = {
2145 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2146 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2147 },
2148 [C(OP_WRITE)] = {
2149 [C(RESULT_ACCESS)] = -1,
2150 [C(RESULT_MISS)] = -1,
2151 },
2152 [C(OP_PREFETCH)] = {
2153 [C(RESULT_ACCESS)] = -1,
2154 [C(RESULT_MISS)] = -1,
2155 },
2156 },
2157 [C(BPU)] = {
2158 [C(OP_READ)] = {
2159 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2160 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2161 },
2162 [C(OP_WRITE)] = {
2163 [C(RESULT_ACCESS)] = -1,
2164 [C(RESULT_MISS)] = -1,
2165 },
2166 [C(OP_PREFETCH)] = {
2167 [C(RESULT_ACCESS)] = -1,
2168 [C(RESULT_MISS)] = -1,
2169 },
2170 },
2171 };
2172
2173 static __initconst const u64 glm_hw_cache_extra_regs
2174 [PERF_COUNT_HW_CACHE_MAX]
2175 [PERF_COUNT_HW_CACHE_OP_MAX]
2176 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2177 [C(LL)] = {
2178 [C(OP_READ)] = {
2179 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2180 GLM_LLC_ACCESS,
2181 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2182 GLM_LLC_MISS,
2183 },
2184 [C(OP_WRITE)] = {
2185 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2186 GLM_LLC_ACCESS,
2187 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2188 GLM_LLC_MISS,
2189 },
2190 [C(OP_PREFETCH)] = {
2191 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
2192 GLM_LLC_ACCESS,
2193 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
2194 GLM_LLC_MISS,
2195 },
2196 },
2197 };
2198
2199 static __initconst const u64 glp_hw_cache_event_ids
2200 [PERF_COUNT_HW_CACHE_MAX]
2201 [PERF_COUNT_HW_CACHE_OP_MAX]
2202 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2203 [C(L1D)] = {
2204 [C(OP_READ)] = {
2205 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2206 [C(RESULT_MISS)] = 0x0,
2207 },
2208 [C(OP_WRITE)] = {
2209 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2210 [C(RESULT_MISS)] = 0x0,
2211 },
2212 [C(OP_PREFETCH)] = {
2213 [C(RESULT_ACCESS)] = 0x0,
2214 [C(RESULT_MISS)] = 0x0,
2215 },
2216 },
2217 [C(L1I)] = {
2218 [C(OP_READ)] = {
2219 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
2220 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
2221 },
2222 [C(OP_WRITE)] = {
2223 [C(RESULT_ACCESS)] = -1,
2224 [C(RESULT_MISS)] = -1,
2225 },
2226 [C(OP_PREFETCH)] = {
2227 [C(RESULT_ACCESS)] = 0x0,
2228 [C(RESULT_MISS)] = 0x0,
2229 },
2230 },
2231 [C(LL)] = {
2232 [C(OP_READ)] = {
2233 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2234 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2235 },
2236 [C(OP_WRITE)] = {
2237 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2238 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2239 },
2240 [C(OP_PREFETCH)] = {
2241 [C(RESULT_ACCESS)] = 0x0,
2242 [C(RESULT_MISS)] = 0x0,
2243 },
2244 },
2245 [C(DTLB)] = {
2246 [C(OP_READ)] = {
2247 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2248 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
2249 },
2250 [C(OP_WRITE)] = {
2251 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2252 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
2253 },
2254 [C(OP_PREFETCH)] = {
2255 [C(RESULT_ACCESS)] = 0x0,
2256 [C(RESULT_MISS)] = 0x0,
2257 },
2258 },
2259 [C(ITLB)] = {
2260 [C(OP_READ)] = {
2261 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2262 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2263 },
2264 [C(OP_WRITE)] = {
2265 [C(RESULT_ACCESS)] = -1,
2266 [C(RESULT_MISS)] = -1,
2267 },
2268 [C(OP_PREFETCH)] = {
2269 [C(RESULT_ACCESS)] = -1,
2270 [C(RESULT_MISS)] = -1,
2271 },
2272 },
2273 [C(BPU)] = {
2274 [C(OP_READ)] = {
2275 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2276 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2277 },
2278 [C(OP_WRITE)] = {
2279 [C(RESULT_ACCESS)] = -1,
2280 [C(RESULT_MISS)] = -1,
2281 },
2282 [C(OP_PREFETCH)] = {
2283 [C(RESULT_ACCESS)] = -1,
2284 [C(RESULT_MISS)] = -1,
2285 },
2286 },
2287 };
2288
2289 static __initconst const u64 glp_hw_cache_extra_regs
2290 [PERF_COUNT_HW_CACHE_MAX]
2291 [PERF_COUNT_HW_CACHE_OP_MAX]
2292 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2293 [C(LL)] = {
2294 [C(OP_READ)] = {
2295 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2296 GLM_LLC_ACCESS,
2297 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2298 GLM_LLC_MISS,
2299 },
2300 [C(OP_WRITE)] = {
2301 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2302 GLM_LLC_ACCESS,
2303 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2304 GLM_LLC_MISS,
2305 },
2306 [C(OP_PREFETCH)] = {
2307 [C(RESULT_ACCESS)] = 0x0,
2308 [C(RESULT_MISS)] = 0x0,
2309 },
2310 },
2311 };
2312
2313 #define TNT_LOCAL_DRAM BIT_ULL(26)
2314 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2315 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2316 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2317 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2318 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2319 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2320
2321 static __initconst const u64 tnt_hw_cache_extra_regs
2322 [PERF_COUNT_HW_CACHE_MAX]
2323 [PERF_COUNT_HW_CACHE_OP_MAX]
2324 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2325 [C(LL)] = {
2326 [C(OP_READ)] = {
2327 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2328 TNT_LLC_ACCESS,
2329 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2330 TNT_LLC_MISS,
2331 },
2332 [C(OP_WRITE)] = {
2333 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2334 TNT_LLC_ACCESS,
2335 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2336 TNT_LLC_MISS,
2337 },
2338 [C(OP_PREFETCH)] = {
2339 [C(RESULT_ACCESS)] = 0x0,
2340 [C(RESULT_MISS)] = 0x0,
2341 },
2342 },
2343 };
2344
2345 static __initconst const u64 arw_hw_cache_extra_regs
2346 [PERF_COUNT_HW_CACHE_MAX]
2347 [PERF_COUNT_HW_CACHE_OP_MAX]
2348 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2349 [C(LL)] = {
2350 [C(OP_READ)] = {
2351 [C(RESULT_ACCESS)] = 0x4000000000000001,
2352 [C(RESULT_MISS)] = 0xFFFFF000000001,
2353 },
2354 [C(OP_WRITE)] = {
2355 [C(RESULT_ACCESS)] = 0x4000000000000002,
2356 [C(RESULT_MISS)] = 0xFFFFF000000002,
2357 },
2358 [C(OP_PREFETCH)] = {
2359 [C(RESULT_ACCESS)] = 0x0,
2360 [C(RESULT_MISS)] = 0x0,
2361 },
2362 },
2363 };
2364
2365 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2366 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2367 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2368 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2369
2370 static struct attribute *tnt_events_attrs[] = {
2371 EVENT_PTR(td_fe_bound_tnt),
2372 EVENT_PTR(td_retiring_tnt),
2373 EVENT_PTR(td_bad_spec_tnt),
2374 EVENT_PTR(td_be_bound_tnt),
2375 NULL,
2376 };
2377
2378 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2379 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2380 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2381 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2382 EVENT_EXTRA_END
2383 };
2384
2385 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
2386 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
2387
2388 static struct attribute *grt_mem_attrs[] = {
2389 EVENT_PTR(mem_ld_grt),
2390 EVENT_PTR(mem_st_grt),
2391 NULL
2392 };
2393
2394 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2395 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2396 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2397 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2398 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2399 EVENT_EXTRA_END
2400 };
2401
2402 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
2403 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
2404
2405 static struct attribute *cmt_events_attrs[] = {
2406 EVENT_PTR(td_fe_bound_tnt),
2407 EVENT_PTR(td_retiring_cmt),
2408 EVENT_PTR(td_bad_spec_cmt),
2409 EVENT_PTR(td_be_bound_tnt),
2410 NULL
2411 };
2412
2413 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2414 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2415 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2416 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2417 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2418 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2419 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2420 EVENT_EXTRA_END
2421 };
2422
2423 static struct extra_reg intel_arw_extra_regs[] __read_mostly = {
2424 /* must define OMR_X first, see intel_alt_er() */
2425 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
2426 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
2427 INTEL_UEVENT_EXTRA_REG(0x04b7, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
2428 INTEL_UEVENT_EXTRA_REG(0x08b7, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
2429 INTEL_UEVENT_EXTRA_REG(0x01d4, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
2430 INTEL_UEVENT_EXTRA_REG(0x02d4, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
2431 INTEL_UEVENT_EXTRA_REG(0x04d4, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
2432 INTEL_UEVENT_EXTRA_REG(0x08d4, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
2433 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2434 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2435 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2436 EVENT_EXTRA_END
2437 };
2438
2439 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01");
2440 EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02");
2441 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02");
2442
2443 static struct attribute *skt_events_attrs[] = {
2444 EVENT_PTR(td_fe_bound_skt),
2445 EVENT_PTR(td_retiring_skt),
2446 EVENT_PTR(td_bad_spec_cmt),
2447 EVENT_PTR(td_be_bound_skt),
2448 NULL,
2449 };
2450
2451 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2452 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2453 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2454 #define KNL_MCDRAM_FAR BIT_ULL(22)
2455 #define KNL_DDR_LOCAL BIT_ULL(23)
2456 #define KNL_DDR_FAR BIT_ULL(24)
2457 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2458 KNL_DDR_LOCAL | KNL_DDR_FAR)
2459 #define KNL_L2_READ SLM_DMND_READ
2460 #define KNL_L2_WRITE SLM_DMND_WRITE
2461 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2462 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2463 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2464 KNL_DRAM_ANY | SNB_SNP_ANY | \
2465 SNB_NON_DRAM)
2466
2467 static __initconst const u64 knl_hw_cache_extra_regs
2468 [PERF_COUNT_HW_CACHE_MAX]
2469 [PERF_COUNT_HW_CACHE_OP_MAX]
2470 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2471 [C(LL)] = {
2472 [C(OP_READ)] = {
2473 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2474 [C(RESULT_MISS)] = 0,
2475 },
2476 [C(OP_WRITE)] = {
2477 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2478 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2479 },
2480 [C(OP_PREFETCH)] = {
2481 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2482 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2483 },
2484 },
2485 };
2486
2487 /*
2488 * Used from PMIs where the LBRs are already disabled.
2489 *
2490 * This function may be called consecutively. The PMU is required to remain in
2491 * the disabled state across such consecutive calls.
2492 *
2493 * During consecutive calls, the same disable value will be written to related
2494 * registers, so the PMU state remains unchanged.
2495 *
2496 * intel_bts events don't coexist with intel PMU's BTS events because of
2497 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2498 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2499 *
2500 * Avoid PEBS_ENABLE MSR access in PMIs.
2501 * GLOBAL_CTRL has already been disabled, so none of the counters count anymore;
2502 * it doesn't matter whether PEBS is enabled or not.
2503 * The PEBS status usually does not change in PMIs, so it's unnecessary to
2504 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2505 * However, there are some cases which may change PEBS status, e.g. PMI
2506 * throttle. The PEBS_ENABLE should be updated where the status changes.
2507 */
2508 static __always_inline void __intel_pmu_disable_all(bool bts)
2509 {
2510 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2511
2512 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2513
2514 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2515 intel_pmu_disable_bts();
2516 }
2517
2518 static __always_inline void intel_pmu_disable_all(void)
2519 {
2520 __intel_pmu_disable_all(true);
2521 static_call_cond(x86_pmu_pebs_disable_all)();
2522 intel_pmu_lbr_disable_all();
2523 }
2524
2525 static void __intel_pmu_enable_all(int added, bool pmi)
2526 {
2527 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2528 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2529
2530 intel_pmu_lbr_enable_all(pmi);
2531
2532 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2533 wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2534 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2535 }
2536
2537 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL,
2538 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2539
2540 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2541 struct perf_event *event =
2542 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2543
2544 if (WARN_ON_ONCE(!event))
2545 return;
2546
2547 intel_pmu_enable_bts(event->hw.config);
2548 }
2549 }
2550
2551 static void intel_pmu_enable_all(int added)
2552 {
2553 static_call_cond(x86_pmu_pebs_enable_all)();
2554 __intel_pmu_enable_all(added, false);
2555 }
2556
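/*
 * Tail half of the branch-stack snapshot: read the LBRs while the PMU is
 * still disabled, copy at most cnt entries to the caller's buffer, then
 * re-enable the PMU and restore interrupts.
 */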
2557 static noinline int
2558 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2559 unsigned int cnt, unsigned long flags)
2560 {
2561 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2562
2563 intel_pmu_lbr_read();
2564 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2565
2566 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2567 intel_pmu_enable_all(0);
2568 local_irq_restore(flags);
2569 return cnt;
2570 }
2571
2572 static int
2573 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2574 {
2575 unsigned long flags;
2576
2577 /* must not have branches... */
2578 local_irq_save(flags);
2579 __intel_pmu_disable_all(false); /* we don't care about BTS */
2580 __intel_pmu_lbr_disable();
2581 /* ... until here */
2582 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2583 }
2584
2585 static int
2586 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2587 {
2588 unsigned long flags;
2589
2590 /* must not have branches... */
2591 local_irq_save(flags);
2592 __intel_pmu_disable_all(false); /* we don't care about BTS */
2593 __intel_pmu_arch_lbr_disable();
2594 /* ... until here */
2595 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2596 }
2597
2598 /*
2599 * Workaround for:
2600 * Intel Errata AAK100 (model 26)
2601 * Intel Errata AAP53 (model 30)
2602 * Intel Errata BD53 (model 44)
2603 *
2604 * The official story:
2605 * These chips need to be 'reset' when adding counters by programming the
2606 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2607 * in sequence on the same PMC or on different PMCs.
2608 *
2609 * In practice it appears some of these events do in fact count, and
2610 * we need to program all 4 events.
2611 */
2612 static void intel_pmu_nhm_workaround(void)
2613 {
2614 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2615 static const unsigned long nhm_magic[4] = {
2616 0x4300B5,
2617 0x4300D2,
2618 0x4300B1,
2619 0x4300B1
2620 };
2621 struct perf_event *event;
2622 int i;
2623
2624 /*
2625 * The Errata requires the following steps:
2626 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2627 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2628 * the corresponding PMCx;
2629 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2630 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2631 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2632 */
2633
2634 /*
2635 * The real steps we choose are a little different from above.
2636 * A) To reduce MSR operations, we don't run step 1) as they
2637 * are already cleared before this function is called;
2638 * B) Call x86_perf_event_update to save PMCx before configuring
2639 * PERFEVTSELx with magic number;
2640 * C) With step 5), we do clear only when the PERFEVTSELx is
2641 * not used currently.
2642 * D) Call x86_perf_event_set_period to restore PMCx;
2643 */
2644
2645 /* We always operate 4 pairs of PERF Counters */
2646 for (i = 0; i < 4; i++) {
2647 event = cpuc->events[i];
2648 if (event)
2649 static_call(x86_pmu_update)(event);
2650 }
2651
2652 for (i = 0; i < 4; i++) {
2653 wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2654 wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2655 }
2656
2657 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2658 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2659
2660 for (i = 0; i < 4; i++) {
2661 event = cpuc->events[i];
2662
2663 if (event) {
2664 static_call(x86_pmu_set_period)(event);
2665 __x86_pmu_enable_event(&event->hw,
2666 ARCH_PERFMON_EVENTSEL_ENABLE);
2667 } else
2668 wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2669 }
2670 }
2671
2672 static void intel_pmu_nhm_enable_all(int added)
2673 {
2674 if (added)
2675 intel_pmu_nhm_workaround();
2676 intel_pmu_enable_all(added);
2677 }
2678
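/*
 * Update the TSX force-abort MSR only when the cached shadow value changes,
 * so repeated calls don't issue redundant WRMSRs.
 */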
2679 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2680 {
2681 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2682
2683 if (cpuc->tfa_shadow != val) {
2684 cpuc->tfa_shadow = val;
2685 wrmsrq(MSR_TSX_FORCE_ABORT, val);
2686 }
2687 }
2688
2689 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2690 {
2691 /*
2692 * We're going to use PMC3, make sure TFA is set before we touch it.
2693 */
2694 if (cntr == 3)
2695 intel_set_tfa(cpuc, true);
2696 }
2697
2698 static void intel_tfa_pmu_enable_all(int added)
2699 {
2700 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2701
2702 /*
2703 * If we find PMC3 is no longer used when we enable the PMU, we can
2704 * clear TFA.
2705 */
2706 if (!test_bit(3, cpuc->active_mask))
2707 intel_set_tfa(cpuc, false);
2708
2709 intel_pmu_enable_all(added);
2710 }
2711
2712 static inline u64 intel_pmu_get_status(void)
2713 {
2714 u64 status;
2715
2716 rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
2717
2718 return status;
2719 }
2720
2721 static inline void intel_pmu_ack_status(u64 ack)
2722 {
2723 wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2724 }
2725
2726 static inline bool event_is_checkpointed(struct perf_event *event)
2727 {
2728 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2729 }
2730
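/*
 * Track, per counter index, which counters are excluded from guest or host
 * counting and which are checkpointed. The guest mask is masked out of
 * GLOBAL_CTRL in __intel_pmu_enable_all().
 */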
2731 static inline void intel_set_masks(struct perf_event *event, int idx)
2732 {
2733 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2734
2735 if (event->attr.exclude_host)
2736 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2737 if (event->attr.exclude_guest)
2738 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2739 if (event_is_checkpointed(event))
2740 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2741 }
2742
2743 static inline void intel_clear_masks(struct perf_event *event, int idx)
2744 {
2745 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2746
2747 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2748 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2749 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2750 }
2751
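/*
 * Clear the control bits of a fixed counter in the cached fixed_ctrl_val.
 * For topdown events, fixed counter 3 (SLOTS) is left alone as long as
 * another topdown event is still active.
 */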
2752 static void intel_pmu_disable_fixed(struct perf_event *event)
2753 {
2754 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2755 struct hw_perf_event *hwc = &event->hw;
2756 int idx = hwc->idx;
2757 u64 mask;
2758
2759 if (is_topdown_idx(idx)) {
2760 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2761
2762 /*
2763 * When there are other active TopDown events,
2764 * don't disable the fixed counter 3.
2765 */
2766 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2767 return;
2768 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2769 }
2770
2771 intel_clear_masks(event, idx);
2772
2773 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2774 cpuc->fixed_ctrl_val &= ~mask;
2775 }
2776
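/*
 * Write the per-counter CFG_C extension MSR (GP or fixed, depending on the
 * counter index) and cache the value so redundant writes can be avoided.
 */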
2777 static inline void __intel_pmu_update_event_ext(int idx, u64 ext)
2778 {
2779 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2780 u32 msr;
2781
2782 if (idx < INTEL_PMC_IDX_FIXED) {
2783 msr = MSR_IA32_PMC_V6_GP0_CFG_C +
2784 x86_pmu.addr_offset(idx, false);
2785 } else {
2786 msr = MSR_IA32_PMC_V6_FX0_CFG_C +
2787 x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
2788 }
2789
2790 cpuc->cfg_c_val[idx] = ext;
2791 wrmsrq(msr, ext);
2792 }
2793
2794 static void intel_pmu_disable_event_ext(struct perf_event *event)
2795 {
2796 /*
2797 * Only clear the CFG_C MSR for PEBS counter group events;
2798 * this prevents the HW counter's value from being added
2799 * incorrectly into other PEBS records after the PEBS
2800 * counter group events are disabled.
2801 *
2802 * For other events, it's unnecessary to clear the CFG_C MSRs,
2803 * since CFG_C has no effect while the counter is in the
2804 * disabled state. That helps to reduce the WRMSR overhead
2805 * on context switches.
2806 */
2807 if (!is_pebs_counter_event_group(event))
2808 return;
2809
2810 __intel_pmu_update_event_ext(event->hw.idx, 0);
2811 }
2812
2813 DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext);
2814
2815 static void intel_pmu_disable_event(struct perf_event *event)
2816 {
2817 struct hw_perf_event *hwc = &event->hw;
2818 int idx = hwc->idx;
2819
2820 switch (idx) {
2821 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2822 intel_clear_masks(event, idx);
2823 static_call_cond(intel_pmu_disable_event_ext)(event);
2824 x86_pmu_disable_event(event);
2825 break;
2826 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2827 static_call_cond(intel_pmu_disable_event_ext)(event);
2828 fallthrough;
2829 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2830 intel_pmu_disable_fixed(event);
2831 break;
2832 case INTEL_PMC_IDX_FIXED_BTS:
2833 intel_pmu_disable_bts();
2834 intel_pmu_drain_bts_buffer();
2835 return;
2836 case INTEL_PMC_IDX_FIXED_VLBR:
2837 intel_clear_masks(event, idx);
2838 break;
2839 default:
2840 intel_clear_masks(event, idx);
2841 pr_warn("Failed to disable the event with invalid index %d\n",
2842 idx);
2843 return;
2844 }
2845
2846 /*
2847 * Needs to be called after x86_pmu_disable_event,
2848 * so we don't trigger the event without PEBS bit set.
2849 */
2850 if (unlikely(event->attr.precise_ip))
2851 static_call(x86_pmu_pebs_disable)(event);
2852 }
2853
2854 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2855 {
2856 if (is_pebs_pt(event))
2857 perf_report_aux_output_id(event, idx);
2858 }
2859
2860 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2861 {
2862 return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2863 }
2864
2865 static void intel_pmu_del_event(struct perf_event *event)
2866 {
2867 if (intel_pmu_needs_branch_stack(event))
2868 intel_pmu_lbr_del(event);
2869 if (event->attr.precise_ip)
2870 intel_pmu_pebs_del(event);
2871 if (is_pebs_counter_event_group(event) ||
2872 is_acr_event_group(event))
2873 this_cpu_ptr(&cpu_hw_events)->n_late_setup--;
2874 }
2875
2876 static int icl_set_topdown_event_period(struct perf_event *event)
2877 {
2878 struct hw_perf_event *hwc = &event->hw;
2879 s64 left = local64_read(&hwc->period_left);
2880
2881 /*
2882 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2883 * Software should start both registers, PERF_METRICS and fixed
2884 * counter 3, from zero.
2885 * Clear PERF_METRICS and Fixed counter 3 in initialization.
2886 * After that, both MSRs will be cleared for each read.
2887 * Don't need to clear them again.
2888 */
2889 if (left == x86_pmu.max_period) {
2890 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
2891 wrmsrq(MSR_PERF_METRICS, 0);
2892 hwc->saved_slots = 0;
2893 hwc->saved_metric = 0;
2894 }
2895
2896 if ((hwc->saved_slots) && is_slots_event(event)) {
2897 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2898 wrmsrq(MSR_PERF_METRICS, hwc->saved_metric);
2899 }
2900
2901 perf_event_update_userpage(event);
2902
2903 return 0;
2904 }
2905
2906 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2907
2908 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2909 {
2910 u32 val;
2911
2912 /*
2913 * The metric is reported as an 8bit integer fraction
2914 * summing up to 0xff.
2915 * slots-in-metric = (Metric / 0xff) * slots
2916 */
2917 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2918 return mul_u64_u32_div(slots, val, 0xff);
2919 }
2920
2921 static u64 icl_get_topdown_value(struct perf_event *event,
2922 u64 slots, u64 metrics)
2923 {
2924 int idx = event->hw.idx;
2925 u64 delta;
2926
2927 if (is_metric_idx(idx))
2928 delta = icl_get_metrics_event_value(metrics, slots, idx);
2929 else
2930 delta = slots;
2931
2932 return delta;
2933 }
2934
2935 static void __icl_update_topdown_event(struct perf_event *event,
2936 u64 slots, u64 metrics,
2937 u64 last_slots, u64 last_metrics)
2938 {
2939 u64 delta, last = 0;
2940
2941 delta = icl_get_topdown_value(event, slots, metrics);
2942 if (last_slots)
2943 last = icl_get_topdown_value(event, last_slots, last_metrics);
2944
2945 /*
2946 * The 8-bit integer fraction of a metric may not be accurate,
2947 * especially when the change is very small.
2948 * For example, if only a few bad_spec events happen, the fraction
2949 * may drop from 1 to 0. If so, the bad_spec event value
2950 * will be 0, which is definitely less than the last value.
2951 * Don't update event->count in this case.
2952 */
2953 if (delta > last) {
2954 delta -= last;
2955 local64_add(delta, &event->count);
2956 }
2957 }
2958
2959 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2960 u64 metrics, int metric_end)
2961 {
2962 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2963 struct perf_event *other;
2964 int idx;
2965
2966 event->hw.saved_slots = slots;
2967 event->hw.saved_metric = metrics;
2968
2969 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2970 if (!is_topdown_idx(idx))
2971 continue;
2972 other = cpuc->events[idx];
2973 other->hw.saved_slots = slots;
2974 other->hw.saved_metric = metrics;
2975 }
2976 }
2977
2978 /*
2979 * Update all active Topdown events.
2980 *
2981 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2982 * modified by an NMI. The PMU has to be disabled before calling this function.
2983 */
2984
2985 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val)
2986 {
2987 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2988 struct perf_event *other;
2989 u64 slots, metrics;
2990 bool reset = true;
2991 int idx;
2992
2993 if (!val) {
2994 /* read Fixed counter 3 */
2995 slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
2996 if (!slots)
2997 return 0;
2998
2999 /* read PERF_METRICS */
3000 metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
3001 } else {
3002 slots = val[0];
3003 metrics = val[1];
3004 /*
3005 * Don't reset the PERF_METRICS and Fixed counter 3
3006 * for each PEBS record read. Utilize the RDPMC metrics
3007 * clear mode.
3008 */
3009 reset = false;
3010 }
3011
3012 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
3013 if (!is_topdown_idx(idx))
3014 continue;
3015 other = cpuc->events[idx];
3016 __icl_update_topdown_event(other, slots, metrics,
3017 event ? event->hw.saved_slots : 0,
3018 event ? event->hw.saved_metric : 0);
3019 }
3020
3021 /*
3022 * Check and update this event, which may have been cleared
3023 * in active_mask, e.g. by x86_pmu_stop().
3024 */
3025 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
3026 __icl_update_topdown_event(event, slots, metrics,
3027 event->hw.saved_slots,
3028 event->hw.saved_metric);
3029
3030 /*
3031 * In x86_pmu_stop(), the event is cleared in active_mask first,
3032 * then the delta is drained, which indicates a context switch for
3033 * counting.
3034 * Save the metric and slots values for the context switch.
3035 * There is no need to reset PERF_METRICS and Fixed counter 3,
3036 * because the values will be restored on the next schedule in.
3037 */
3038 update_saved_topdown_regs(event, slots, metrics, metric_end);
3039 reset = false;
3040 }
3041
3042 if (reset) {
3043 /* The fixed counter 3 has to be written before the PERF_METRICS. */
3044 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
3045 wrmsrq(MSR_PERF_METRICS, 0);
3046 if (event)
3047 update_saved_topdown_regs(event, 0, 0, metric_end);
3048 }
3049
3050 return slots;
3051 }
3052
3053 static u64 icl_update_topdown_event(struct perf_event *event, u64 *val)
3054 {
3055 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
3056 x86_pmu.num_topdown_events - 1,
3057 val);
3058 }
3059
3060 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
3061
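/*
 * Reading auto-reload PEBS, topdown, or PEBS-counter-snapshotting events
 * requires the PMU to be briefly disabled so the topdown metrics or the
 * PEBS buffer can be drained consistently; all other events just use
 * x86_perf_event_update().
 */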
3062 static void intel_pmu_read_event(struct perf_event *event)
3063 {
3064 if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) ||
3065 is_pebs_counter_event_group(event)) {
3066 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3067 bool pmu_enabled = cpuc->enabled;
3068
3069 /* Only need to call update_topdown_event() once for group read. */
3070 if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
3071 return;
3072
3073 cpuc->enabled = 0;
3074 if (pmu_enabled)
3075 intel_pmu_disable_all();
3076
3077 /*
3078 * If PEBS counter snapshotting is enabled,
3079 * the topdown event is available in PEBS records.
3080 */
3081 if (is_topdown_count(event) && !is_pebs_counter_event_group(event))
3082 static_call(intel_pmu_update_topdown_event)(event, NULL);
3083 else
3084 intel_pmu_drain_pebs_buffer();
3085
3086 cpuc->enabled = pmu_enabled;
3087 if (pmu_enabled)
3088 intel_pmu_enable_all(0);
3089
3090 return;
3091 }
3092
3093 x86_perf_event_update(event);
3094 }
3095
3096 static void intel_pmu_enable_fixed(struct perf_event *event)
3097 {
3098 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3099 struct hw_perf_event *hwc = &event->hw;
3100 int idx = hwc->idx;
3101 u64 bits = 0;
3102
3103 if (is_topdown_idx(idx)) {
3104 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3105 /*
3106 * When there are other active TopDown events,
3107 * don't enable the fixed counter 3 again.
3108 */
3109 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
3110 return;
3111
3112 idx = INTEL_PMC_IDX_FIXED_SLOTS;
3113
3114 if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR)
3115 bits |= INTEL_FIXED_3_METRICS_CLEAR;
3116 }
3117
3118 intel_set_masks(event, idx);
3119
3120 /*
3121 * Enable IRQ generation (0x8), if not PEBS or self-reloaded
3122 * ACR event, and enable ring-3 counting (0x2) and ring-0
3123 * counting (0x1) if requested:
3124 */
3125 if (!event->attr.precise_ip && !is_acr_self_reload_event(event))
3126 bits |= INTEL_FIXED_0_ENABLE_PMI;
3127 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
3128 bits |= INTEL_FIXED_0_USER;
3129 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
3130 bits |= INTEL_FIXED_0_KERNEL;
3131 if (hwc->config & ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE)
3132 bits |= INTEL_FIXED_0_RDPMC_USER_DISABLE;
3133
3134 /*
3135 * ANY bit is supported in v3 and up
3136 */
3137 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
3138 bits |= INTEL_FIXED_0_ANYTHREAD;
3139
3140 idx -= INTEL_PMC_IDX_FIXED;
3141 bits = intel_fixed_bits_by_idx(idx, bits);
3142 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
3143 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
3144
3145 cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
3146 cpuc->fixed_ctrl_val |= bits;
3147 }
3148
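/*
 * Program the auto-counter-reload configuration for one counter: the CFG_B
 * MSR takes the counter mask and the CFG_C MSR the reload value. Both are
 * cached per CPU so redundant WRMSRs can be skipped.
 */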
3149 static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
3150 {
3151 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3152 int msr_b, msr_c;
3153 int msr_offset;
3154
3155 if (!mask && !cpuc->acr_cfg_b[idx])
3156 return;
3157
3158 if (idx < INTEL_PMC_IDX_FIXED) {
3159 msr_b = MSR_IA32_PMC_V6_GP0_CFG_B;
3160 msr_c = MSR_IA32_PMC_V6_GP0_CFG_C;
3161 msr_offset = x86_pmu.addr_offset(idx, false);
3162 } else {
3163 msr_b = MSR_IA32_PMC_V6_FX0_CFG_B;
3164 msr_c = MSR_IA32_PMC_V6_FX0_CFG_C;
3165 msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
3166 }
3167
3168 if (cpuc->acr_cfg_b[idx] != mask) {
3169 wrmsrl(msr_b + msr_offset, mask);
3170 cpuc->acr_cfg_b[idx] = mask;
3171 }
3172 /* Only need to update the reload value when there is a valid config value. */
3173 if (mask && cpuc->acr_cfg_c[idx] != reload) {
3174 wrmsrl(msr_c + msr_offset, reload);
3175 cpuc->acr_cfg_c[idx] = reload;
3176 }
3177 }
3178
3179 static void intel_pmu_enable_acr(struct perf_event *event)
3180 {
3181 struct hw_perf_event *hwc = &event->hw;
3182
3183 if (!is_acr_event_group(event) || !event->attr.config2) {
3184 /*
3185 * The disable doesn't clear the ACR CFG register.
3186 * Check and clear the ACR CFG register.
3187 */
3188 intel_pmu_config_acr(hwc->idx, 0, 0);
3189 return;
3190 }
3191
3192 intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
3193 }
3194
3195 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
3196
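/*
 * Build the arch-PEBS extension (CFG_C) value for an event: enable PEBS,
 * set the auto-reload value if requested, and select only the record groups
 * (memory info, GPRs, XMMs, LBRs, counters, metrics) supported by the
 * hardware capabilities. Also switch MSR_IA32_PEBS_INDEX between the single
 * and multi record thresholds, draining the buffer when dropping back from
 * large PEBS.
 */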
3197 static void intel_pmu_enable_event_ext(struct perf_event *event)
3198 {
3199 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3200 struct hw_perf_event *hwc = &event->hw;
3201 union arch_pebs_index old, new;
3202 struct arch_pebs_cap cap;
3203 u64 ext = 0;
3204
3205 cap = hybrid(cpuc->pmu, arch_pebs_cap);
3206
3207 if (event->attr.precise_ip) {
3208 u64 pebs_data_cfg = intel_get_arch_pebs_data_config(event);
3209
3210 ext |= ARCH_PEBS_EN;
3211 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD)
3212 ext |= (-hwc->sample_period) & ARCH_PEBS_RELOAD;
3213
3214 if (pebs_data_cfg && cap.caps) {
3215 if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
3216 ext |= ARCH_PEBS_AUX & cap.caps;
3217
3218 if (pebs_data_cfg & PEBS_DATACFG_GP)
3219 ext |= ARCH_PEBS_GPR & cap.caps;
3220
3221 if (pebs_data_cfg & PEBS_DATACFG_XMMS)
3222 ext |= ARCH_PEBS_VECR_XMM & cap.caps;
3223
3224 if (pebs_data_cfg & PEBS_DATACFG_LBRS)
3225 ext |= ARCH_PEBS_LBR & cap.caps;
3226
3227 if (pebs_data_cfg &
3228 (PEBS_DATACFG_CNTR_MASK << PEBS_DATACFG_CNTR_SHIFT))
3229 ext |= ARCH_PEBS_CNTR_GP & cap.caps;
3230
3231 if (pebs_data_cfg &
3232 (PEBS_DATACFG_FIX_MASK << PEBS_DATACFG_FIX_SHIFT))
3233 ext |= ARCH_PEBS_CNTR_FIXED & cap.caps;
3234
3235 if (pebs_data_cfg & PEBS_DATACFG_METRICS)
3236 ext |= ARCH_PEBS_CNTR_METRICS & cap.caps;
3237 }
3238
3239 if (cpuc->n_pebs == cpuc->n_large_pebs)
3240 new.thresh = ARCH_PEBS_THRESH_MULTI;
3241 else
3242 new.thresh = ARCH_PEBS_THRESH_SINGLE;
3243
3244 rdmsrq(MSR_IA32_PEBS_INDEX, old.whole);
3245 if (new.thresh != old.thresh || !old.en) {
3246 if (old.thresh == ARCH_PEBS_THRESH_MULTI && old.wr > 0) {
3247 /*
3248 * Large PEBS was enabled.
3249 * Drain PEBS buffer before applying the single PEBS.
3250 */
3251 intel_pmu_drain_pebs_buffer();
3252 } else {
3253 new.wr = 0;
3254 new.full = 0;
3255 new.en = 1;
3256 wrmsrq(MSR_IA32_PEBS_INDEX, new.whole);
3257 }
3258 }
3259 }
3260
3261 if (is_pebs_counter_event_group(event))
3262 ext |= ARCH_PEBS_CNTR_ALLOW;
3263
3264 if (cpuc->cfg_c_val[hwc->idx] != ext)
3265 __intel_pmu_update_event_ext(hwc->idx, ext);
3266 }
3267
3268 static void intel_pmu_update_rdpmc_user_disable(struct perf_event *event)
3269 {
3270 if (!x86_pmu_has_rdpmc_user_disable(event->pmu))
3271 return;
3272
3273 /*
3274 * Counter-scope user-space rdpmc is disabled by default,
3275 * except in two cases:
3276 * a. rdpmc = 2 (user-space rdpmc enabled unconditionally)
3277 * b. rdpmc = 1 and the event is not a system-wide event.
3278 * The count of a non-system-wide event is cleared on
3279 * context switch, so no count data is leaked.
3280 */
3281 if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE ||
3282 (x86_pmu.attr_rdpmc == X86_USER_RDPMC_CONDITIONAL_ENABLE &&
3283 event->ctx->task))
3284 event->hw.config &= ~ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
3285 else
3286 event->hw.config |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
3287 }
3288
3289 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext);
3290
3291 static void intel_pmu_enable_event(struct perf_event *event)
3292 {
3293 u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
3294 struct hw_perf_event *hwc = &event->hw;
3295 int idx = hwc->idx;
3296
3297 intel_pmu_update_rdpmc_user_disable(event);
3298
3299 if (unlikely(event->attr.precise_ip))
3300 static_call(x86_pmu_pebs_enable)(event);
3301
3302 switch (idx) {
3303 case 0 ... INTEL_PMC_IDX_FIXED - 1:
3304 if (branch_sample_counters(event))
3305 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
3306 intel_set_masks(event, idx);
3307 static_call_cond(intel_pmu_enable_acr_event)(event);
3308 static_call_cond(intel_pmu_enable_event_ext)(event);
3309 /*
3310 * For self-reloaded ACR event, don't enable PMI since
3311 * HW won't set overflow bit in GLOBAL_STATUS. Otherwise,
3312 * the PMI would be recognized as a suspicious NMI.
3313 */
3314 if (is_acr_self_reload_event(event))
3315 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
3316 else if (!event->attr.precise_ip)
3317 hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
3318 __x86_pmu_enable_event(hwc, enable_mask);
3319 break;
3320 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
3321 static_call_cond(intel_pmu_enable_acr_event)(event);
3322 static_call_cond(intel_pmu_enable_event_ext)(event);
3323 fallthrough;
3324 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
3325 intel_pmu_enable_fixed(event);
3326 break;
3327 case INTEL_PMC_IDX_FIXED_BTS:
3328 if (!__this_cpu_read(cpu_hw_events.enabled))
3329 return;
3330 intel_pmu_enable_bts(hwc->config);
3331 break;
3332 case INTEL_PMC_IDX_FIXED_VLBR:
3333 intel_set_masks(event, idx);
3334 break;
3335 default:
3336 pr_warn("Failed to enable the event with invalid index %d\n",
3337 idx);
3338 }
3339 }
3340
3341 static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
3342 {
3343 struct perf_event *event, *leader;
3344 int i, j, k, bit, idx;
3345
3346 /*
3347 * FIXME: ACR mask parsing relies on cpuc->event_list[] (active events only).
3348 * Disabling an ACR event causes bit-shifting errors in the acr_mask of
3349 * remaining group members. As ACR sampling requires all events to be active,
3350 * this limitation is acceptable for now. Revisit if independent event toggling
3351 * is required.
3352 */
3353 for (i = 0; i < cpuc->n_events; i++) {
3354 leader = cpuc->event_list[i];
3355 if (!is_acr_event_group(leader))
3356 continue;
3357
3358 /* Find the end of the ACR group (j is one past the last member). */
3359 for (j = i; j < cpuc->n_events; j++) {
3360 event = cpuc->event_list[j];
3361 if (event->group_leader != leader->group_leader)
3362 break;
3363 }
3364
3365 /*
3366 * Translate the user-space ACR mask (attr.config2) into the physical
3367 * counter bitmask (hw.config1) for each ACR event in the group.
3368 * NOTE: ACR event contiguity is guaranteed by intel_pmu_hw_config().
3369 */
3370 for (k = i; k < j; k++) {
3371 event = cpuc->event_list[k];
3372 event->hw.config1 = 0;
3373 for_each_set_bit(bit, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
3374 idx = i + bit;
3375 /* The event index of an ACR group member must fall in [i, j). */
3376 if (idx >= j || !is_acr_event_group(cpuc->event_list[idx]))
3377 continue;
3378 __set_bit(cpuc->assign[idx], (unsigned long *)&event->hw.config1);
3379 }
3380 }
3381 i = j - 1;
3382 }
3383 }
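
/*
 * Hedged, self-contained sketch of the translation loop above (hypothetical
 * helper, not used by the driver): for an ACR group occupying
 * event_list[i..j) with physical counters recorded in assign[], bit 'b' of a
 * member's attr.config2 names the b-th event of the group, and the matching
 * physical counter assign[i + b] is set in that member's hw.config1. The
 * real code additionally skips slots whose event is not part of the ACR group.
 */
static inline u64 example_acr_config2_to_cntr_mask(u64 config2, const int *assign,
						   int i, int j)
{
	u64 cntr_mask = 0;
	int b;

	for (b = 0; b < j - i; b++) {
		if (config2 & (1ULL << b))
			cntr_mask |= 1ULL << assign[i + b];
	}
	return cntr_mask;
}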
3384
3385 void intel_pmu_late_setup(void)
3386 {
3387 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3388
3389 if (!cpuc->n_late_setup)
3390 return;
3391
3392 intel_pmu_pebs_late_setup(cpuc);
3393 intel_pmu_acr_late_setup(cpuc);
3394 }
3395
3396 static void intel_pmu_add_event(struct perf_event *event)
3397 {
3398 if (event->attr.precise_ip)
3399 intel_pmu_pebs_add(event);
3400 if (intel_pmu_needs_branch_stack(event))
3401 intel_pmu_lbr_add(event);
3402 if (is_pebs_counter_event_group(event) ||
3403 is_acr_event_group(event))
3404 this_cpu_ptr(&cpu_hw_events)->n_late_setup++;
3405 }
3406
3407 /*
3408 * Save and restart an expired event. Called by NMI contexts,
3409 * so it has to be careful about preempting normal event ops:
3410 */
3411 int intel_pmu_save_and_restart(struct perf_event *event)
3412 {
3413 static_call(x86_pmu_update)(event);
3414 /*
3415 * For a checkpointed counter always reset back to 0. This
3416 * avoids a situation where the counter overflows, aborts the
3417 * transaction and is then set back to shortly before the
3418 * overflow, and overflows and aborts again.
3419 */
3420 if (unlikely(event_is_checkpointed(event))) {
3421 /* No race with NMIs because the counter should not be armed */
3422 wrmsrq(event->hw.event_base, 0);
3423 local64_set(&event->hw.prev_count, 0);
3424 }
3425 return static_call(x86_pmu_set_period)(event);
3426 }
3427
3428 static int intel_pmu_set_period(struct perf_event *event)
3429 {
3430 if (unlikely(is_topdown_count(event)))
3431 return static_call(intel_pmu_set_topdown_event_period)(event);
3432
3433 return x86_perf_event_set_period(event);
3434 }
3435
3436 static u64 intel_pmu_update(struct perf_event *event)
3437 {
3438 if (unlikely(is_topdown_count(event)))
3439 return static_call(intel_pmu_update_topdown_event)(event, NULL);
3440
3441 return x86_perf_event_update(event);
3442 }
3443
3444 static void intel_pmu_reset(void)
3445 {
3446 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
3447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3448 unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
3449 unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
3450 unsigned long flags;
3451 int idx;
3452
3453 if (!*(u64 *)cntr_mask)
3454 return;
3455
3456 local_irq_save(flags);
3457
3458 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
3459
3460 for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
3461 wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
3462 wrmsrq_safe(x86_pmu_event_addr(idx), 0ull);
3463 }
3464 for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
3465 if (fixed_counter_disabled(idx, cpuc->pmu))
3466 continue;
3467 wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
3468 }
3469
3470 if (ds)
3471 ds->bts_index = ds->bts_buffer_base;
3472
3473 /* Ack all overflows and disable fixed counters */
3474 if (x86_pmu.version >= 2) {
3475 intel_pmu_ack_status(intel_pmu_get_status());
3476 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
3477 }
3478
3479 /* Reset LBRs and LBR freezing */
3480 if (x86_pmu.lbr_nr) {
3481 update_debugctlmsr(get_debugctlmsr() &
3482 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
3483 }
3484
3485 local_irq_restore(flags);
3486 }
3487
3488 /*
3489 * We may be running with guest PEBS events created by KVM, and the
3490 * PEBS records are logged into the guest's DS and invisible to host.
3491 *
3492 * In the case of guest PEBS overflow, we only trigger a fake event
3493 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
3494 * The guest will then vm-entry and check the guest DS area to read
3495 * the guest PEBS records.
3496 *
3497 * The contents and other behavior of the guest event do not matter.
3498 */
3499 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
3500 struct perf_sample_data *data)
3501 {
3502 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3503 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
3504 struct perf_event *event = NULL;
3505 int bit;
3506
3507 if (!unlikely(perf_guest_state()))
3508 return;
3509
3510 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3511 !guest_pebs_idxs)
3512 return;
3513
3514 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3515 event = cpuc->events[bit];
3516 if (!event->attr.precise_ip)
3517 continue;
3518
3519 perf_sample_data_init(data, 0, event->hw.last_period);
3520 perf_event_overflow(event, data, regs);
3521
3522 /* Injecting one fake event is enough. */
3523 break;
3524 }
3525 }
3526
3527 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3528 {
3529 struct perf_sample_data data;
3530 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3531 int bit;
3532 int handled = 0;
3533
3534 inc_irq_stat(apic_perf_irqs);
3535
3536 /*
3537 * Ignore a range of extra bits in status that do not indicate
3538 * overflow by themselves.
3539 */
3540 status &= ~(GLOBAL_STATUS_COND_CHG |
3541 GLOBAL_STATUS_ASIF |
3542 GLOBAL_STATUS_LBRS_FROZEN);
3543 if (!status)
3544 return 0;
3545 /*
3546 * In case multiple PEBS events are sampled at the same time,
3547 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3548 * PEBS buffer overflow and also seeing at most 3 PEBS counters
3549 * having their bits set in the status register. This is a sign
3550 * that there was at least one PEBS record pending at the time
3551 * of the PMU interrupt. PEBS counters must only be processed
3552 * via the drain_pebs() calls and not via the regular sample
3553 * processing loop that follows later in this function; otherwise
3554 * phony regular samples, not marked with the EXACT tag, may be
3555 * generated in the sampling buffer. Another possibility is to have
3556 * one PEBS event and at least one non-PEBS event which overflows
3557 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
3558 * not be set, yet the overflow status bit for the PEBS counter will
3559 * still be set on Skylake.
3560 *
3561 * To avoid this problem, we systematically ignore the PEBS-enabled
3562 * counters from the GLOBAL_STATUS mask and we always process PEBS
3563 * events via drain_pebs().
3564 */
3565 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3566
3567 /*
3568 * PEBS overflow sets bit 62 in the global status register
3569 */
3570 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3571 u64 pebs_enabled = cpuc->pebs_enabled;
3572
3573 handled++;
3574 x86_pmu_handle_guest_pebs(regs, &data);
3575 static_call(x86_pmu_drain_pebs)(regs, &data);
3576
3577 /*
3578 * PMI throttle may be triggered, which stops the PEBS event.
3579 * Although cpuc->pebs_enabled is updated accordingly,
3580 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has
3581 * been forced to 0 in the PMI.
3582 * Update the MSR if pebs_enabled has changed.
3583 */
3584 if (pebs_enabled != cpuc->pebs_enabled)
3585 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3586
3587 /*
3588 * The PEBS handler above (PEBS counter snapshotting) has already
3589 * updated fixed counter 3 and the perf metrics counts if they are
3590 * in a counter group, so there is no need to update them again.
3591 */
3592 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3593 is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3594 status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3595 }
3596
3597 /*
3598 * Arch PEBS sets bit 54 in the global status register
3599 */
3600 if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT,
3601 (unsigned long *)&status)) {
3602 handled++;
3603 static_call(x86_pmu_drain_pebs)(regs, &data);
3604
3605 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3606 is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3607 status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3608 }
3609
3610 /*
3611 * Intel PT
3612 */
3613 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3614 handled++;
3615 if (!perf_guest_handle_intel_pt_intr())
3616 intel_pt_interrupt();
3617 }
3618
3619 /*
3620 * Intel Perf metrics
3621 */
3622 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3623 handled++;
3624 static_call(intel_pmu_update_topdown_event)(NULL, NULL);
3625 }
3626
3627 status &= hybrid(cpuc->pmu, intel_ctrl);
3628
3629 /*
3630 * Checkpointed counters can lead to 'spurious' PMIs because the
3631 * rollback caused by the PMI will have cleared the overflow status
3632 * bit. Therefore always force probe these counters.
3633 */
3634 status |= cpuc->intel_cp_status;
3635
3636 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3637 struct perf_event *event = cpuc->events[bit];
3638 u64 last_period;
3639
3640 handled++;
3641
3642 if (!test_bit(bit, cpuc->active_mask))
3643 continue;
3644 /* Event may have already been cleared: */
3645 if (!event)
3646 continue;
3647
3648 /*
3649 * There may be unprocessed PEBS records in the PEBS buffer,
3650 * which still stores the previous values.
3651 * Process those records first before handling the latest value.
3652 * For example,
3653 * A is a regular counter
3654 * B is a PEBS event which reads A
3655 * C is a PEBS event
3656 *
3657 * The following can happen:
3658 * B-assist A=1
3659 * C A=2
3660 * B-assist A=3
3661 * A-overflow-PMI A=4
3662 * C-assist-PMI (PEBS buffer) A=5
3663 *
3664 * The PEBS buffer has to be drained before handling the A-PMI
3665 */
3666 if (is_pebs_counter_event_group(event))
3667 static_call(x86_pmu_drain_pebs)(regs, &data);
3668
3669 last_period = event->hw.last_period;
3670
3671 if (!intel_pmu_save_and_restart(event))
3672 continue;
3673
3674 perf_sample_data_init(&data, 0, last_period);
3675
3676 if (has_branch_stack(event))
3677 intel_pmu_lbr_save_brstack(&data, cpuc, event);
3678
3679 perf_event_overflow(event, &data, regs);
3680 }
3681
3682 return handled;
3683 }
3684
3685 /*
3686 * This handler is triggered by the local APIC, so the APIC IRQ handling
3687 * rules apply:
3688 */
3689 static int intel_pmu_handle_irq(struct pt_regs *regs)
3690 {
3691 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3692 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3693 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3694 int loops;
3695 u64 status;
3696 int handled;
3697 int pmu_enabled;
3698
3699 /*
3700 * Save the PMU state.
3701 * It needs to be restored when leaving the handler.
3702 */
3703 pmu_enabled = cpuc->enabled;
3704 /*
3705 * In general, the early ACK is only applied for old platforms.
3706 * For big cores starting from Haswell, the late ACK should be
3707 * applied.
3708 * For small cores after Tremont, we have to do the ACK right
3709 * before re-enabling counters, which is in the middle of the
3710 * NMI handler.
3711 */
3712 if (!late_ack && !mid_ack)
3713 apic_write(APIC_LVTPC, APIC_DM_NMI);
3714 intel_bts_disable_local();
3715 cpuc->enabled = 0;
3716 __intel_pmu_disable_all(true);
3717 handled = intel_pmu_drain_bts_buffer();
3718 handled += intel_bts_interrupt();
3719 status = intel_pmu_get_status();
3720 if (!status)
3721 goto done;
3722
3723 loops = 0;
3724 again:
3725 intel_pmu_lbr_read();
3726 intel_pmu_ack_status(status);
3727 if (++loops > 100) {
3728 static bool warned;
3729
3730 if (!warned) {
3731 WARN(1, "perfevents: irq loop stuck!\n");
3732 perf_event_print_debug();
3733 warned = true;
3734 }
3735 intel_pmu_reset();
3736 goto done;
3737 }
3738
3739 handled += handle_pmi_common(regs, status);
3740
3741 /*
3742 * Repeat if there is more work to be done:
3743 */
3744 status = intel_pmu_get_status();
3745 if (status)
3746 goto again;
3747
3748 done:
3749 if (mid_ack)
3750 apic_write(APIC_LVTPC, APIC_DM_NMI);
3751 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3752 cpuc->enabled = pmu_enabled;
3753 if (pmu_enabled)
3754 __intel_pmu_enable_all(0, true);
3755 intel_bts_enable_local();
3756
3757 /*
3758 * Only unmask the NMI after the overflow counters
3759 * have been reset. This avoids spurious NMIs on
3760 * Haswell CPUs.
3761 */
3762 if (late_ack)
3763 apic_write(APIC_LVTPC, APIC_DM_NMI);
3764 return handled;
3765 }
3766
3767 static struct event_constraint *
3768 intel_bts_constraints(struct perf_event *event)
3769 {
3770 if (unlikely(intel_pmu_has_bts(event)))
3771 return &bts_constraint;
3772
3773 return NULL;
3774 }
3775
3776 /*
3777 * Note: matches a fake event, like Fixed2.
3778 */
3779 static struct event_constraint *
3780 intel_vlbr_constraints(struct perf_event *event)
3781 {
3782 struct event_constraint *c = &vlbr_constraint;
3783
3784 if (unlikely(constraint_match(c, event->hw.config))) {
3785 event->hw.flags |= c->flags;
3786 return c;
3787 }
3788
3789 return NULL;
3790 }
3791
3792 static int intel_alt_er(struct cpu_hw_events *cpuc,
3793 int idx, u64 config)
3794 {
3795 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3796 int alt_idx = idx;
3797
3798 switch (idx) {
3799 case EXTRA_REG_RSP_0 ... EXTRA_REG_RSP_1:
3800 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3801 return idx;
3802 if (++alt_idx > EXTRA_REG_RSP_1)
3803 alt_idx = EXTRA_REG_RSP_0;
3804 if (config & ~extra_regs[alt_idx].valid_mask)
3805 return idx;
3806 break;
3807
3808 case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3:
3809 if (!(x86_pmu.flags & PMU_FL_HAS_OMR))
3810 return idx;
3811 if (++alt_idx > EXTRA_REG_OMR_3)
3812 alt_idx = EXTRA_REG_OMR_0;
3813 /*
3814 * Subtract EXTRA_REG_OMR_0 to index the OMR extra_reg
3815 * entries, which start from 0.
3816 */
3817 if (config & ~extra_regs[alt_idx - EXTRA_REG_OMR_0].valid_mask)
3818 return idx;
3819 break;
3820
3821 default:
3822 break;
3823 }
3824
3825 return alt_idx;
3826 }
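
/*
 * Minimal sketch of the rotation above for the OFFCORE_RSP pair
 * (hypothetical helper, not used by the driver): when the requested extra
 * register is busy with a different config, the sibling register is tried,
 * wrapping from RSP_1 back to RSP_0. The OMR registers rotate the same way
 * across EXTRA_REG_OMR_0..EXTRA_REG_OMR_3.
 */
static inline int example_alt_rsp_idx(int idx)
{
	return idx == EXTRA_REG_RSP_1 ? EXTRA_REG_RSP_0 : EXTRA_REG_RSP_1;
}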
3827
3828 static void intel_fixup_er(struct perf_event *event, int idx)
3829 {
3830 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3831 int er_idx;
3832
3833 event->hw.extra_reg.idx = idx;
3834 switch (idx) {
3835 case EXTRA_REG_RSP_0 ... EXTRA_REG_RSP_1:
3836 er_idx = idx - EXTRA_REG_RSP_0;
3837 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3838 event->hw.config |= extra_regs[er_idx].event;
3839 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0 + er_idx;
3840 break;
3841
3842 case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3:
3843 er_idx = idx - EXTRA_REG_OMR_0;
3844 event->hw.config &= ~ARCH_PERFMON_EVENTSEL_UMASK;
3845 event->hw.config |= 1ULL << (8 + er_idx);
3846 event->hw.extra_reg.reg = MSR_OMR_0 + er_idx;
3847 break;
3848
3849 default:
3850 pr_warn("The extra reg idx %d is not supported.\n", idx);
3851 }
3852 }
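
/*
 * Worked example for the OMR re-encoding above (hypothetical helper, not
 * used by the driver): moving an event to OMR register 'er_idx' means
 * clearing the umask field (bits 15:8) and setting the single umask bit
 * that selects that OMR, i.e. umask = 0x01 << er_idx.
 */
static inline u64 example_omr_config(u64 config, int er_idx)
{
	config &= ~(u64)ARCH_PERFMON_EVENTSEL_UMASK;	/* clear bits 15:8 */
	config |= 1ULL << (8 + er_idx);			/* e.g. er_idx=2 -> umask 0x04 */
	return config;
}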
3853
3854 /*
3855 * manage allocation of shared extra msr for certain events
3856 *
3857 * sharing can be:
3858 * per-cpu: to be shared between the various events on a single PMU
3859 * per-core: per-cpu + shared by HT threads
3860 */
3861 static struct event_constraint *
3862 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3863 struct perf_event *event,
3864 struct hw_perf_event_extra *reg)
3865 {
3866 struct event_constraint *c = &emptyconstraint;
3867 struct er_account *era;
3868 unsigned long flags;
3869 int idx = reg->idx;
3870
3871 /*
3872 * reg->alloc can be set due to existing state, so for fake cpuc we
3873 * need to ignore this, otherwise we might fail to allocate proper fake
3874 * state for this extra reg constraint. Also see the comment below.
3875 */
3876 if (reg->alloc && !cpuc->is_fake)
3877 return NULL; /* call x86_get_event_constraint() */
3878
3879 again:
3880 era = &cpuc->shared_regs->regs[idx];
3881 /*
3882 * we use spin_lock_irqsave() to avoid lockdep issues when
3883 * passing a fake cpuc
3884 */
3885 raw_spin_lock_irqsave(&era->lock, flags);
3886
3887 if (!atomic_read(&era->ref) || era->config == reg->config) {
3888
3889 /*
3890 * If it's a fake cpuc -- as per validate_{group,event}() we
3891 * shouldn't touch event state and we can avoid doing so
3892 * since both will only call get_event_constraints() once
3893 * on each event, this avoids the need for reg->alloc.
3894 *
3895 * Not doing the ER fixup will only result in era->reg being
3896 * wrong, but since we won't actually try and program hardware
3897 * this isn't a problem either.
3898 */
3899 if (!cpuc->is_fake) {
3900 if (idx != reg->idx)
3901 intel_fixup_er(event, idx);
3902
3903 /*
3904 * x86_schedule_events() can call get_event_constraints()
3905 * multiple times on events in the case of incremental
3906 * scheduling(). reg->alloc ensures we only do the ER
3907 * allocation once.
3908 */
3909 reg->alloc = 1;
3910 }
3911
3912 /* lock in msr value */
3913 era->config = reg->config;
3914 era->reg = reg->reg;
3915
3916 /* one more user */
3917 atomic_inc(&era->ref);
3918
3919 /*
3920 * need to call x86_get_event_constraint()
3921 * to check if associated event has constraints
3922 */
3923 c = NULL;
3924 } else {
3925 idx = intel_alt_er(cpuc, idx, reg->config);
3926 if (idx != reg->idx) {
3927 raw_spin_unlock_irqrestore(&era->lock, flags);
3928 goto again;
3929 }
3930 }
3931 raw_spin_unlock_irqrestore(&era->lock, flags);
3932
3933 return c;
3934 }
3935
3936 static void
3937 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3938 struct hw_perf_event_extra *reg)
3939 {
3940 struct er_account *era;
3941
3942 /*
3943 * Only put constraint if extra reg was actually allocated. Also takes
3944 * care of events which do not use an extra shared reg.
3945 *
3946 * Also, if this is a fake cpuc we shouldn't touch any event state
3947 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3948 * either since it'll be thrown out.
3949 */
3950 if (!reg->alloc || cpuc->is_fake)
3951 return;
3952
3953 era = &cpuc->shared_regs->regs[reg->idx];
3954
3955 /* one fewer user */
3956 atomic_dec(&era->ref);
3957
3958 /* allocate again next time */
3959 reg->alloc = 0;
3960 }
3961
3962 static struct event_constraint *
3963 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3964 struct perf_event *event)
3965 {
3966 struct event_constraint *c = NULL, *d;
3967 struct hw_perf_event_extra *xreg, *breg;
3968
3969 xreg = &event->hw.extra_reg;
3970 if (xreg->idx != EXTRA_REG_NONE) {
3971 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3972 if (c == &emptyconstraint)
3973 return c;
3974 }
3975 breg = &event->hw.branch_reg;
3976 if (breg->idx != EXTRA_REG_NONE) {
3977 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3978 if (d == &emptyconstraint) {
3979 __intel_shared_reg_put_constraints(cpuc, xreg);
3980 c = d;
3981 }
3982 }
3983 return c;
3984 }
3985
3986 struct event_constraint *
3987 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3988 struct perf_event *event)
3989 {
3990 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3991 struct event_constraint *c;
3992
3993 if (event_constraints) {
3994 for_each_event_constraint(c, event_constraints) {
3995 if (constraint_match(c, event->hw.config)) {
3996 event->hw.flags |= c->flags;
3997 return c;
3998 }
3999 }
4000 }
4001
4002 return &hybrid_var(cpuc->pmu, unconstrained);
4003 }
4004
4005 static struct event_constraint *
4006 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4007 struct perf_event *event)
4008 {
4009 struct event_constraint *c;
4010
4011 c = intel_vlbr_constraints(event);
4012 if (c)
4013 return c;
4014
4015 c = intel_bts_constraints(event);
4016 if (c)
4017 return c;
4018
4019 c = intel_shared_regs_constraints(cpuc, event);
4020 if (c)
4021 return c;
4022
4023 c = intel_pebs_constraints(event);
4024 if (c)
4025 return c;
4026
4027 return x86_get_event_constraints(cpuc, idx, event);
4028 }
4029
4030 static void
4031 intel_start_scheduling(struct cpu_hw_events *cpuc)
4032 {
4033 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
4034 struct intel_excl_states *xl;
4035 int tid = cpuc->excl_thread_id;
4036
4037 /*
4038 * nothing needed if in group validation mode
4039 */
4040 if (cpuc->is_fake || !is_ht_workaround_enabled())
4041 return;
4042
4043 /*
4044 * no exclusion needed
4045 */
4046 if (WARN_ON_ONCE(!excl_cntrs))
4047 return;
4048
4049 xl = &excl_cntrs->states[tid];
4050
4051 xl->sched_started = true;
4052 /*
4053 * lock shared state until we are done scheduling
4054 * in stop_event_scheduling()
4055 * makes scheduling appear as a transaction
4056 */
4057 raw_spin_lock(&excl_cntrs->lock);
4058 }
4059
4060 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
4061 {
4062 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
4063 struct event_constraint *c = cpuc->event_constraint[idx];
4064 struct intel_excl_states *xl;
4065 int tid = cpuc->excl_thread_id;
4066
4067 if (cpuc->is_fake || !is_ht_workaround_enabled())
4068 return;
4069
4070 if (WARN_ON_ONCE(!excl_cntrs))
4071 return;
4072
4073 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
4074 return;
4075
4076 xl = &excl_cntrs->states[tid];
4077
4078 lockdep_assert_held(&excl_cntrs->lock);
4079
4080 if (c->flags & PERF_X86_EVENT_EXCL)
4081 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
4082 else
4083 xl->state[cntr] = INTEL_EXCL_SHARED;
4084 }
4085
4086 static void
4087 intel_stop_scheduling(struct cpu_hw_events *cpuc)
4088 {
4089 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
4090 struct intel_excl_states *xl;
4091 int tid = cpuc->excl_thread_id;
4092
4093 /*
4094 * nothing needed if in group validation mode
4095 */
4096 if (cpuc->is_fake || !is_ht_workaround_enabled())
4097 return;
4098 /*
4099 * no exclusion needed
4100 */
4101 if (WARN_ON_ONCE(!excl_cntrs))
4102 return;
4103
4104 xl = &excl_cntrs->states[tid];
4105
4106 xl->sched_started = false;
4107 /*
4108 * release shared state lock (acquired in intel_start_scheduling())
4109 */
4110 raw_spin_unlock(&excl_cntrs->lock);
4111 }
4112
4113 static struct event_constraint *
4114 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
4115 {
4116 WARN_ON_ONCE(!cpuc->constraint_list);
4117
4118 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
4119 struct event_constraint *cx;
4120
4121 /*
4122 * grab pre-allocated constraint entry
4123 */
4124 cx = &cpuc->constraint_list[idx];
4125
4126 /*
4127 * initialize dynamic constraint
4128 * with static constraint
4129 */
4130 *cx = *c;
4131
4132 /*
4133 * mark constraint as dynamic
4134 */
4135 cx->flags |= PERF_X86_EVENT_DYNAMIC;
4136 c = cx;
4137 }
4138
4139 return c;
4140 }
4141
4142 static struct event_constraint *
4143 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
4144 int idx, struct event_constraint *c)
4145 {
4146 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
4147 struct intel_excl_states *xlo;
4148 int tid = cpuc->excl_thread_id;
4149 int is_excl, i, w;
4150
4151 /*
4152 * validating a group does not require
4153 * enforcing cross-thread exclusion
4154 */
4155 if (cpuc->is_fake || !is_ht_workaround_enabled())
4156 return c;
4157
4158 /*
4159 * no exclusion needed
4160 */
4161 if (WARN_ON_ONCE(!excl_cntrs))
4162 return c;
4163
4164 /*
4165 * because we modify the constraint, we need
4166 * to make a copy. Static constraints come
4167 * from static const tables.
4168 *
4169 * only needed when constraint has not yet
4170 * been cloned (marked dynamic)
4171 */
4172 c = dyn_constraint(cpuc, c, idx);
4173
4174 /*
4175 * From here on, the constraint is dynamic.
4176 * Either it was just allocated above, or it
4177 * was allocated during an earlier invocation
4178 * of this function
4179 */
4180
4181 /*
4182 * state of sibling HT
4183 */
4184 xlo = &excl_cntrs->states[tid ^ 1];
4185
4186 /*
4187 * event requires exclusive counter access
4188 * across HT threads
4189 */
4190 is_excl = c->flags & PERF_X86_EVENT_EXCL;
4191 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
4192 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
4193 if (!cpuc->n_excl++)
4194 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
4195 }
4196
4197 /*
4198 * Modify static constraint with current dynamic
4199 * state of thread
4200 *
4201 * EXCLUSIVE: sibling counter measuring exclusive event
4202 * SHARED : sibling counter measuring non-exclusive event
4203 * UNUSED : sibling counter unused
4204 */
4205 w = c->weight;
4206 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
4207 /*
4208 * exclusive event in sibling counter
4209 * our corresponding counter cannot be used
4210 * regardless of our event
4211 */
4212 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
4213 __clear_bit(i, c->idxmsk);
4214 w--;
4215 continue;
4216 }
4217 /*
4218 * if measuring an exclusive event, sibling
4219 * measuring non-exclusive, then counter cannot
4220 * be used
4221 */
4222 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
4223 __clear_bit(i, c->idxmsk);
4224 w--;
4225 continue;
4226 }
4227 }
4228
4229 /*
4230 * if we return an empty mask, then switch
4231 * back to static empty constraint to avoid
4232 * the cost of freeing later on
4233 */
4234 if (!w)
4235 c = &emptyconstraint;
4236
4237 c->weight = w;
4238
4239 return c;
4240 }
4241
4242 static struct event_constraint *
4243 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4244 struct perf_event *event)
4245 {
4246 struct event_constraint *c1, *c2;
4247
4248 c1 = cpuc->event_constraint[idx];
4249
4250 /*
4251 * first time only
4252 * - static constraint: no change across incremental scheduling calls
4253 * - dynamic constraint: handled by intel_get_excl_constraints()
4254 */
4255 c2 = __intel_get_event_constraints(cpuc, idx, event);
4256 if (c1) {
4257 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
4258 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
4259 c1->weight = c2->weight;
4260 c2 = c1;
4261 }
4262
4263 if (cpuc->excl_cntrs)
4264 return intel_get_excl_constraints(cpuc, event, idx, c2);
4265
4266 if (event->hw.dyn_constraint != ~0ULL) {
4267 c2 = dyn_constraint(cpuc, c2, idx);
4268 c2->idxmsk64 &= event->hw.dyn_constraint;
4269 c2->weight = hweight64(c2->idxmsk64);
4270 }
4271
4272 return c2;
4273 }
4274
4275 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
4276 struct perf_event *event)
4277 {
4278 struct hw_perf_event *hwc = &event->hw;
4279 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
4280 int tid = cpuc->excl_thread_id;
4281 struct intel_excl_states *xl;
4282
4283 /*
4284 * nothing needed if in group validation mode
4285 */
4286 if (cpuc->is_fake)
4287 return;
4288
4289 if (WARN_ON_ONCE(!excl_cntrs))
4290 return;
4291
4292 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
4293 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
4294 if (!--cpuc->n_excl)
4295 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
4296 }
4297
4298 /*
4299 * If event was actually assigned, then mark the counter state as
4300 * unused now.
4301 */
4302 if (hwc->idx >= 0) {
4303 xl = &excl_cntrs->states[tid];
4304
4305 /*
4306 * put_constraint may be called from x86_schedule_events()
4307 * which already has the lock held so here make locking
4308 * conditional.
4309 */
4310 if (!xl->sched_started)
4311 raw_spin_lock(&excl_cntrs->lock);
4312
4313 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
4314
4315 if (!xl->sched_started)
4316 raw_spin_unlock(&excl_cntrs->lock);
4317 }
4318 }
4319
4320 static void
4321 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
4322 struct perf_event *event)
4323 {
4324 struct hw_perf_event_extra *reg;
4325
4326 reg = &event->hw.extra_reg;
4327 if (reg->idx != EXTRA_REG_NONE)
4328 __intel_shared_reg_put_constraints(cpuc, reg);
4329
4330 reg = &event->hw.branch_reg;
4331 if (reg->idx != EXTRA_REG_NONE)
4332 __intel_shared_reg_put_constraints(cpuc, reg);
4333 }
4334
4335 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
4336 struct perf_event *event)
4337 {
4338 intel_put_shared_regs_event_constraints(cpuc, event);
4339
4340 /*
4341 * if the PMU has exclusive counter restrictions, then
4342 * all events are subject to them and must call the
4343 * put_excl_constraints() routine
4344 */
4345 if (cpuc->excl_cntrs)
4346 intel_put_excl_constraints(cpuc, event);
4347 }
4348
4349 static void intel_pebs_aliases_core2(struct perf_event *event)
4350 {
4351 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4352 /*
4353 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4354 * (0x003c) so that we can use it with PEBS.
4355 *
4356 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4357 * PEBS capable. However we can use INST_RETIRED.ANY_P
4358 * (0x00c0), which is a PEBS capable event, to get the same
4359 * count.
4360 *
4361 * INST_RETIRED.ANY_P counts the number of cycles that retires
4362 * CNTMASK instructions. By setting CNTMASK to a value (16)
4363 * larger than the maximum number of instructions that can be
4364 * retired per cycle (4) and then inverting the condition, we
4365 * count all cycles that retire 16 or less instructions, which
4366 * is every cycle.
4367 *
4368 * Thereby we gain a PEBS capable cycle counter.
4369 */
4370 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
4371
4372 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4373 event->hw.config = alt_config;
4374 }
4375 }
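
/*
 * Worked example of the alternative encoding above (hypothetical helper,
 * not used by the driver), assuming the architectural EVENTSEL layout
 * (event select in bits 7:0, inv in bit 23, cmask in bits 31:24):
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) expands to
 * 0xc0 | (1 << 23) | (16 << 24) == 0x108000c0.
 */
static inline u64 example_pebs_cycles_alias_core2(void)
{
	return 0x00c0ULL | (1ULL << 23) | (16ULL << 24);	/* == 0x108000c0 */
}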
4376
4377 static void intel_pebs_aliases_snb(struct perf_event *event)
4378 {
4379 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4380 /*
4381 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4382 * (0x003c) so that we can use it with PEBS.
4383 *
4384 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4385 * PEBS capable. However we can use UOPS_RETIRED.ALL
4386 * (0x01c2), which is a PEBS capable event, to get the same
4387 * count.
4388 *
4389 * UOPS_RETIRED.ALL counts the number of cycles that retires
4390 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
4391 * larger than the maximum number of micro-ops that can be
4392 * retired per cycle (4) and then inverting the condition, we
4393 * count all cycles that retire 16 or less micro-ops, which
4394 * is every cycle.
4395 *
4396 * Thereby we gain a PEBS capable cycle counter.
4397 */
4398 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
4399
4400 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4401 event->hw.config = alt_config;
4402 }
4403 }
4404
4405 static void intel_pebs_aliases_precdist(struct perf_event *event)
4406 {
4407 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4408 /*
4409 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4410 * (0x003c) so that we can use it with PEBS.
4411 *
4412 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4413 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
4414 * (0x01c0), which is a PEBS capable event, to get the same
4415 * count.
4416 *
4417 * The PREC_DIST event has special support to minimize sample
4418 * shadowing effects. One drawback is that it can be
4419 * only programmed on counter 1, but that seems like an
4420 * acceptable trade off.
4421 */
4422 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
4423
4424 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4425 event->hw.config = alt_config;
4426 }
4427 }
4428
4429 static void intel_pebs_aliases_ivb(struct perf_event *event)
4430 {
4431 if (event->attr.precise_ip < 3)
4432 return intel_pebs_aliases_snb(event);
4433 return intel_pebs_aliases_precdist(event);
4434 }
4435
4436 static void intel_pebs_aliases_skl(struct perf_event *event)
4437 {
4438 if (event->attr.precise_ip < 3)
4439 return intel_pebs_aliases_core2(event);
4440 return intel_pebs_aliases_precdist(event);
4441 }
4442
4443 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
4444 {
4445 unsigned long flags = x86_pmu.large_pebs_flags;
4446
4447 if (event->attr.use_clockid)
4448 flags &= ~PERF_SAMPLE_TIME;
4449 if (!event->attr.exclude_kernel)
4450 flags &= ~PERF_SAMPLE_REGS_USER;
4451 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
4452 flags &= ~PERF_SAMPLE_REGS_USER;
4453 if (event->attr.sample_regs_intr & ~PEBS_GP_REGS)
4454 flags &= ~PERF_SAMPLE_REGS_INTR;
4455 return flags;
4456 }
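
/*
 * Hedged sketch (hypothetical helper, not used by the driver) of how the
 * result above is consumed: an event is eligible for the large PEBS buffer
 * only when every bit it requests in attr.sample_type survives the
 * filtering, i.e. sample_type is a subset of the returned flags. This is
 * the subset test intel_pmu_hw_config() applies before setting
 * PERF_X86_EVENT_LARGE_PEBS.
 */
static inline bool example_large_pebs_ok(u64 sample_type, unsigned long large_pebs_flags)
{
	return !(sample_type & ~(u64)large_pebs_flags);
}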
4457
4458 static int intel_pmu_bts_config(struct perf_event *event)
4459 {
4460 struct perf_event_attr *attr = &event->attr;
4461
4462 if (unlikely(intel_pmu_has_bts(event))) {
4463 /* BTS is not supported by this architecture. */
4464 if (!x86_pmu.bts_active)
4465 return -EOPNOTSUPP;
4466
4467 /* BTS is currently only allowed for user-mode. */
4468 if (!attr->exclude_kernel)
4469 return -EOPNOTSUPP;
4470
4471 /* BTS is not allowed for precise events. */
4472 if (attr->precise_ip)
4473 return -EOPNOTSUPP;
4474
4475 /* disallow bts if conflicting events are present */
4476 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4477 return -EBUSY;
4478
4479 event->destroy = hw_perf_lbr_event_destroy;
4480 }
4481
4482 return 0;
4483 }
4484
4485 static int core_pmu_hw_config(struct perf_event *event)
4486 {
4487 int ret = x86_pmu_hw_config(event);
4488
4489 if (ret)
4490 return ret;
4491
4492 return intel_pmu_bts_config(event);
4493 }
4494
4495 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
4496 ((x86_pmu.num_topdown_events - 1) << 8))
4497
4498 static bool is_available_metric_event(struct perf_event *event)
4499 {
4500 return is_metric_event(event) &&
4501 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
4502 }
4503
4504 static inline bool is_mem_loads_event(struct perf_event *event)
4505 {
4506 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
4507 }
4508
4509 static inline bool is_mem_loads_aux_event(struct perf_event *event)
4510 {
4511 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
4512 }
4513
4514 static inline bool require_mem_loads_aux_event(struct perf_event *event)
4515 {
4516 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
4517 return false;
4518
4519 if (is_hybrid())
4520 return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
4521
4522 return true;
4523 }
4524
4525 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
4526 {
4527 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
4528
4529 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
4530 }
4531
4532 static u64 intel_pmu_freq_start_period(struct perf_event *event)
4533 {
4534 int type = event->attr.type;
4535 u64 config, factor;
4536 s64 start;
4537
4538 /*
4539 * The 127 is the lowest possible recommended SAV (sample after value)
4540 * for a 4000 freq (default freq), according to the event list JSON file.
4541 * Also, assume the workload is idle 50% time.
4542 */
4543 factor = 64 * 4000;
4544 if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
4545 goto end;
4546
4547 /*
4548 * The estimation of the start period in the freq mode is
4549 * based on the below assumption.
4550 *
4551 * For a cycles or an instructions event, 1GHZ of the
4552 * underlying platform, 1 IPC. The workload is idle 50% time.
4553 * The start period = 1,000,000,000 * 1 / freq / 2.
4554 * = 500,000,000 / freq
4555 *
4556 * Usually, branch-related events occur less often than the
4557 * instructions event. According to the Intel event list JSON
4558 * file, the SAV (sample after value) of a branch-related event
4559 * is usually 1/4 of an instruction event.
4560 * The start period of branch-related events = 125,000,000 / freq.
4561 *
4562 * Cache-related events occur even less often. The SAV is usually
4563 * 1/20 of an instruction event.
4564 * The start period of cache-related events = 25,000,000 / freq.
4565 */
4566 config = event->attr.config & PERF_HW_EVENT_MASK;
4567 if (type == PERF_TYPE_HARDWARE) {
4568 switch (config) {
4569 case PERF_COUNT_HW_CPU_CYCLES:
4570 case PERF_COUNT_HW_INSTRUCTIONS:
4571 case PERF_COUNT_HW_BUS_CYCLES:
4572 case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
4573 case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
4574 case PERF_COUNT_HW_REF_CPU_CYCLES:
4575 factor = 500000000;
4576 break;
4577 case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
4578 case PERF_COUNT_HW_BRANCH_MISSES:
4579 factor = 125000000;
4580 break;
4581 case PERF_COUNT_HW_CACHE_REFERENCES:
4582 case PERF_COUNT_HW_CACHE_MISSES:
4583 factor = 25000000;
4584 break;
4585 default:
4586 goto end;
4587 }
4588 }
4589
4590 if (type == PERF_TYPE_HW_CACHE)
4591 factor = 25000000;
4592 end:
4593 /*
4594 * Usually, a prime or a number with fewer factors (close to prime)
4595 * is chosen as an SAV, which makes it less likely that the sampling
4596 * period synchronizes with some periodic event in the workload.
4597 * Subtract 1 so that, at least for the default freq, values near
4598 * powers of two are avoided.
4599 */
4600 start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
4601
4602 if (start > x86_pmu.max_period)
4603 start = x86_pmu.max_period;
4604
4605 if (x86_pmu.limit_period)
4606 x86_pmu.limit_period(event, &start);
4607
4608 return start;
4609 }
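
/*
 * Worked numbers for the estimate above (illustrative only), taking the
 * default sample_freq of 4000 and ignoring the limit_period/max_period
 * clamping:
 *   cycles/instructions: DIV_ROUND_UP_ULL(500000000, 4000) - 1 = 124999
 *   branch events:       DIV_ROUND_UP_ULL(125000000, 4000) - 1 =  31249
 *   cache events:        DIV_ROUND_UP_ULL(25000000,  4000) - 1 =   6249
 * i.e. the start period tracks the expected event rate and is nudged off
 * a round number.
 */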
4610
4611 static inline bool intel_pmu_has_acr(struct pmu *pmu)
4612 {
4613 return !!hybrid(pmu, acr_cause_mask64);
4614 }
4615
4616 static bool intel_pmu_is_acr_group(struct perf_event *event)
4617 {
4618 /* The group leader has the ACR flag set */
4619 if (is_acr_event_group(event))
4620 return true;
4621
4622 /* The acr_mask is set */
4623 if (event->attr.config2)
4624 return true;
4625
4626 return false;
4627 }
4628
4629 static inline bool intel_pmu_has_pebs_counter_group(struct pmu *pmu)
4630 {
4631 u64 caps;
4632
4633 if (x86_pmu.intel_cap.pebs_format >= 6 && x86_pmu.intel_cap.pebs_baseline)
4634 return true;
4635
4636 caps = hybrid(pmu, arch_pebs_cap).caps;
4637 if (x86_pmu.arch_pebs && (caps & ARCH_PEBS_CNTR_MASK))
4638 return true;
4639
4640 return false;
4641 }
4642
4643 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
4644 u64 *cause_mask, int *num)
4645 {
4646 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
4647 *cause_mask |= event->attr.config2;
4648 *num += 1;
4649 }
4650
4651 static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
4652 int idx, u64 cause_mask)
4653 {
4654 if (test_bit(idx, (unsigned long *)&cause_mask))
4655 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
4656 }
4657
4658 static inline int intel_set_branch_counter_constr(struct perf_event *event,
4659 int *num)
4660 {
4661 if (branch_sample_call_stack(event))
4662 return -EINVAL;
4663 if (branch_sample_counters(event)) {
4664 (*num)++;
4665 event->hw.dyn_constraint &= x86_pmu.lbr_counters;
4666 }
4667
4668 return 0;
4669 }
4670
4671 static int intel_pmu_hw_config(struct perf_event *event)
4672 {
4673 int ret = x86_pmu_hw_config(event);
4674
4675 if (ret)
4676 return ret;
4677
4678 ret = intel_pmu_bts_config(event);
4679 if (ret)
4680 return ret;
4681
4682 if (event->attr.freq && event->attr.sample_freq) {
4683 event->hw.sample_period = intel_pmu_freq_start_period(event);
4684 event->hw.last_period = event->hw.sample_period;
4685 local64_set(&event->hw.period_left, event->hw.sample_period);
4686 }
4687
4688 if (event->attr.precise_ip) {
4689 struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
4690
4691 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
4692 return -EINVAL;
4693
4694 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
4695 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
4696 if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) &&
4697 !has_aux_action(event)) {
4698 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
4699 event->attach_state |= PERF_ATTACH_SCHED_CB;
4700 }
4701 }
4702 if (x86_pmu.pebs_aliases)
4703 x86_pmu.pebs_aliases(event);
4704
4705 if (x86_pmu.arch_pebs) {
4706 u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
4707 ~GLOBAL_CTRL_EN_PERF_METRICS;
4708 u64 pebs_mask = event->attr.precise_ip >= 3 ?
4709 pebs_cap.pdists : pebs_cap.counters;
4710 if (cntr_mask != pebs_mask)
4711 event->hw.dyn_constraint &= pebs_mask;
4712 }
4713 }
4714
4715 if (needs_branch_stack(event)) {
4716 /* Avoid branch stack setup for counting events in SAMPLE READ */
4717 if (is_sampling_event(event) ||
4718 !(event->attr.sample_type & PERF_SAMPLE_READ))
4719 event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4720 }
4721
4722 if (branch_sample_counters(event)) {
4723 struct perf_event *leader, *sibling;
4724 int num = 0;
4725
4726 if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
4727 (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
4728 return -EINVAL;
4729
4730 /*
4731 * The branch counter logging is not supported in the call stack
4732 * mode yet, since we cannot simply flush the LBR during e.g.,
4733 * multiplexing. Also, there is no obvious usage with the call
4734 * stack mode. Simply forbid it for now.
4735 *
4736 * If any events in the group enable the branch counter logging
4737 * feature, the group is treated as a branch counter logging
4738 * group, which requires the extra space to store the counters.
4739 */
4740 leader = event->group_leader;
4741 if (intel_set_branch_counter_constr(leader, &num))
4742 return -EINVAL;
4743 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4744
4745 for_each_sibling_event(sibling, leader) {
4746 if (intel_set_branch_counter_constr(sibling, &num))
4747 return -EINVAL;
4748 }
4749
4750 /* event isn't installed as a sibling yet. */
4751 if (event != leader) {
4752 if (intel_set_branch_counter_constr(event, &num))
4753 return -EINVAL;
4754 }
4755
4756 if (num > fls(x86_pmu.lbr_counters))
4757 return -EINVAL;
4758 /*
4759 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't
4760 * require any branch stack setup.
4761 * Clear the bit to avoid unnecessary branch stack setup.
4762 */
4763 if (0 == (event->attr.branch_sample_type &
4764 ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4765 PERF_SAMPLE_BRANCH_COUNTERS)))
4766 event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4767
4768 /*
4769 * Force the leader to be a LBR event. So LBRs can be reset
4770 * with the leader event. See intel_pmu_lbr_del() for details.
4771 */
4772 if (!intel_pmu_needs_branch_stack(leader))
4773 return -EINVAL;
4774 }
4775
4776 if (intel_pmu_needs_branch_stack(event)) {
4777 ret = intel_pmu_setup_lbr_filter(event);
4778 if (ret)
4779 return ret;
4780 event->attach_state |= PERF_ATTACH_SCHED_CB;
4781
4782 /*
4783 * BTS is set up earlier in this path, so don't account twice
4784 */
4785 if (!unlikely(intel_pmu_has_bts(event))) {
4786 /* disallow lbr if conflicting events are present */
4787 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4788 return -EBUSY;
4789
4790 event->destroy = hw_perf_lbr_event_destroy;
4791 }
4792 }
4793
4794 if (event->attr.aux_output) {
4795 if (!event->attr.precise_ip)
4796 return -EINVAL;
4797
4798 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4799 }
4800
4801 if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
4802 intel_pmu_has_pebs_counter_group(event->pmu) &&
4803 is_sampling_event(event) &&
4804 event->attr.precise_ip)
4805 event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
4806
4807 if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) {
4808 struct perf_event *sibling, *leader = event->group_leader;
4809 struct pmu *pmu = event->pmu;
4810 bool has_sw_event = false;
4811 int num = 0, idx = 0;
4812 u64 cause_mask = 0;
4813
4814 /* Perf metrics are not supported */
4815 if (is_metric_event(event))
4816 return -EINVAL;
4817
4818 /* Freq mode is not supported */
4819 if (event->attr.freq)
4820 return -EINVAL;
4821
4822 /* PDist is not supported */
4823 if (event->attr.config2 && event->attr.precise_ip > 2)
4824 return -EINVAL;
4825
4826 /* The reload value cannot exceed the max period */
4827 if (event->attr.sample_period > x86_pmu.max_period)
4828 return -EINVAL;
4829 /*
4830 * The counter-constraints of each event cannot be finalized
4831 * unless the whole group is scanned. However, it's hard
4832 * to know whether the event is the last one of the group.
4833 * Recalculate the counter-constraints for each event when
4834 * adding a new event.
4835 *
4836 * The group is traversed twice, which may be optimized later.
4837 * In the first round,
4838 * - Find all events which do reload when other events
4839 * overflow and set the corresponding counter-constraints
4840 * - Add all events, which can cause other events reload,
4841 * in the cause_mask
4842 * - Error out if the number of events exceeds the HW limit
4843 * - The ACR events must be contiguous.
4844 * Error out if there are non-X86 events between ACR events.
4845 * This is not a HW limit, but a SW limit.
4846 * With that assumption, intel_pmu_acr_late_setup() can
4847 * easily convert the event idx to counter idx without
4848 * traversing the whole event list.
4849 */
4850 if (!is_x86_event(leader))
4851 return -EINVAL;
4852
4853 if (leader->attr.config2)
4854 intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num);
4855
4856 if (leader->nr_siblings) {
4857 for_each_sibling_event(sibling, leader) {
4858 if (!is_x86_event(sibling)) {
4859 has_sw_event = true;
4860 continue;
4861 }
4862 if (!sibling->attr.config2)
4863 continue;
4864 if (has_sw_event)
4865 return -EINVAL;
4866 intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num);
4867 }
4868 }
4869 if (leader != event && event->attr.config2) {
4870 if (has_sw_event)
4871 return -EINVAL;
4872 intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num);
4873 }
4874
4875 if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
4876 num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
4877 return -EINVAL;
4878 /*
4879 * In the second round, apply the counter-constraints for
4880 * the events which can cause other events reload.
4881 */
4882 intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask);
4883
4884 if (leader->nr_siblings) {
4885 for_each_sibling_event(sibling, leader) {
4886 if (is_x86_event(sibling))
4887 intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
4888 }
4889 }
4890
4891 if (leader != event)
4892 intel_pmu_set_acr_caused_constr(event, idx, cause_mask);
4893
4894 leader->hw.flags |= PERF_X86_EVENT_ACR;
4895 }
4896
4897 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4898 (event->attr.type == PERF_TYPE_HW_CACHE))
4899 return 0;
4900
4901 /*
4902 * Config Topdown slots and metric events
4903 *
4904 * The slots event on Fixed Counter 3 can support sampling,
4905 * which will be handled normally in x86_perf_event_update().
4906 *
4907 * Metric events don't support sampling and require being paired
4908 * with a slots event as group leader. When the slots event
4909 * is used in a metrics group, it too cannot support sampling.
4910 */
4911 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4912 /* The metrics_clear can only be set for the slots event */
4913 if (event->attr.config1 &&
4914 (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR)))
4915 return -EINVAL;
4916
4917 if (event->attr.config2)
4918 return -EINVAL;
4919
4920 /*
4921 * The TopDown metrics events and slots event don't
4922 * support any filters.
4923 */
4924 if (event->attr.config & X86_ALL_EVENT_FLAGS)
4925 return -EINVAL;
4926
4927 if (is_available_metric_event(event)) {
4928 struct perf_event *leader = event->group_leader;
4929
4930 /* The metric events don't support sampling. */
4931 if (is_sampling_event(event))
4932 return -EINVAL;
4933
4934 /* The metric events require a slots group leader. */
4935 if (!is_slots_event(leader))
4936 return -EINVAL;
4937
4938 /*
4939 * The leader/SLOTS must not be a sampling event for
4940 * metric use; hardware requires it starts at 0 when used
4941 * in conjunction with MSR_PERF_METRICS.
4942 */
4943 if (is_sampling_event(leader))
4944 return -EINVAL;
4945
4946 event->event_caps |= PERF_EV_CAP_SIBLING;
4947 /*
4948 * Only once we have a METRICs sibling do we
4949 * need TopDown magic.
4950 */
4951 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4952 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4953 }
4954 }
4955
4956 /*
4957 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4958 * doesn't function quite right. As a work-around it needs to always be
4959 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4960 * The actual count of this second event is irrelevant; it just needs
4961 * to be active to make the first event function correctly.
4962 *
4963 * In a group, the auxiliary event must be in front of the load latency
4964 * event. The rule is to simplify the implementation of the check.
4965 * That's because perf cannot have a complete group at the moment.
4966 */
4967 if (require_mem_loads_aux_event(event) &&
4968 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4969 is_mem_loads_event(event)) {
4970 struct perf_event *leader = event->group_leader;
4971 struct perf_event *sibling = NULL;
4972
4973 /*
4974 * When this memload event is also the first event (no group
4975 * exists yet), then there is no aux event before it.
4976 */
4977 if (leader == event)
4978 return -ENODATA;
4979
4980 if (!is_mem_loads_aux_event(leader)) {
4981 for_each_sibling_event(sibling, leader) {
4982 if (is_mem_loads_aux_event(sibling))
4983 break;
4984 }
4985 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4986 return -ENODATA;
4987 }
4988 }
4989
4990 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4991 return 0;
4992
4993 if (x86_pmu.version < 3)
4994 return -EINVAL;
4995
4996 ret = perf_allow_cpu();
4997 if (ret)
4998 return ret;
4999
5000 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
5001
5002 return 0;
5003 }
5004
5005 /*
5006 * Currently, the only caller of this function is the atomic_switch_perf_msrs().
5007 * The host perf context helps to prepare the values of the real hardware for
5008 * a set of msrs that need to be switched atomically in a vmx transaction.
5009 *
5010 * For example, the pseudocode needed to add a new msr should look like:
5011 *
5012 * arr[(*nr)++] = (struct perf_guest_switch_msr){
5013 * .msr = the hardware msr address,
5014 * .host = the value the hardware has when it doesn't run a guest,
5015 * .guest = the value the hardware has when it runs a guest,
5016 * };
5017 *
5018 * These values have nothing to do with the emulated values the guest sees
5019 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
5020 * specifically in the intel_pmu_{get,set}_msr().
5021 */
5022 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
5023 {
5024 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5025 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
5026 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
5027 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
5028 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
5029 int global_ctrl, pebs_enable;
5030
5031 /*
5032 * In addition to obeying exclude_guest/exclude_host, remove bits being
5033 * used for PEBS when running a guest, because PEBS writes to virtual
5034 * addresses (not physical addresses).
5035 */
5036 *nr = 0;
5037 global_ctrl = (*nr)++;
5038 arr[global_ctrl] = (struct perf_guest_switch_msr){
5039 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
5040 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
5041 .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
5042 };
5043
5044 if (!x86_pmu.ds_pebs)
5045 return arr;
5046
5047 /*
5048 * If PMU counter has PEBS enabled it is not enough to
5049 * disable counter on a guest entry since PEBS memory
5050 * write can overshoot guest entry and corrupt guest
5051 * memory. Disabling PEBS solves the problem.
5052 *
5053 * Don't do this if the CPU already enforces it.
5054 */
5055 if (x86_pmu.pebs_no_isolation) {
5056 arr[(*nr)++] = (struct perf_guest_switch_msr){
5057 .msr = MSR_IA32_PEBS_ENABLE,
5058 .host = cpuc->pebs_enabled,
5059 .guest = 0,
5060 };
5061 return arr;
5062 }
5063
5064 if (!kvm_pmu || !x86_pmu.pebs_ept)
5065 return arr;
5066
5067 arr[(*nr)++] = (struct perf_guest_switch_msr){
5068 .msr = MSR_IA32_DS_AREA,
5069 .host = (unsigned long)cpuc->ds,
5070 .guest = kvm_pmu->ds_area,
5071 };
5072
5073 if (x86_pmu.intel_cap.pebs_baseline) {
5074 arr[(*nr)++] = (struct perf_guest_switch_msr){
5075 .msr = MSR_PEBS_DATA_CFG,
5076 .host = cpuc->active_pebs_data_cfg,
5077 .guest = kvm_pmu->pebs_data_cfg,
5078 };
5079 }
5080
5081 pebs_enable = (*nr)++;
5082 arr[pebs_enable] = (struct perf_guest_switch_msr){
5083 .msr = MSR_IA32_PEBS_ENABLE,
5084 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
5085 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
5086 };
5087
5088 if (arr[pebs_enable].host) {
5089 /* Disable guest PEBS if host PEBS is enabled. */
5090 arr[pebs_enable].guest = 0;
5091 } else {
5092 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
5093 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
5094 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
5095 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
5096 arr[global_ctrl].guest |= arr[pebs_enable].guest;
5097 }
5098
5099 return arr;
5100 }
5101
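/*
 * Legacy/core PMU variant: expose each active counter's event select MSR,
 * with the enable bit cleared on the host or guest side according to the
 * event's exclude_host/exclude_guest attributes.
 */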
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
5103 {
5104 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5105 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
5106 int idx;
5107
5108 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
5109 struct perf_event *event = cpuc->events[idx];
5110
5111 arr[idx].msr = x86_pmu_config_addr(idx);
5112 arr[idx].host = arr[idx].guest = 0;
5113
5114 if (!test_bit(idx, cpuc->active_mask))
5115 continue;
5116
5117 arr[idx].host = arr[idx].guest =
5118 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
5119
5120 if (event->attr.exclude_host)
5121 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
5122 else if (event->attr.exclude_guest)
5123 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
5124 }
5125
5126 *nr = x86_pmu_max_num_counters(cpuc->pmu);
5127 return arr;
5128 }
5129
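/* Only enable the event if it is allowed to count on the host. */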
static void core_pmu_enable_event(struct perf_event *event)
5131 {
5132 if (!event->attr.exclude_host)
5133 x86_pmu_enable_event(event);
5134 }
5135
static void core_pmu_enable_all(int added)
5137 {
5138 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5139 int idx;
5140
5141 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
5142 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
5143
5144 if (!test_bit(idx, cpuc->active_mask) ||
5145 cpuc->events[idx]->attr.exclude_host)
5146 continue;
5147
5148 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
5149 }
5150 }
5151
static int hsw_hw_config(struct perf_event *event)
5153 {
5154 int ret = intel_pmu_hw_config(event);
5155
5156 if (ret)
5157 return ret;
5158 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
5159 return 0;
5160 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
5161
/*
 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
 * this combination.
 */
5167 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
5168 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
5169 event->attr.precise_ip > 0))
5170 return -EOPNOTSUPP;
5171
5172 if (event_is_checkpointed(event)) {
/*
 * Sampling of checkpointed events can cause situations where
 * the CPU constantly aborts because of an overflow, which is
 * then checkpointed back and ignored. Forbid checkpointing
 * for sampling.
 *
 * But still allow a long sampling period, so that perf stat
 * from KVM works.
 */
5182 if (event->attr.sample_period > 0 &&
5183 event->attr.sample_period < 0x7fffffff)
5184 return -EOPNOTSUPP;
5185 }
5186 return 0;
5187 }
5188
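/*
 * Static constraints used by the model specific get_event_constraints()
 * callbacks below to pin events onto specific counters, e.g.
 * counter0_1_constraint (mask 0x3) allows only GP counters 0 and 1.
 */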
5189 static struct event_constraint counter0_constraint =
5190 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
5191
5192 static struct event_constraint counter1_constraint =
5193 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
5194
5195 static struct event_constraint counter0_1_constraint =
5196 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
5197
5198 static struct event_constraint counter2_constraint =
5199 EVENT_CONSTRAINT(0, 0x4, 0);
5200
5201 static struct event_constraint fixed0_constraint =
5202 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
5203
5204 static struct event_constraint fixed0_counter0_constraint =
5205 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
5206
5207 static struct event_constraint fixed0_counter0_1_constraint =
5208 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
5209
5210 static struct event_constraint counters_1_7_constraint =
5211 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
5212
5213 static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5215 struct perf_event *event)
5216 {
5217 struct event_constraint *c;
5218
5219 c = intel_get_event_constraints(cpuc, idx, event);
5220
5221 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
5222 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
5223 if (c->idxmsk64 & (1U << 2))
5224 return &counter2_constraint;
5225 return &emptyconstraint;
5226 }
5227
5228 return c;
5229 }
5230
5231 static struct event_constraint *
icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5233 struct perf_event *event)
5234 {
/*
 * Fixed counter 0 has less skid.
 * Force instruction:ppp onto fixed counter 0.
 */
5239 if ((event->attr.precise_ip == 3) &&
5240 constraint_match(&fixed0_constraint, event->hw.config))
5241 return &fixed0_constraint;
5242
5243 return hsw_get_event_constraints(cpuc, idx, event);
5244 }
5245
5246 static struct event_constraint *
glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5248 struct perf_event *event)
5249 {
5250 struct event_constraint *c;
5251
5252 c = icl_get_event_constraints(cpuc, idx, event);
5253
/*
 * The :ppp indicates the Precise Distribution (PDist) facility, which
 * is only supported on GP counter 0. If a :ppp event cannot be
 * scheduled on GP counter 0, error out.
 * Exception: Instruction PDIR is only available on fixed counter 0.
 */
5260 if ((event->attr.precise_ip == 3) &&
5261 !constraint_match(&fixed0_constraint, event->hw.config)) {
5262 if (c->idxmsk64 & BIT_ULL(0))
5263 return &counter0_constraint;
5264
5265 return &emptyconstraint;
5266 }
5267
5268 return c;
5269 }
5270
5271 static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5273 struct perf_event *event)
5274 {
5275 struct event_constraint *c;
5276
5277 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
5278 if (event->attr.precise_ip == 3)
5279 return &counter0_constraint;
5280
5281 c = intel_get_event_constraints(cpuc, idx, event);
5282
5283 return c;
5284 }
5285
5286 static struct event_constraint *
tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5288 struct perf_event *event)
5289 {
5290 struct event_constraint *c;
5291
5292 c = intel_get_event_constraints(cpuc, idx, event);
5293
5294 /*
5295 * :ppp means to do reduced skid PEBS,
5296 * which is available on PMC0 and fixed counter 0.
5297 */
5298 if (event->attr.precise_ip == 3) {
5299 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
5300 if (constraint_match(&fixed0_constraint, event->hw.config))
5301 return &fixed0_counter0_constraint;
5302
5303 return &counter0_constraint;
5304 }
5305
5306 return c;
5307 }
5308
5309 static bool allow_tsx_force_abort = true;
5310
5311 static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5313 struct perf_event *event)
5314 {
5315 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
5316
5317 /*
5318 * Without TFA we must not use PMC3.
5319 */
5320 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
5321 c = dyn_constraint(cpuc, c, idx);
5322 c->idxmsk64 &= ~(1ULL << 3);
5323 c->weight--;
5324 }
5325
5326 return c;
5327 }
5328
5329 static struct event_constraint *
adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5331 struct perf_event *event)
5332 {
5333 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5334
5335 if (pmu->pmu_type == hybrid_big)
5336 return glc_get_event_constraints(cpuc, idx, event);
5337 else if (pmu->pmu_type == hybrid_small)
5338 return tnt_get_event_constraints(cpuc, idx, event);
5339
5340 WARN_ON(1);
5341 return &emptyconstraint;
5342 }
5343
5344 static struct event_constraint *
cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5346 struct perf_event *event)
5347 {
5348 struct event_constraint *c;
5349
5350 c = intel_get_event_constraints(cpuc, idx, event);
5351
/*
 * The :ppp indicates the Precise Distribution (PDist) facility, which
 * is only supported on GP counters 0 and 1 and fixed counter 0.
 * If a :ppp event cannot be scheduled on one of these eligible
 * counters, error out.
 */
5358 if (event->attr.precise_ip == 3) {
5359 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
5360 if (constraint_match(&fixed0_constraint, event->hw.config)) {
5361 /* The fixed counter 0 doesn't support LBR event logging. */
5362 if (branch_sample_counters(event))
5363 return &counter0_1_constraint;
5364 else
5365 return &fixed0_counter0_1_constraint;
5366 }
5367
5368 switch (c->idxmsk64 & 0x3ull) {
5369 case 0x1:
5370 return &counter0_constraint;
5371 case 0x2:
5372 return &counter1_constraint;
5373 case 0x3:
5374 return &counter0_1_constraint;
5375 }
5376 return &emptyconstraint;
5377 }
5378
5379 return c;
5380 }
5381
5382 static struct event_constraint *
rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5384 struct perf_event *event)
5385 {
5386 struct event_constraint *c;
5387
5388 c = glc_get_event_constraints(cpuc, idx, event);
5389
5390 /* The Retire Latency is not supported by the fixed counter 0. */
5391 if (event->attr.precise_ip &&
5392 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
5393 constraint_match(&fixed0_constraint, event->hw.config)) {
5394 /*
5395 * The Instruction PDIR is only available
5396 * on the fixed counter 0. Error out for this case.
5397 */
5398 if (event->attr.precise_ip == 3)
5399 return &emptyconstraint;
5400 return &counters_1_7_constraint;
5401 }
5402
5403 return c;
5404 }
5405
5406 static struct event_constraint *
mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5408 struct perf_event *event)
5409 {
5410 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5411
5412 if (pmu->pmu_type == hybrid_big)
5413 return rwc_get_event_constraints(cpuc, idx, event);
5414 if (pmu->pmu_type == hybrid_small)
5415 return cmt_get_event_constraints(cpuc, idx, event);
5416
5417 WARN_ON(1);
5418 return &emptyconstraint;
5419 }
5420
static int adl_hw_config(struct perf_event *event)
5422 {
5423 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5424
5425 if (pmu->pmu_type == hybrid_big)
5426 return hsw_hw_config(event);
5427 else if (pmu->pmu_type == hybrid_small)
5428 return intel_pmu_hw_config(event);
5429
5430 WARN_ON(1);
5431 return -EOPNOTSUPP;
5432 }
5433
static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
5435 {
5436 return INTEL_CPU_TYPE_CORE;
5437 }
5438
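/* Matches the INST_RETIRED encoding (event 0xc0, umask 0x01) subject to the HSW11/BDM11 errata below. */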
static inline bool erratum_hsw11(struct perf_event *event)
5440 {
5441 return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
5442 X86_CONFIG(.event=0xc0, .umask=0x01);
5443 }
5444
5445 static struct event_constraint *
arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5447 struct perf_event *event)
5448 {
5449 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5450
5451 if (pmu->pmu_type == hybrid_tiny)
5452 return cmt_get_event_constraints(cpuc, idx, event);
5453
5454 return mtl_get_event_constraints(cpuc, idx, event);
5455 }
5456
static int arl_h_hw_config(struct perf_event *event)
5458 {
5459 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5460
5461 if (pmu->pmu_type == hybrid_tiny)
5462 return intel_pmu_hw_config(event);
5463
5464 return adl_hw_config(event);
5465 }
5466
/*
 * Erratum HSW11 requires a period larger than 100, the same as BDM11.
 * A minimum period of 128 is therefore also enforced for INST_RETIRED.ALL.
 *
 * The message 'interrupt took too long' can be observed on any counter which
 * was armed with a period < 32 and two events expired in the same NMI.
 * A minimum period of 32 is enforced for the rest of the events.
 */
static void hsw_limit_period(struct perf_event *event, s64 *left)
5476 {
5477 *left = max(*left, erratum_hsw11(event) ? 128 : 32);
5478 }
5479
5480 /*
5481 * Broadwell:
5482 *
5483 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
5484 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
5485 * the two to enforce a minimum period of 128 (the smallest value that has bits
5486 * 0-5 cleared and >= 100).
5487 *
5488 * Because of how the code in x86_perf_event_set_period() works, the truncation
5489 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
5490 * to make up for the 'lost' events due to carrying the 'error' in period_left.
5491 *
5492 * Therefore the effective (average) period matches the requested period,
5493 * despite coarser hardware granularity.
5494 */
static void bdw_limit_period(struct perf_event *event, s64 *left)
5496 {
5497 if (erratum_hsw11(event)) {
5498 if (*left < 128)
5499 *left = 128;
5500 *left &= ~0x3fULL;
5501 }
5502 }
5503
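/* Nehalem: enforce a minimum period of 32 for all events. */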
static void nhm_limit_period(struct perf_event *event, s64 *left)
5505 {
5506 *left = max(*left, 32LL);
5507 }
5508
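/* Golden Cove: enforce a minimum period of 128 for PDist (:ppp) events. */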
static void glc_limit_period(struct perf_event *event, s64 *left)
5510 {
5511 if (event->attr.precise_ip == 3)
5512 *left = max(*left, 128LL);
5513 }
5514
5515 PMU_FORMAT_ATTR(event, "config:0-7" );
5516 PMU_FORMAT_ATTR(umask, "config:8-15" );
5517 PMU_FORMAT_ATTR(edge, "config:18" );
5518 PMU_FORMAT_ATTR(pc, "config:19" );
5519 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
5520 PMU_FORMAT_ATTR(inv, "config:23" );
5521 PMU_FORMAT_ATTR(cmask, "config:24-31" );
5522 PMU_FORMAT_ATTR(in_tx, "config:32" );
5523 PMU_FORMAT_ATTR(in_tx_cp, "config:33" );
5524 PMU_FORMAT_ATTR(eq, "config:36" ); /* v6 + */
5525
5526 PMU_FORMAT_ATTR(metrics_clear, "config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5527
static ssize_t umask2_show(struct device *dev,
5529 struct device_attribute *attr,
5530 char *page)
5531 {
5532 u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
5533
5534 if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
5535 return sprintf(page, "config:8-15,40-47\n");
5536
5537 /* Roll back to the old format if umask2 is not supported. */
5538 return sprintf(page, "config:8-15\n");
5539 }
5540
5541 static struct device_attribute format_attr_umask2 =
5542 __ATTR(umask, 0444, umask2_show, NULL);
5543
5544 static struct attribute *format_evtsel_ext_attrs[] = {
5545 &format_attr_umask2.attr,
5546 &format_attr_eq.attr,
5547 &format_attr_metrics_clear.attr,
5548 NULL
5549 };
5550
5551 static umode_t
evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5553 {
5554 struct device *dev = kobj_to_dev(kobj);
5555 u64 mask;
5556
/*
 * The umask and umask2 have different formats but share the
 * same attr name. In update mode, the previous value of the
 * umask is unconditionally removed before is_visible. If the
 * umask2 format is not enumerated, it's impossible to roll
 * back to the old format.
 * Do the check in umask2_show() rather than in is_visible().
 */
5565 if (i == 0)
5566 return attr->mode;
5567
5568 mask = hybrid(dev_get_drvdata(dev), config_mask);
5569 if (i == 1)
5570 return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
5571
5572 /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5573 if (i == 2) {
5574 union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
5575
5576 return intel_cap.rdpmc_metrics_clear ? attr->mode : 0;
5577 }
5578
5579 return 0;
5580 }
5581
5582 static struct attribute *intel_arch_formats_attr[] = {
5583 &format_attr_event.attr,
5584 &format_attr_umask.attr,
5585 &format_attr_edge.attr,
5586 &format_attr_pc.attr,
5587 &format_attr_inv.attr,
5588 &format_attr_cmask.attr,
5589 NULL,
5590 };
5591
ssize_t intel_event_sysfs_show(char *page, u64 config)
5593 {
5594 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
5595
5596 return x86_event_sysfs_show(page, config, event);
5597 }
5598
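/*
 * Allocate the shared register state on the memory node of the given CPU.
 * core_id is initialized to -1 and filled in once the CPU comes online.
 */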
static struct intel_shared_regs *allocate_shared_regs(int cpu)
5600 {
5601 struct intel_shared_regs *regs;
5602 int i;
5603
5604 regs = kzalloc_node(sizeof(struct intel_shared_regs),
5605 GFP_KERNEL, cpu_to_node(cpu));
5606 if (regs) {
5607 /*
5608 * initialize the locks to keep lockdep happy
5609 */
5610 for (i = 0; i < EXTRA_REG_MAX; i++)
raw_spin_lock_init(&regs->regs[i].lock);
5612
5613 regs->core_id = -1;
5614 }
5615 return regs;
5616 }
5617
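/*
 * Allocate the cross-thread exclusive counter state used when
 * PMU_FL_EXCL_CNTRS is set (HT bug workaround).
 */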
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
5619 {
5620 struct intel_excl_cntrs *c;
5621
5622 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
5623 GFP_KERNEL, cpu_to_node(cpu));
5624 if (c) {
5625 raw_spin_lock_init(&c->lock);
5626 c->core_id = -1;
5627 }
5628 return c;
5629 }
5630
5631
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
5633 {
5634 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
5635
5636 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
5637 cpuc->shared_regs = allocate_shared_regs(cpu);
5638 if (!cpuc->shared_regs)
5639 goto err;
5640 }
5641
5642 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
5643 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
5644
5645 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
5646 if (!cpuc->constraint_list)
5647 goto err_shared_regs;
5648 }
5649
5650 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5651 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
5652 if (!cpuc->excl_cntrs)
5653 goto err_constraint_list;
5654
5655 cpuc->excl_thread_id = 0;
5656 }
5657
5658 return 0;
5659
5660 err_constraint_list:
5661 kfree(cpuc->constraint_list);
5662 cpuc->constraint_list = NULL;
5663
5664 err_shared_regs:
5665 kfree(cpuc->shared_regs);
5666 cpuc->shared_regs = NULL;
5667
5668 err:
5669 return -ENOMEM;
5670 }
5671
static int intel_pmu_cpu_prepare(int cpu)
5673 {
5674 int ret;
5675
5676 ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
5677 if (ret)
5678 return ret;
5679
5680 return alloc_arch_pebs_buf_on_cpu(cpu);
5681 }
5682
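/* Set or clear the FREEZE_IN_SMM bit in IA32_DEBUGCTL on the local CPU. */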
static void flip_smm_bit(void *data)
5684 {
5685 unsigned long set = *(unsigned long *)data;
5686
5687 if (set > 0) {
5688 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
5689 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5690 } else {
5691 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
5692 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5693 }
5694 }
5695
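/*
 * Clip the GP and fixed counter masks to the supported maxima and
 * derive the corresponding GLOBAL_CTRL enable mask.
 */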
static void intel_pmu_check_counters_mask(u64 *cntr_mask,
5697 u64 *fixed_cntr_mask,
5698 u64 *intel_ctrl)
5699 {
5700 unsigned int bit;
5701
5702 bit = fls64(*cntr_mask);
5703 if (bit > INTEL_PMC_MAX_GENERIC) {
5704 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5705 bit, INTEL_PMC_MAX_GENERIC);
5706 *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5707 }
5708 *intel_ctrl = *cntr_mask;
5709
5710 bit = fls64(*fixed_cntr_mask);
5711 if (bit > INTEL_PMC_MAX_FIXED) {
5712 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5713 bit, INTEL_PMC_MAX_FIXED);
5714 *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
5715 }
5716
5717 *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
5718 }
5719
5720 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5721 u64 cntr_mask,
5722 u64 fixed_cntr_mask,
5723 u64 intel_ctrl);
5724
5725 enum dyn_constr_type {
5726 DYN_CONSTR_NONE,
5727 DYN_CONSTR_BR_CNTR,
5728 DYN_CONSTR_ACR_CNTR,
5729 DYN_CONSTR_ACR_CAUSE,
5730 DYN_CONSTR_PEBS,
5731 DYN_CONSTR_PDIST,
5732
5733 DYN_CONSTR_MAX,
5734 };
5735
5736 static const char * const dyn_constr_type_name[] = {
5737 [DYN_CONSTR_NONE] = "a normal event",
5738 [DYN_CONSTR_BR_CNTR] = "a branch counter logging event",
5739 [DYN_CONSTR_ACR_CNTR] = "an auto-counter reload event",
5740 [DYN_CONSTR_ACR_CAUSE] = "an auto-counter reload cause event",
5741 [DYN_CONSTR_PEBS] = "a PEBS event",
5742 [DYN_CONSTR_PDIST] = "a PEBS PDIST event",
5743 };
5744
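/*
 * Warn when two constraints, restricted to the counters in @mask, may
 * lead to scheduling failures for events of the given dynamic
 * constraint @type.
 */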
static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
5746 enum dyn_constr_type type, u64 mask)
5747 {
5748 struct event_constraint *c1, *c2;
5749 int new_weight, check_weight;
5750 u64 new_mask, check_mask;
5751
5752 for_each_event_constraint(c1, constr) {
5753 new_mask = c1->idxmsk64 & mask;
5754 new_weight = hweight64(new_mask);
5755
5756 /* ignore topdown perf metrics event */
5757 if (c1->idxmsk64 & INTEL_PMC_MSK_TOPDOWN)
5758 continue;
5759
5760 if (!new_weight && fls64(c1->idxmsk64) < INTEL_PMC_IDX_FIXED) {
5761 pr_info("The event 0x%llx is not supported as %s.\n",
5762 c1->code, dyn_constr_type_name[type]);
5763 }
5764
5765 if (new_weight <= 1)
5766 continue;
5767
5768 for_each_event_constraint(c2, c1 + 1) {
5769 bool check_fail = false;
5770
5771 check_mask = c2->idxmsk64 & mask;
5772 check_weight = hweight64(check_mask);
5773
5774 if (c2->idxmsk64 & INTEL_PMC_MSK_TOPDOWN ||
5775 !check_weight)
5776 continue;
5777
5778 /* The same constraints or no overlap */
5779 if (new_mask == check_mask ||
5780 (new_mask ^ check_mask) == (new_mask | check_mask))
5781 continue;
5782
5783 /*
5784 * A scheduler issue may be triggered in the following cases.
5785 * - Two overlap constraints have the same weight.
5786 * E.g., A constraints: 0x3, B constraints: 0x6
5787 * event counter failure case
5788 * B PMC[2:1] 1
5789 * A PMC[1:0] 0
5790 * A PMC[1:0] FAIL
5791 * - Two overlap constraints have different weight.
5792 * The constraint has a low weight, but has high last bit.
5793 * E.g., A constraints: 0x7, B constraints: 0xC
5794 * event counter failure case
5795 * B PMC[3:2] 2
5796 * A PMC[2:0] 0
5797 * A PMC[2:0] 1
5798 * A PMC[2:0] FAIL
5799 */
5800 if (new_weight == check_weight) {
5801 check_fail = true;
5802 } else if (new_weight < check_weight) {
5803 if ((new_mask | check_mask) != check_mask &&
5804 fls64(new_mask) > fls64(check_mask))
5805 check_fail = true;
5806 } else {
5807 if ((new_mask | check_mask) != new_mask &&
5808 fls64(new_mask) < fls64(check_mask))
5809 check_fail = true;
5810 }
5811
5812 if (check_fail) {
5813 pr_warn("The two events 0x%llx and 0x%llx may not be "
5814 "fully scheduled under some circumstances as "
5815 "%s.\n",
5816 c1->code, c2->code, dyn_constr_type_name[type]);
5817 }
5818 }
5819 }
5820 }
5821
static void intel_pmu_check_dyn_constr(struct pmu *pmu,
5823 struct event_constraint *constr,
5824 u64 cntr_mask)
5825 {
5826 u64 gp_mask = GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5827 enum dyn_constr_type i;
5828 u64 mask;
5829
5830 for (i = DYN_CONSTR_NONE; i < DYN_CONSTR_MAX; i++) {
5831 mask = 0;
5832 switch (i) {
5833 case DYN_CONSTR_NONE:
5834 mask = cntr_mask;
5835 break;
5836 case DYN_CONSTR_BR_CNTR:
5837 if (x86_pmu.flags & PMU_FL_BR_CNTR)
5838 mask = x86_pmu.lbr_counters;
5839 break;
5840 case DYN_CONSTR_ACR_CNTR:
5841 mask = hybrid(pmu, acr_cntr_mask64) & gp_mask;
5842 break;
5843 case DYN_CONSTR_ACR_CAUSE:
5844 if (hybrid(pmu, acr_cntr_mask64) ==
5845 hybrid(pmu, acr_cause_mask64))
5846 continue;
5847 mask = hybrid(pmu, acr_cause_mask64) & gp_mask;
5848 break;
5849 case DYN_CONSTR_PEBS:
5850 if (x86_pmu.arch_pebs) {
5851 mask = hybrid(pmu, arch_pebs_cap).counters &
5852 gp_mask;
5853 }
5854 break;
5855 case DYN_CONSTR_PDIST:
5856 if (x86_pmu.arch_pebs) {
5857 mask = hybrid(pmu, arch_pebs_cap).pdists &
5858 gp_mask;
5859 }
5860 break;
5861 default:
5862 pr_warn("Unsupported dynamic constraint type %d\n", i);
5863 }
5864
5865 if (mask)
5866 __intel_pmu_check_dyn_constr(constr, i, mask);
5867 }
5868 }
5869
static void intel_pmu_check_event_constraints_all(struct pmu *pmu)
5871 {
5872 struct event_constraint *event_constraints = hybrid(pmu, event_constraints);
5873 struct event_constraint *pebs_constraints = hybrid(pmu, pebs_constraints);
5874 u64 cntr_mask = hybrid(pmu, cntr_mask64);
5875 u64 fixed_cntr_mask = hybrid(pmu, fixed_cntr_mask64);
5876 u64 intel_ctrl = hybrid(pmu, intel_ctrl);
5877
5878 intel_pmu_check_event_constraints(event_constraints, cntr_mask,
5879 fixed_cntr_mask, intel_ctrl);
5880
5881 if (event_constraints)
5882 intel_pmu_check_dyn_constr(pmu, event_constraints, cntr_mask);
5883
5884 if (pebs_constraints)
5885 intel_pmu_check_dyn_constr(pmu, pebs_constraints, cntr_mask);
5886 }
5887
5888 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
5889
static inline bool intel_pmu_broken_perf_cap(void)
5891 {
5892 /* The Perf Metric (Bit 15) is always cleared */
5893 if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
5894 boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
5895 return true;
5896
5897 return false;
5898 }
5899
static inline void __intel_update_pmu_caps(struct pmu *pmu)
5901 {
5902 struct pmu *dest_pmu = pmu ? pmu : x86_get_pmu(smp_processor_id());
5903
5904 if (hybrid(pmu, arch_pebs_cap).caps & ARCH_PEBS_VECR_XMM)
5905 dest_pmu->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
5906 }
5907
static inline void __intel_update_large_pebs_flags(struct pmu *pmu)
5909 {
5910 u64 caps = hybrid(pmu, arch_pebs_cap).caps;
5911
5912 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
5913 if (caps & ARCH_PEBS_LBR)
5914 x86_pmu.large_pebs_flags |= PERF_SAMPLE_BRANCH_STACK;
5915 if (caps & ARCH_PEBS_CNTR_MASK)
5916 x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ;
5917
5918 if (!(caps & ARCH_PEBS_AUX))
5919 x86_pmu.large_pebs_flags &= ~PERF_SAMPLE_DATA_SRC;
5920 if (!(caps & ARCH_PEBS_GPR)) {
5921 x86_pmu.large_pebs_flags &=
5922 ~(PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER);
5923 }
5924 }
5925
5926 #define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
5927
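/*
 * Enumerate PMU capabilities (counter masks, ACR, arch-PEBS) from the
 * architectural PerfMon extension CPUID leaf and its sub-leaves.
 */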
static void update_pmu_cap(struct pmu *pmu)
5929 {
5930 unsigned int eax, ebx, ecx, edx;
5931 union cpuid35_eax eax_0;
5932 union cpuid35_ebx ebx_0;
5933 u64 cntrs_mask = 0;
5934 u64 pebs_mask = 0;
5935 u64 pdists_mask = 0;
5936
5937 cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
5938
5939 if (ebx_0.split.umask2)
5940 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
5941 if (ebx_0.split.eq)
5942 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
5943 if (ebx_0.split.rdpmc_user_disable)
5944 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE;
5945
5946 if (eax_0.split.cntr_subleaf) {
5947 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
5948 &eax, &ebx, &ecx, &edx);
5949 hybrid(pmu, cntr_mask64) = eax;
5950 hybrid(pmu, fixed_cntr_mask64) = ebx;
5951 cntrs_mask = counter_mask(eax, ebx);
5952 }
5953
5954 if (eax_0.split.acr_subleaf) {
5955 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
5956 &eax, &ebx, &ecx, &edx);
5957 /* The mask of the counters which can be reloaded */
5958 hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
5959 /* The mask of the counters which can cause a reload of reloadable counters */
5960 hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
5961 }
5962
5963 /* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
5964 if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
5965 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
5966 &eax, &ebx, &ecx, &edx);
5967 hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
5968
5969 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
5970 &eax, &ebx, &ecx, &edx);
5971 pebs_mask = counter_mask(eax, ecx);
5972 pdists_mask = counter_mask(ebx, edx);
5973 hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
5974 hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
5975
5976 if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) {
5977 x86_pmu.arch_pebs = 0;
5978 } else {
5979 __intel_update_pmu_caps(pmu);
5980 __intel_update_large_pebs_flags(pmu);
5981 }
5982 } else {
5983 WARN_ON(x86_pmu.arch_pebs == 1);
5984 x86_pmu.arch_pebs = 0;
5985 }
5986
5987 if (!intel_pmu_broken_perf_cap()) {
5988 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
5989 rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
5990 }
5991 }
5992
static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
5994 {
5995 intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
5996 &pmu->intel_ctrl);
5997 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
5998 pmu->unconstrained = (struct event_constraint)
5999 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
6000 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
6001
6002 if (pmu->intel_cap.perf_metrics)
6003 pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
6004 else
6005 pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
6006
6007 pmu->pmu.capabilities |= PERF_PMU_CAP_MEDIATED_VPMU;
6008
6009 intel_pmu_check_event_constraints_all(&pmu->pmu);
6010
6011 intel_pmu_check_extra_regs(pmu->extra_regs);
6012 }
6013
static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
6015 {
6016 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
6017 enum intel_cpu_type cpu_type = c->topo.intel_type;
6018 int i;
6019
6020 /*
6021 * This is running on a CPU model that is known to have hybrid
6022 * configurations. But the CPU told us it is not hybrid, shame
6023 * on it. There should be a fixup function provided for these
6024 * troublesome CPUs (->get_hybrid_cpu_type).
6025 */
6026 if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
6027 if (x86_pmu.get_hybrid_cpu_type)
6028 cpu_type = x86_pmu.get_hybrid_cpu_type();
6029 else
6030 return NULL;
6031 }
6032
6033 /*
6034 * This essentially just maps between the 'hybrid_cpu_type'
6035 * and 'hybrid_pmu_type' enums except for ARL-H processor
6036 * which needs to compare atom uarch native id since ARL-H
6037 * contains two different atom uarchs.
6038 */
6039 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
6040 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
6041 u32 native_id;
6042
6043 if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
6044 return &x86_pmu.hybrid_pmu[i];
6045 if (cpu_type == INTEL_CPU_TYPE_ATOM) {
6046 if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
6047 return &x86_pmu.hybrid_pmu[i];
6048
6049 native_id = c->topo.intel_native_model_id;
6050 if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
6051 return &x86_pmu.hybrid_pmu[i];
6052 if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
6053 return &x86_pmu.hybrid_pmu[i];
6054 }
6055 }
6056
6057 return NULL;
6058 }
6059
static bool init_hybrid_pmu(int cpu)
6061 {
6062 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
6063 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
6064
6065 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
6066 cpuc->pmu = NULL;
6067 return false;
6068 }
6069
6070 /* Only check and dump the PMU information for the first CPU */
6071 if (!cpumask_empty(&pmu->supported_cpus))
6072 goto end;
6073
6074 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
6075 update_pmu_cap(&pmu->pmu);
6076
6077 intel_pmu_check_hybrid_pmus(pmu);
6078
6079 if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
6080 return false;
6081
6082 pr_info("%s PMU driver: ", pmu->name);
6083
6084 pr_cont("\n");
6085
6086 x86_pmu_show_pmu_cap(&pmu->pmu);
6087
6088 end:
6089 cpumask_set_cpu(cpu, &pmu->supported_cpus);
6090 cpuc->pmu = &pmu->pmu;
6091
6092 return true;
6093 }
6094
static void intel_pmu_cpu_starting(int cpu)
6096 {
6097 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
6098 int core_id = topology_core_id(cpu);
6099 int i;
6100
6101 if (is_hybrid() && !init_hybrid_pmu(cpu))
6102 return;
6103
6104 init_debug_store_on_cpu(cpu);
6105 init_arch_pebs_on_cpu(cpu);
6106 /*
6107 * Deal with CPUs that don't clear their LBRs on power-up, and that may
6108 * even boot with LBRs enabled.
6109 */
6110 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
6111 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
6112 intel_pmu_lbr_reset();
6113
6114 cpuc->lbr_sel = NULL;
6115
6116 if (x86_pmu.flags & PMU_FL_TFA) {
6117 WARN_ON_ONCE(cpuc->tfa_shadow);
6118 cpuc->tfa_shadow = ~0ULL;
6119 intel_set_tfa(cpuc, false);
6120 }
6121
6122 if (x86_pmu.version > 1)
6123 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
6124
/*
 * Disable perf metrics if any added CPU doesn't support it.
 *
 * Turn off the check for a hybrid architecture, because the
 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
 * the architectural features. Perf metrics is a model-specific
 * feature for now. The corresponding bit should always be 0 on
 * a hybrid platform, e.g., Alder Lake.
 */
6134 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
6135 union perf_capabilities perf_cap;
6136
6137 rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
6138 if (!perf_cap.perf_metrics) {
6139 x86_pmu.intel_cap.perf_metrics = 0;
6140 x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
6141 }
6142 }
6143
6144 __intel_update_pmu_caps(cpuc->pmu);
6145
6146 if (!cpuc->shared_regs)
6147 return;
6148
6149 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
6150 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
6151 struct intel_shared_regs *pc;
6152
6153 pc = per_cpu(cpu_hw_events, i).shared_regs;
6154 if (pc && pc->core_id == core_id) {
6155 cpuc->kfree_on_online[0] = cpuc->shared_regs;
6156 cpuc->shared_regs = pc;
6157 break;
6158 }
6159 }
6160 cpuc->shared_regs->core_id = core_id;
6161 cpuc->shared_regs->refcnt++;
6162 }
6163
6164 if (x86_pmu.lbr_sel_map)
6165 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
6166
6167 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
6168 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
6169 struct cpu_hw_events *sibling;
6170 struct intel_excl_cntrs *c;
6171
6172 sibling = &per_cpu(cpu_hw_events, i);
6173 c = sibling->excl_cntrs;
6174 if (c && c->core_id == core_id) {
6175 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
6176 cpuc->excl_cntrs = c;
6177 if (!sibling->excl_thread_id)
6178 cpuc->excl_thread_id = 1;
6179 break;
6180 }
6181 }
6182 cpuc->excl_cntrs->core_id = core_id;
6183 cpuc->excl_cntrs->refcnt++;
6184 }
6185 }
6186
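/*
 * Drop this CPU's reference on the shared exclusive-counter state,
 * freeing it with the last user, and release the constraint list.
 */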
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
6188 {
6189 struct intel_excl_cntrs *c;
6190
6191 c = cpuc->excl_cntrs;
6192 if (c) {
6193 if (c->core_id == -1 || --c->refcnt == 0)
6194 kfree(c);
6195 cpuc->excl_cntrs = NULL;
6196 }
6197
6198 kfree(cpuc->constraint_list);
6199 cpuc->constraint_list = NULL;
6200 }
6201
static void intel_pmu_cpu_dying(int cpu)
6203 {
6204 fini_debug_store_on_cpu(cpu);
6205 fini_arch_pebs_on_cpu(cpu);
6206 }
6207
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
6209 {
6210 struct intel_shared_regs *pc;
6211
6212 pc = cpuc->shared_regs;
6213 if (pc) {
6214 if (pc->core_id == -1 || --pc->refcnt == 0)
6215 kfree(pc);
6216 cpuc->shared_regs = NULL;
6217 }
6218
6219 free_excl_cntrs(cpuc);
6220 }
6221
static void intel_pmu_cpu_dead(int cpu)
6223 {
6224 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
6225
6226 release_arch_pebs_buf_on_cpu(cpu);
6227 intel_cpuc_finish(cpuc);
6228
6229 if (is_hybrid() && cpuc->pmu)
6230 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
6231 }
6232
static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
6234 struct task_struct *task, bool sched_in)
6235 {
6236 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
6237 intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
6238 }
6239
static int intel_pmu_check_period(struct perf_event *event, u64 value)
6241 {
6242 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
6243 }
6244
static void intel_aux_output_init(void)
6246 {
6247 /* Refer also intel_pmu_aux_output_match() */
6248 if (x86_pmu.intel_cap.pebs_output_pt_available)
6249 x86_pmu.assign = intel_pmu_assign_event;
6250 }
6251
static int intel_pmu_aux_output_match(struct perf_event *event)
6253 {
6254 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
6255 if (!x86_pmu.intel_cap.pebs_output_pt_available)
6256 return 0;
6257
6258 return is_intel_pt_event(event);
6259 }
6260
static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
6262 {
6263 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
6264
6265 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
6266 }
6267
6268 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
6269
6270 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
6271
6272 PMU_FORMAT_ATTR(frontend, "config1:0-23");
6273
6274 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
6275
6276 static struct attribute *intel_arch3_formats_attr[] = {
6277 &format_attr_event.attr,
6278 &format_attr_umask.attr,
6279 &format_attr_edge.attr,
6280 &format_attr_pc.attr,
6281 &format_attr_any.attr,
6282 &format_attr_inv.attr,
6283 &format_attr_cmask.attr,
6284 NULL,
6285 };
6286
6287 static struct attribute *hsw_format_attr[] = {
6288 &format_attr_in_tx.attr,
6289 &format_attr_in_tx_cp.attr,
6290 &format_attr_offcore_rsp.attr,
6291 &format_attr_ldlat.attr,
6292 NULL
6293 };
6294
6295 static struct attribute *nhm_format_attr[] = {
6296 &format_attr_offcore_rsp.attr,
6297 &format_attr_ldlat.attr,
6298 NULL
6299 };
6300
6301 static struct attribute *slm_format_attr[] = {
6302 &format_attr_offcore_rsp.attr,
6303 NULL
6304 };
6305
6306 static struct attribute *cmt_format_attr[] = {
6307 &format_attr_offcore_rsp.attr,
6308 &format_attr_ldlat.attr,
6309 &format_attr_snoop_rsp.attr,
6310 NULL
6311 };
6312
6313 static struct attribute *skl_format_attr[] = {
6314 &format_attr_frontend.attr,
6315 NULL,
6316 };
6317
6318 static __initconst const struct x86_pmu core_pmu = {
6319 .name = "core",
6320 .handle_irq = x86_pmu_handle_irq,
6321 .disable_all = x86_pmu_disable_all,
6322 .enable_all = core_pmu_enable_all,
6323 .enable = core_pmu_enable_event,
6324 .disable = x86_pmu_disable_event,
6325 .hw_config = core_pmu_hw_config,
6326 .schedule_events = x86_schedule_events,
6327 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
6328 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
6329 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
6330 .event_map = intel_pmu_event_map,
6331 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
6332 .apic = 1,
6333 .large_pebs_flags = LARGE_PEBS_FLAGS,
6334
6335 /*
6336 * Intel PMCs cannot be accessed sanely above 32-bit width,
6337 * so we install an artificial 1<<31 period regardless of
6338 * the generic event period:
6339 */
6340 .max_period = (1ULL<<31) - 1,
6341 .get_event_constraints = intel_get_event_constraints,
6342 .put_event_constraints = intel_put_event_constraints,
6343 .event_constraints = intel_core_event_constraints,
6344 .guest_get_msrs = core_guest_get_msrs,
6345 .format_attrs = intel_arch_formats_attr,
6346 .events_sysfs_show = intel_event_sysfs_show,
6347
/*
 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
 * together with PMU version 1 and thus be using core_pmu with
 * shared_regs. We need the following callbacks here to allocate
 * it properly.
 */
6354 .cpu_prepare = intel_pmu_cpu_prepare,
6355 .cpu_starting = intel_pmu_cpu_starting,
6356 .cpu_dying = intel_pmu_cpu_dying,
6357 .cpu_dead = intel_pmu_cpu_dead,
6358
6359 .check_period = intel_pmu_check_period,
6360
6361 .lbr_reset = intel_pmu_lbr_reset_64,
6362 .lbr_read = intel_pmu_lbr_read_64,
6363 .lbr_save = intel_pmu_lbr_save,
6364 .lbr_restore = intel_pmu_lbr_restore,
6365 };
6366
6367 static __initconst const struct x86_pmu intel_pmu = {
6368 .name = "Intel",
6369 .handle_irq = intel_pmu_handle_irq,
6370 .disable_all = intel_pmu_disable_all,
6371 .enable_all = intel_pmu_enable_all,
6372 .enable = intel_pmu_enable_event,
6373 .disable = intel_pmu_disable_event,
6374 .add = intel_pmu_add_event,
6375 .del = intel_pmu_del_event,
6376 .read = intel_pmu_read_event,
6377 .set_period = intel_pmu_set_period,
6378 .update = intel_pmu_update,
6379 .hw_config = intel_pmu_hw_config,
6380 .schedule_events = x86_schedule_events,
6381 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
6382 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
6383 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
6384 .event_map = intel_pmu_event_map,
6385 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
6386 .apic = 1,
6387 .large_pebs_flags = LARGE_PEBS_FLAGS,
6388 /*
6389 * Intel PMCs cannot be accessed sanely above 32 bit width,
6390 * so we install an artificial 1<<31 period regardless of
6391 * the generic event period:
6392 */
6393 .max_period = (1ULL << 31) - 1,
6394 .get_event_constraints = intel_get_event_constraints,
6395 .put_event_constraints = intel_put_event_constraints,
6396 .pebs_aliases = intel_pebs_aliases_core2,
6397
6398 .format_attrs = intel_arch3_formats_attr,
6399 .events_sysfs_show = intel_event_sysfs_show,
6400
6401 .cpu_prepare = intel_pmu_cpu_prepare,
6402 .cpu_starting = intel_pmu_cpu_starting,
6403 .cpu_dying = intel_pmu_cpu_dying,
6404 .cpu_dead = intel_pmu_cpu_dead,
6405
6406 .guest_get_msrs = intel_guest_get_msrs,
6407 .sched_task = intel_pmu_sched_task,
6408
6409 .check_period = intel_pmu_check_period,
6410
6411 .aux_output_match = intel_pmu_aux_output_match,
6412
6413 .lbr_reset = intel_pmu_lbr_reset_64,
6414 .lbr_read = intel_pmu_lbr_read_64,
6415 .lbr_save = intel_pmu_lbr_save,
6416 .lbr_restore = intel_pmu_lbr_restore,
6417
6418 /*
6419 * SMM has access to all 4 rings and while traditionally SMM code only
6420 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
6421 *
6422 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
6423 * between SMM or not, this results in what should be pure userspace
6424 * counters including SMM data.
6425 *
6426 * This is a clear privilege issue, therefore globally disable
6427 * counting SMM by default.
6428 */
6429 .attr_freeze_on_smi = 1,
6430 };
6431
static __init void intel_clovertown_quirk(void)
6433 {
6434 /*
6435 * PEBS is unreliable due to:
6436 *
6437 * AJ67 - PEBS may experience CPL leaks
6438 * AJ68 - PEBS PMI may be delayed by one event
6439 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
6440 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
6441 *
6442 * AJ67 could be worked around by restricting the OS/USR flags.
6443 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
6444 *
6445 * AJ106 could possibly be worked around by not allowing LBR
6446 * usage from PEBS, including the fixup.
6447 * AJ68 could possibly be worked around by always programming
6448 * a pebs_event_reset[0] value and coping with the lost events.
6449 *
6450 * But taken together it might just make sense to not enable PEBS on
6451 * these chips.
6452 */
6453 pr_warn("PEBS disabled due to CPU errata\n");
6454 x86_pmu.ds_pebs = 0;
6455 x86_pmu.pebs_constraints = NULL;
6456 }
6457
6458 static const struct x86_cpu_id isolation_ucodes[] = {
6459 X86_MATCH_VFM_STEPS(INTEL_HASWELL, 3, 3, 0x0000001f),
6460 X86_MATCH_VFM_STEPS(INTEL_HASWELL_L, 1, 1, 0x0000001e),
6461 X86_MATCH_VFM_STEPS(INTEL_HASWELL_G, 1, 1, 0x00000015),
6462 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 2, 2, 0x00000037),
6463 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 4, 4, 0x0000000a),
6464 X86_MATCH_VFM_STEPS(INTEL_BROADWELL, 4, 4, 0x00000023),
6465 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G, 1, 1, 0x00000014),
6466 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 2, 2, 0x00000010),
6467 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 3, 3, 0x07000009),
6468 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 4, 4, 0x0f000009),
6469 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 5, 5, 0x0e000002),
6470 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X, 1, 1, 0x0b000014),
6471 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 3, 3, 0x00000021),
6472 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 4, 7, 0x00000000),
6473 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 11, 11, 0x00000000),
6474 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L, 3, 3, 0x0000007c),
6475 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE, 3, 3, 0x0000007c),
6476 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE, 9, 13, 0x0000004e),
6477 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L, 9, 12, 0x0000004e),
6478 {}
6479 };
6480
static void intel_check_pebs_isolation(void)
6482 {
6483 x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
6484 }
6485
static __init void intel_pebs_isolation_quirk(void)
6487 {
6488 WARN_ON_ONCE(x86_pmu.check_microcode);
6489 x86_pmu.check_microcode = intel_check_pebs_isolation;
6490 intel_check_pebs_isolation();
6491 }
6492
6493 static const struct x86_cpu_id pebs_ucodes[] = {
6494 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE, 7, 7, 0x00000028),
6495 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 6, 6, 0x00000618),
6496 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 7, 7, 0x0000070c),
6497 {}
6498 };
6499
static bool intel_snb_pebs_broken(void)
6501 {
6502 return !x86_match_min_microcode_rev(pebs_ucodes);
6503 }
6504
static void intel_snb_check_microcode(void)
6506 {
6507 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
6508 return;
6509
/*
 * Serialized by the microcode lock.
 */
6513 if (x86_pmu.pebs_broken) {
6514 pr_info("PEBS enabled due to microcode update\n");
6515 x86_pmu.pebs_broken = 0;
6516 } else {
6517 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
6518 x86_pmu.pebs_broken = 1;
6519 }
6520 }
6521
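/* Check whether @msr lies within the LBR_FROM MSR range. */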
static bool is_lbr_from(unsigned long msr)
6523 {
6524 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
6525
6526 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
6527 }
6528
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
 * This function tests whether the input MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
6534 {
6535 u64 val_old, val_new, val_tmp;
6536
6537 /*
6538 * Disable the check for real HW, so we don't
6539 * mess with potentially enabled registers:
6540 */
6541 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
6542 return true;
6543
/*
 * Read the current value, change it and read it back to see if it
 * matches; this is needed to detect certain hardware emulators
 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
 */
6549 if (rdmsrq_safe(msr, &val_old))
6550 return false;
6551
6552 /*
6553 * Only change the bits which can be updated by wrmsrq.
6554 */
6555 val_tmp = val_old ^ mask;
6556
6557 if (is_lbr_from(msr))
6558 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
6559
6560 if (wrmsrq_safe(msr, val_tmp) ||
6561 rdmsrq_safe(msr, &val_new))
6562 return false;
6563
6564 /*
6565 * Quirk only affects validation in wrmsr(), so wrmsrq()'s value
6566 * should equal rdmsrq()'s even with the quirk.
6567 */
6568 if (val_new != val_tmp)
6569 return false;
6570
6571 if (is_lbr_from(msr))
6572 val_old = lbr_from_signext_quirk_wr(val_old);
6573
/*
 * Here it is certain that the MSR can be safely accessed.
 * Restore the old value and return.
 */
6577 wrmsrq(msr, val_old);
6578
6579 return true;
6580 }
6581
static __init void intel_sandybridge_quirk(void)
6583 {
6584 x86_pmu.check_microcode = intel_snb_check_microcode;
6585 cpus_read_lock();
6586 intel_snb_check_microcode();
6587 cpus_read_unlock();
6588 }
6589
6590 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
6591 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
6592 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
6593 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
6594 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
6595 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
6596 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
6597 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
6598 };
6599
static __init void intel_arch_events_quirk(void)
6601 {
6602 int bit;
6603
6604 /* disable event that reported as not present by cpuid */
6605 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
6606 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
6607 pr_warn("CPUID marked event: \'%s\' unavailable\n",
6608 intel_arch_events_map[bit].name);
6609 }
6610 }
6611
static __init void intel_nehalem_quirk(void)
6613 {
6614 union cpuid10_ebx ebx;
6615
6616 ebx.full = x86_pmu.events_maskl;
6617 if (ebx.split.no_branch_misses_retired) {
6618 /*
6619 * Erratum AAJ80 detected, we work it around by using
6620 * the BR_MISP_EXEC.ANY event. This will over-count
6621 * branch-misses, but it's still much better than the
6622 * architectural event which is often completely bogus:
6623 */
6624 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
6625 ebx.split.no_branch_misses_retired = 0;
6626 x86_pmu.events_maskl = ebx.full;
6627 pr_info("CPU erratum AAJ80 worked around\n");
6628 }
6629 }
6630
6631 /*
6632 * enable software workaround for errata:
6633 * SNB: BJ122
6634 * IVB: BV98
6635 * HSW: HSD29
6636 *
 * Only needed when HT is enabled. However, detecting
 * whether HT is enabled is difficult (model specific). So instead,
 * we enable the workaround early at boot, and verify whether
 * it is needed in a later initcall phase, once we have valid
 * topology information to check if HT is actually enabled.
 */
static __init void intel_ht_bug(void)
6644 {
6645 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
6646
6647 x86_pmu.start_scheduling = intel_start_scheduling;
6648 x86_pmu.commit_scheduling = intel_commit_scheduling;
6649 x86_pmu.stop_scheduling = intel_stop_scheduling;
6650 }
6651
6652 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
6653 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
6654
6655 /* Haswell special events */
6656 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
6657 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
6658 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
6659 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
6660 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
6661 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
6662 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
6663 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
6664 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
6665 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
6666 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
6667 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
6668
6669 static struct attribute *hsw_events_attrs[] = {
6670 EVENT_PTR(td_slots_issued),
6671 EVENT_PTR(td_slots_retired),
6672 EVENT_PTR(td_fetch_bubbles),
6673 EVENT_PTR(td_total_slots),
6674 EVENT_PTR(td_total_slots_scale),
6675 EVENT_PTR(td_recovery_bubbles),
6676 EVENT_PTR(td_recovery_bubbles_scale),
6677 NULL
6678 };
6679
6680 static struct attribute *hsw_mem_events_attrs[] = {
6681 EVENT_PTR(mem_ld_hsw),
6682 EVENT_PTR(mem_st_hsw),
6683 NULL,
6684 };
6685
6686 static struct attribute *hsw_tsx_events_attrs[] = {
6687 EVENT_PTR(tx_start),
6688 EVENT_PTR(tx_commit),
6689 EVENT_PTR(tx_abort),
6690 EVENT_PTR(tx_capacity),
6691 EVENT_PTR(tx_conflict),
6692 EVENT_PTR(el_start),
6693 EVENT_PTR(el_commit),
6694 EVENT_PTR(el_abort),
6695 EVENT_PTR(el_capacity),
6696 EVENT_PTR(el_conflict),
6697 EVENT_PTR(cycles_t),
6698 EVENT_PTR(cycles_ct),
6699 NULL
6700 };
6701
6702 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
6703 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
6704 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
6705 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
6706
6707 static struct attribute *icl_events_attrs[] = {
6708 EVENT_PTR(mem_ld_hsw),
6709 EVENT_PTR(mem_st_hsw),
6710 NULL,
6711 };
6712
6713 static struct attribute *icl_td_events_attrs[] = {
6714 EVENT_PTR(slots),
6715 EVENT_PTR(td_retiring),
6716 EVENT_PTR(td_bad_spec),
6717 EVENT_PTR(td_fe_bound),
6718 EVENT_PTR(td_be_bound),
6719 NULL,
6720 };
6721
6722 static struct attribute *icl_tsx_events_attrs[] = {
6723 EVENT_PTR(tx_start),
6724 EVENT_PTR(tx_abort),
6725 EVENT_PTR(tx_commit),
6726 EVENT_PTR(tx_capacity_read),
6727 EVENT_PTR(tx_capacity_write),
6728 EVENT_PTR(tx_conflict),
6729 EVENT_PTR(el_start),
6730 EVENT_PTR(el_abort),
6731 EVENT_PTR(el_commit),
6732 EVENT_PTR(el_capacity_read),
6733 EVENT_PTR(el_capacity_write),
6734 EVENT_PTR(el_conflict),
6735 EVENT_PTR(cycles_t),
6736 EVENT_PTR(cycles_ct),
6737 NULL,
6738 };
6739
6740
6741 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
6742 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
6743
6744 static struct attribute *glc_events_attrs[] = {
6745 EVENT_PTR(mem_ld_hsw),
6746 EVENT_PTR(mem_st_spr),
6747 EVENT_PTR(mem_ld_aux),
6748 NULL,
6749 };
6750
6751 static struct attribute *glc_td_events_attrs[] = {
6752 EVENT_PTR(slots),
6753 EVENT_PTR(td_retiring),
6754 EVENT_PTR(td_bad_spec),
6755 EVENT_PTR(td_fe_bound),
6756 EVENT_PTR(td_be_bound),
6757 EVENT_PTR(td_heavy_ops),
6758 EVENT_PTR(td_br_mispredict),
6759 EVENT_PTR(td_fetch_lat),
6760 EVENT_PTR(td_mem_bound),
6761 NULL,
6762 };
6763
6764 static struct attribute *glc_tsx_events_attrs[] = {
6765 EVENT_PTR(tx_start),
6766 EVENT_PTR(tx_abort),
6767 EVENT_PTR(tx_commit),
6768 EVENT_PTR(tx_capacity_read),
6769 EVENT_PTR(tx_capacity_write),
6770 EVENT_PTR(tx_conflict),
6771 EVENT_PTR(cycles_t),
6772 EVENT_PTR(cycles_ct),
6773 NULL,
6774 };
6775
static ssize_t freeze_on_smi_show(struct device *cdev,
6777 struct device_attribute *attr,
6778 char *buf)
6779 {
6780 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
6781 }
6782
6783 static DEFINE_MUTEX(freeze_on_smi_mutex);
6784
static ssize_t freeze_on_smi_store(struct device *cdev,
6786 struct device_attribute *attr,
6787 const char *buf, size_t count)
6788 {
6789 unsigned long val;
6790 ssize_t ret;
6791
6792 ret = kstrtoul(buf, 0, &val);
6793 if (ret)
6794 return ret;
6795
6796 if (val > 1)
6797 return -EINVAL;
6798
6799 mutex_lock(&freeze_on_smi_mutex);
6800
6801 if (x86_pmu.attr_freeze_on_smi == val)
6802 goto done;
6803
6804 x86_pmu.attr_freeze_on_smi = val;
6805
6806 cpus_read_lock();
6807 on_each_cpu(flip_smm_bit, &val, 1);
6808 cpus_read_unlock();
6809 done:
6810 mutex_unlock(&freeze_on_smi_mutex);
6811
6812 return count;
6813 }
6814
6815 static void update_tfa_sched(void *ignored)
6816 {
6817 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
6818
6819 /*
6820 * Check if PMC3 is in use and, if so, force a reschedule of
6821 * all event types in all contexts.
6822 */
6823 if (test_bit(3, cpuc->active_mask))
6824 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
6825 }
6826
6827 static ssize_t show_sysctl_tfa(struct device *cdev,
6828 struct device_attribute *attr,
6829 char *buf)
6830 {
6831 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
6832 }
6833
6834 static ssize_t set_sysctl_tfa(struct device *cdev,
6835 struct device_attribute *attr,
6836 const char *buf, size_t count)
6837 {
6838 bool val;
6839 ssize_t ret;
6840
6841 ret = kstrtobool(buf, &val);
6842 if (ret)
6843 return ret;
6844
6845 /* no change */
6846 if (val == allow_tsx_force_abort)
6847 return count;
6848
6849 allow_tsx_force_abort = val;
6850
6851 cpus_read_lock();
6852 on_each_cpu(update_tfa_sched, NULL, 1);
6853 cpus_read_unlock();
6854
6855 return count;
6856 }
6857
6858
6859 static DEVICE_ATTR_RW(freeze_on_smi);
6860
6861 static ssize_t branches_show(struct device *cdev,
6862 struct device_attribute *attr,
6863 char *buf)
6864 {
6865 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
6866 }
6867
6868 static DEVICE_ATTR_RO(branches);
6869
6870 static ssize_t branch_counter_nr_show(struct device *cdev,
6871 struct device_attribute *attr,
6872 char *buf)
6873 {
6874 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
6875 }
6876
6877 static DEVICE_ATTR_RO(branch_counter_nr);
6878
6879 static ssize_t branch_counter_width_show(struct device *cdev,
6880 struct device_attribute *attr,
6881 char *buf)
6882 {
6883 return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
6884 }
6885
6886 static DEVICE_ATTR_RO(branch_counter_width);
6887
6888 static struct attribute *lbr_attrs[] = {
6889 &dev_attr_branches.attr,
6890 &dev_attr_branch_counter_nr.attr,
6891 &dev_attr_branch_counter_width.attr,
6892 NULL
6893 };
6894
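/*
 * Visibility callback for the LBR "caps" attributes above: index 0 is the
 * "branches" attribute, shown whenever the CPU has LBR entries (lbr_nr != 0);
 * the branch-counter attributes are only shown when the PMU supports branch
 * counters (PMU_FL_BR_CNTR).  They typically appear under e.g.
 * /sys/bus/event_source/devices/cpu/caps/branches.
 */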
6895 static umode_t
6896 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6897 {
6898 /* branches */
6899 if (i == 0)
6900 return x86_pmu.lbr_nr ? attr->mode : 0;
6901
6902 return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
6903 }
6904
6905 static char pmu_name_str[30];
6906
6907 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
6908
6909 static struct attribute *intel_pmu_caps_attrs[] = {
6910 &dev_attr_pmu_name.attr.attr,
6911 NULL
6912 };
6913
6914 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
6915 show_sysctl_tfa,
6916 set_sysctl_tfa);
6917
6918 static struct attribute *intel_pmu_attrs[] = {
6919 &dev_attr_freeze_on_smi.attr,
6920 &dev_attr_allow_tsx_force_abort.attr,
6921 NULL,
6922 };
6923
6924 static umode_t
6925 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6926 {
6927 if (attr == &dev_attr_allow_tsx_force_abort.attr)
6928 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
6929
6930 return attr->mode;
6931 }
6932
6933 static umode_t
6934 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6935 {
6936 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
6937 }
6938
6939 static umode_t
6940 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6941 {
6942 return intel_pmu_has_pebs() ? attr->mode : 0;
6943 }
6944
6945 static umode_t
6946 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6947 {
6948 if (attr == &event_attr_mem_ld_aux.attr.attr)
6949 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
6950
6951 return pebs_is_visible(kobj, attr, i);
6952 }
6953
6954 static umode_t
6955 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6956 {
6957 return x86_pmu.version >= 2 ? attr->mode : 0;
6958 }
6959
6960 static umode_t
6961 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6962 {
6963 /*
6964 * Hide the perf metrics topdown events
6965 * if the feature is not enumerated.
6966 */
6967 if (x86_pmu.num_topdown_events)
6968 return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
6969
6970 return attr->mode;
6971 }
6972
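/*
 * Format attribute for Auto Counter Reload (ACR): the acr_mask, carried in
 * config2, selects the group of counters involved in auto counter reload for
 * this event.  The attribute is only exposed when the PMU enumerates ACR
 * support (see acr_is_visible() below).
 */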
6973 PMU_FORMAT_ATTR(acr_mask, "config2:0-63");
6974
6975 static struct attribute *format_acr_attrs[] = {
6976 &format_attr_acr_mask.attr,
6977 NULL
6978 };
6979
6980 static umode_t
6981 acr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6982 {
6983 struct device *dev = kobj_to_dev(kobj);
6984
6985 return intel_pmu_has_acr(dev_get_drvdata(dev)) ? attr->mode : 0;
6986 }
6987
6988 static struct attribute_group group_events_td = {
6989 .name = "events",
6990 .is_visible = td_is_visible,
6991 };
6992
6993 static struct attribute_group group_events_mem = {
6994 .name = "events",
6995 .is_visible = mem_is_visible,
6996 };
6997
6998 static struct attribute_group group_events_tsx = {
6999 .name = "events",
7000 .is_visible = tsx_is_visible,
7001 };
7002
7003 static struct attribute_group group_caps_gen = {
7004 .name = "caps",
7005 .attrs = intel_pmu_caps_attrs,
7006 };
7007
7008 static struct attribute_group group_caps_lbr = {
7009 .name = "caps",
7010 .attrs = lbr_attrs,
7011 .is_visible = lbr_is_visible,
7012 };
7013
7014 static struct attribute_group group_format_extra = {
7015 .name = "format",
7016 .is_visible = exra_is_visible,
7017 };
7018
7019 static struct attribute_group group_format_extra_skl = {
7020 .name = "format",
7021 .is_visible = exra_is_visible,
7022 };
7023
7024 static struct attribute_group group_format_evtsel_ext = {
7025 .name = "format",
7026 .attrs = format_evtsel_ext_attrs,
7027 .is_visible = evtsel_ext_is_visible,
7028 };
7029
7030 static struct attribute_group group_format_acr = {
7031 .name = "format",
7032 .attrs = format_acr_attrs,
7033 .is_visible = acr_is_visible,
7034 };
7035
7036 static struct attribute_group group_default = {
7037 .attrs = intel_pmu_attrs,
7038 .is_visible = default_is_visible,
7039 };
7040
7041 static const struct attribute_group *attr_update[] = {
7042 &group_events_td,
7043 &group_events_mem,
7044 &group_events_tsx,
7045 &group_caps_gen,
7046 &group_caps_lbr,
7047 &group_format_extra,
7048 &group_format_extra_skl,
7049 &group_format_evtsel_ext,
7050 &group_format_acr,
7051 &group_default,
7052 NULL,
7053 };
7054
7055 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
7056 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
7057 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
7058 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
7059 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
7060 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
7061 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
7062 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
7063 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
7064
7065 static struct attribute *adl_hybrid_events_attrs[] = {
7066 EVENT_PTR(slots_adl),
7067 EVENT_PTR(td_retiring_adl),
7068 EVENT_PTR(td_bad_spec_adl),
7069 EVENT_PTR(td_fe_bound_adl),
7070 EVENT_PTR(td_be_bound_adl),
7071 EVENT_PTR(td_heavy_ops_adl),
7072 EVENT_PTR(td_br_mis_adl),
7073 EVENT_PTR(td_fetch_lat_adl),
7074 EVENT_PTR(td_mem_bound_adl),
7075 NULL,
7076 };
7077
7078 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
7079 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
7080 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
7081
7082 static struct attribute *lnl_hybrid_events_attrs[] = {
7083 EVENT_PTR(slots_adl),
7084 EVENT_PTR(td_retiring_lnl),
7085 EVENT_PTR(td_bad_spec_adl),
7086 EVENT_PTR(td_fe_bound_lnl),
7087 EVENT_PTR(td_be_bound_lnl),
7088 EVENT_PTR(td_heavy_ops_adl),
7089 EVENT_PTR(td_br_mis_adl),
7090 EVENT_PTR(td_fetch_lat_adl),
7091 EVENT_PTR(td_mem_bound_adl),
7092 NULL
7093 };
7094
7095 /* The event string must be in PMU IDX order. */
7096 EVENT_ATTR_STR_HYBRID(topdown-retiring,
7097 td_retiring_arl_h,
7098 "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
7099 hybrid_big_small_tiny);
7100 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
7101 td_bad_spec_arl_h,
7102 "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
7103 hybrid_big_small_tiny);
7104 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
7105 td_fe_bound_arl_h,
7106 "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
7107 hybrid_big_small_tiny);
7108 EVENT_ATTR_STR_HYBRID(topdown-be-bound,
7109 td_be_bound_arl_h,
7110 "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
7111 hybrid_big_small_tiny);
7112
7113 static struct attribute *arl_h_hybrid_events_attrs[] = {
7114 EVENT_PTR(slots_adl),
7115 EVENT_PTR(td_retiring_arl_h),
7116 EVENT_PTR(td_bad_spec_arl_h),
7117 EVENT_PTR(td_fe_bound_arl_h),
7118 EVENT_PTR(td_be_bound_arl_h),
7119 EVENT_PTR(td_heavy_ops_adl),
7120 EVENT_PTR(td_br_mis_adl),
7121 EVENT_PTR(td_fetch_lat_adl),
7122 EVENT_PTR(td_mem_bound_adl),
7123 NULL,
7124 };
7125
7126 /* Must be in IDX order */
7127 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
7128 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
7129 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
7130
7131 static struct attribute *adl_hybrid_mem_attrs[] = {
7132 EVENT_PTR(mem_ld_adl),
7133 EVENT_PTR(mem_st_adl),
7134 EVENT_PTR(mem_ld_aux_adl),
7135 NULL,
7136 };
7137
7138 static struct attribute *mtl_hybrid_mem_attrs[] = {
7139 EVENT_PTR(mem_ld_adl),
7140 EVENT_PTR(mem_st_adl),
7141 NULL
7142 };
7143
7144 EVENT_ATTR_STR_HYBRID(mem-loads,
7145 mem_ld_arl_h,
7146 "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
7147 hybrid_big_small_tiny);
7148 EVENT_ATTR_STR_HYBRID(mem-stores,
7149 mem_st_arl_h,
7150 "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
7151 hybrid_big_small_tiny);
7152
7153 static struct attribute *arl_h_hybrid_mem_attrs[] = {
7154 EVENT_PTR(mem_ld_arl_h),
7155 EVENT_PTR(mem_st_arl_h),
7156 NULL,
7157 };
7158
7159 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
7160 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
7161 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
7162 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
7163 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
7164 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
7165 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
7166 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
7167
7168 static struct attribute *adl_hybrid_tsx_attrs[] = {
7169 EVENT_PTR(tx_start_adl),
7170 EVENT_PTR(tx_abort_adl),
7171 EVENT_PTR(tx_commit_adl),
7172 EVENT_PTR(tx_capacity_read_adl),
7173 EVENT_PTR(tx_capacity_write_adl),
7174 EVENT_PTR(tx_conflict_adl),
7175 EVENT_PTR(cycles_t_adl),
7176 EVENT_PTR(cycles_ct_adl),
7177 NULL,
7178 };
7179
7180 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
7181 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
7182 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
7183 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small_tiny);
7184 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
7185
7186 #define ADL_HYBRID_RTM_FORMAT_ATTR \
7187 FORMAT_HYBRID_PTR(in_tx), \
7188 FORMAT_HYBRID_PTR(in_tx_cp)
7189
7190 #define ADL_HYBRID_FORMAT_ATTR \
7191 FORMAT_HYBRID_PTR(offcore_rsp), \
7192 FORMAT_HYBRID_PTR(ldlat), \
7193 FORMAT_HYBRID_PTR(frontend)
7194
7195 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
7196 ADL_HYBRID_RTM_FORMAT_ATTR,
7197 ADL_HYBRID_FORMAT_ATTR,
7198 NULL
7199 };
7200
7201 static struct attribute *adl_hybrid_extra_attr[] = {
7202 ADL_HYBRID_FORMAT_ATTR,
7203 NULL
7204 };
7205
7206 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny);
7207
7208 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
7209 ADL_HYBRID_RTM_FORMAT_ATTR,
7210 ADL_HYBRID_FORMAT_ATTR,
7211 FORMAT_HYBRID_PTR(snoop_rsp),
7212 NULL
7213 };
7214
7215 static struct attribute *mtl_hybrid_extra_attr[] = {
7216 ADL_HYBRID_FORMAT_ATTR,
7217 FORMAT_HYBRID_PTR(snoop_rsp),
7218 NULL
7219 };
7220
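/*
 * Sysfs visibility helpers for hybrid systems: an attribute is exposed for a
 * given hybrid PMU only when the attribute's pmu_type mask overlaps the type
 * of that PMU (e.g. cpu_core vs. cpu_atom).
 */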
7221 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
7222 {
7223 struct device *dev = kobj_to_dev(kobj);
7224 struct x86_hybrid_pmu *pmu =
7225 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7226 struct perf_pmu_events_hybrid_attr *pmu_attr =
7227 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
7228
7229 return pmu->pmu_type & pmu_attr->pmu_type;
7230 }
7231
7232 static umode_t hybrid_events_is_visible(struct kobject *kobj,
7233 struct attribute *attr, int i)
7234 {
7235 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
7236 }
7237
7238 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
7239 {
7240 int cpu = cpumask_first(&pmu->supported_cpus);
7241
7242 return (cpu >= nr_cpu_ids) ? -1 : cpu;
7243 }
7244
7245 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
7246 struct attribute *attr, int i)
7247 {
7248 struct device *dev = kobj_to_dev(kobj);
7249 struct x86_hybrid_pmu *pmu =
7250 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7251 int cpu = hybrid_find_supported_cpu(pmu);
7252
7253 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
7254 }
7255
7256 static umode_t hybrid_format_is_visible(struct kobject *kobj,
7257 struct attribute *attr, int i)
7258 {
7259 struct device *dev = kobj_to_dev(kobj);
7260 struct x86_hybrid_pmu *pmu =
7261 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7262 struct perf_pmu_format_hybrid_attr *pmu_attr =
7263 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
7264 int cpu = hybrid_find_supported_cpu(pmu);
7265
7266 return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
7267 }
7268
7269 static umode_t hybrid_td_is_visible(struct kobject *kobj,
7270 struct attribute *attr, int i)
7271 {
7272 struct device *dev = kobj_to_dev(kobj);
7273 struct x86_hybrid_pmu *pmu =
7274 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7275
7276 if (!is_attr_for_this_pmu(kobj, attr))
7277 return 0;
7278
7279
7280 /* Only the big core supports perf metrics */
7281 if (pmu->pmu_type == hybrid_big)
7282 return pmu->intel_cap.perf_metrics ? attr->mode : 0;
7283
7284 return attr->mode;
7285 }
7286
7287 static struct attribute_group hybrid_group_events_td = {
7288 .name = "events",
7289 .is_visible = hybrid_td_is_visible,
7290 };
7291
7292 static struct attribute_group hybrid_group_events_mem = {
7293 .name = "events",
7294 .is_visible = hybrid_events_is_visible,
7295 };
7296
7297 static struct attribute_group hybrid_group_events_tsx = {
7298 .name = "events",
7299 .is_visible = hybrid_tsx_is_visible,
7300 };
7301
7302 static struct attribute_group hybrid_group_format_extra = {
7303 .name = "format",
7304 .is_visible = hybrid_format_is_visible,
7305 };
7306
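/*
 * The "cpus" attribute of a hybrid PMU lists the CPUs it covers, so tools can
 * pick the right PMU instance, e.g. (paths may vary by platform):
 *
 *   $ cat /sys/bus/event_source/devices/cpu_core/cpus
 *   $ cat /sys/bus/event_source/devices/cpu_atom/cpus
 */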
7307 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
7308 struct device_attribute *attr,
7309 char *buf)
7310 {
7311 struct x86_hybrid_pmu *pmu =
7312 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7313
7314 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
7315 }
7316
7317 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
7318 static struct attribute *intel_hybrid_cpus_attrs[] = {
7319 &dev_attr_cpus.attr,
7320 NULL,
7321 };
7322
7323 static struct attribute_group hybrid_group_cpus = {
7324 .attrs = intel_hybrid_cpus_attrs,
7325 };
7326
7327 static const struct attribute_group *hybrid_attr_update[] = {
7328 &hybrid_group_events_td,
7329 &hybrid_group_events_mem,
7330 &hybrid_group_events_tsx,
7331 &group_caps_gen,
7332 &group_caps_lbr,
7333 &hybrid_group_format_extra,
7334 &group_format_evtsel_ext,
7335 &group_format_acr,
7336 &group_default,
7337 &hybrid_group_cpus,
7338 NULL,
7339 };
7340
7341 static struct attribute *empty_attrs;
7342
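/*
 * Trim the statically defined constraint masks so that each constraint only
 * covers counters that are actually enumerated for this PMU.
 */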
7343 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
7344 u64 cntr_mask,
7345 u64 fixed_cntr_mask,
7346 u64 intel_ctrl)
7347 {
7348 struct event_constraint *c;
7349
7350 if (!event_constraints)
7351 return;
7352
7353 /*
7354 * The event on fixed counter 2 (REF_CYCLES) only works on this
7355 * counter, so do not extend its mask to the generic counters.
7356 */
7357 for_each_event_constraint(c, event_constraints) {
7358 /*
7359 * Don't extend the topdown slots and metrics
7360 * events to the generic counters.
7361 */
7362 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
7363 /*
7364 * Disable topdown slots and metrics events,
7365 * if slots event is not in CPUID.
7366 */
7367 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
7368 c->idxmsk64 = 0;
7369 c->weight = hweight64(c->idxmsk64);
7370 continue;
7371 }
7372
7373 if (c->cmask == FIXED_EVENT_FLAGS) {
7374 /* Disable fixed counters which are not enumerated in CPUID */
7375 c->idxmsk64 &= intel_ctrl;
7376
7377 /*
7378 * Don't extend the pseudo-encoding to the
7379 * generic counters
7380 */
7381 if (!use_fixed_pseudo_encoding(c->code))
7382 c->idxmsk64 |= cntr_mask;
7383 }
7384 c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
7385 c->weight = hweight64(c->idxmsk64);
7386 }
7387 }
7388
7389 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
7390 {
7391 struct extra_reg *er;
7392
7393 /*
7394 * Accessing an extra MSR may cause a #GP under certain circumstances,
7395 * e.g. KVM doesn't support the offcore event.
7396 * Check all extra_regs here.
7397 */
7398 if (!extra_regs)
7399 return;
7400
7401 for (er = extra_regs; er->msr; er++) {
7402 er->extra_msr_access = check_msr(er->msr, 0x11UL);
7403 /* Disable LBR select mapping */
7404 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
7405 x86_pmu.lbr_sel_map = NULL;
7406 }
7407 }
7408
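/*
 * With the perfmon V6 MSR layout both the event-select and the counter MSRs
 * advance from their bases in fixed steps of MSR_IA32_PMC_V6_STEP, so the
 * offset only depends on the counter index; the eventsel argument is unused.
 */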
7409 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
7410 {
7411 return MSR_IA32_PMC_V6_STEP * index;
7412 }
7413
7414 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
7415 { hybrid_small, "cpu_atom" },
7416 { hybrid_big, "cpu_core" },
7417 { hybrid_tiny, "cpu_lowpower" },
7418 };
7419
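/*
 * Allocate one x86_hybrid_pmu per type bit in @pmus and seed each instance
 * with the boot-CPU wide defaults (counter masks, config mask, capabilities).
 * Small/tiny cores use mid-ack and have perf metrics disabled; big cores use
 * late-ack and keep perf metrics enabled.
 */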
7420 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
7421 {
7422 unsigned long pmus_mask = pmus;
7423 struct x86_hybrid_pmu *pmu;
7424 int idx = 0, bit;
7425
7426 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
7427 x86_pmu.hybrid_pmu = kzalloc_objs(struct x86_hybrid_pmu,
7428 x86_pmu.num_hybrid_pmus);
7429 if (!x86_pmu.hybrid_pmu)
7430 return -ENOMEM;
7431
7432 static_branch_enable(&perf_is_hybrid);
7433 x86_pmu.filter = intel_pmu_filter;
7434
7435 for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
7436 pmu = &x86_pmu.hybrid_pmu[idx++];
7437 pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
7438 pmu->name = intel_hybrid_pmu_type_map[bit].name;
7439
7440 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7441 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7442 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7443 pmu->config_mask = X86_RAW_EVENT_MASK;
7444 pmu->unconstrained = (struct event_constraint)
7445 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7446 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7447
7448 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
7449 if (pmu->pmu_type & hybrid_small_tiny) {
7450 pmu->intel_cap.perf_metrics = 0;
7451 pmu->mid_ack = true;
7452 } else if (pmu->pmu_type & hybrid_big) {
7453 pmu->intel_cap.perf_metrics = 1;
7454 pmu->late_ack = true;
7455 }
7456 }
7457
7458 return 0;
7459 }
7460
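/*
 * If CPUID does not flag the architectural UnHalted Reference Cycles event as
 * unavailable, remap PERF_COUNT_HW_REF_CPU_CYCLES from the fixed-counter-only
 * pseudo-encoding 0x0300 to the 0x013c encoding, which can also be scheduled
 * on general-purpose counters.
 */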
7461 static __always_inline void intel_pmu_ref_cycles_ext(void)
7462 {
7463 if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
7464 intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
7465 }
7466
7467 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
7468 {
7469 x86_pmu.late_ack = true;
7470 x86_pmu.limit_period = glc_limit_period;
7471 x86_pmu.pebs_aliases = NULL;
7472 x86_pmu.pebs_prec_dist = true;
7473 x86_pmu.pebs_block = true;
7474 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7475 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7476 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
7477 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
7478 x86_pmu.lbr_pt_coexist = true;
7479 x86_pmu.num_topdown_events = 8;
7480 static_call_update(intel_pmu_update_topdown_event,
7481 &icl_update_topdown_event);
7482 static_call_update(intel_pmu_set_topdown_event_period,
7483 &icl_set_topdown_event_period);
7484
7485 memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7486 memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7487 hybrid(pmu, event_constraints) = intel_glc_event_constraints;
7488 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
7489
7490 intel_pmu_ref_cycles_ext();
7491 }
7492
7493 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
7494 {
7495 x86_pmu.mid_ack = true;
7496 x86_pmu.limit_period = glc_limit_period;
7497 x86_pmu.pebs_aliases = NULL;
7498 x86_pmu.pebs_prec_dist = true;
7499 x86_pmu.pebs_block = true;
7500 x86_pmu.lbr_pt_coexist = true;
7501 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7502 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
7503
7504 memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7505 memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7506 hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7507 hybrid(pmu, event_constraints) = intel_grt_event_constraints;
7508 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
7509 hybrid(pmu, extra_regs) = intel_grt_extra_regs;
7510
7511 intel_pmu_ref_cycles_ext();
7512 }
7513
7514 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
7515 {
7516 intel_pmu_init_glc(pmu);
7517 hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
7518 hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
7519 hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
7520 }
7521
7522 static __always_inline void intel_pmu_init_pnc(struct pmu *pmu)
7523 {
7524 intel_pmu_init_glc(pmu);
7525 x86_pmu.flags &= ~PMU_FL_HAS_RSP_1;
7526 x86_pmu.flags |= PMU_FL_HAS_OMR;
7527 memcpy(hybrid_var(pmu, hw_cache_event_ids),
7528 pnc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7529 memcpy(hybrid_var(pmu, hw_cache_extra_regs),
7530 pnc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7531 hybrid(pmu, event_constraints) = intel_pnc_event_constraints;
7532 hybrid(pmu, pebs_constraints) = intel_pnc_pebs_event_constraints;
7533 hybrid(pmu, extra_regs) = intel_pnc_extra_regs;
7534 static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
7535 }
7536
7537 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
7538 {
7539 intel_pmu_init_grt(pmu);
7540 hybrid(pmu, event_constraints) = intel_skt_event_constraints;
7541 hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
7542 static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
7543 }
7544
7545 static __always_inline void intel_pmu_init_arw(struct pmu *pmu)
7546 {
7547 intel_pmu_init_grt(pmu);
7548 x86_pmu.flags &= ~PMU_FL_HAS_RSP_1;
7549 x86_pmu.flags |= PMU_FL_HAS_OMR;
7550 memcpy(hybrid_var(pmu, hw_cache_extra_regs),
7551 arw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7552 hybrid(pmu, event_constraints) = intel_arw_event_constraints;
7553 hybrid(pmu, pebs_constraints) = intel_arw_pebs_event_constraints;
7554 hybrid(pmu, extra_regs) = intel_arw_extra_regs;
7555 static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
7556 }
7557
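/*
 * Boot-time entry point: probe CPUID leaf 0xA for the architectural PerfMon
 * version and counter geometry, select core_pmu (v1) or intel_pmu (v2+), and
 * then apply the model-specific event tables, constraints and quirks in the
 * big switch below.
 */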
7558 __init int intel_pmu_init(void)
7559 {
7560 struct attribute **extra_skl_attr = &empty_attrs;
7561 struct attribute **extra_attr = &empty_attrs;
7562 struct attribute **td_attr = &empty_attrs;
7563 struct attribute **mem_attr = &empty_attrs;
7564 struct attribute **tsx_attr = &empty_attrs;
7565 union cpuid10_edx edx;
7566 union cpuid10_eax eax;
7567 union cpuid10_ebx ebx;
7568 unsigned int fixed_mask;
7569 bool pmem = false;
7570 int version, i;
7571 char *name;
7572 struct x86_hybrid_pmu *pmu;
7573
7574 /* Architectural Perfmon was introduced starting with Core "Yonah" */
7575 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
7576 switch (boot_cpu_data.x86) {
7577 case 6:
7578 if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH)
7579 return p6_pmu_init();
7580 break;
7581 case 11:
7582 return knc_pmu_init();
7583 case 15:
7584 return p4_pmu_init();
7585 }
7586
7587 pr_cont("unsupported CPU family %d model %d ",
7588 boot_cpu_data.x86, boot_cpu_data.x86_model);
7589 return -ENODEV;
7590 }
7591
7592 /*
7593 * Check whether the Architectural PerfMon supports
7594 * Branch Misses Retired hw_event or not.
7595 */
7596 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
7597 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
7598 return -ENODEV;
7599
7600 version = eax.split.version_id;
7601 if (version < 2)
7602 x86_pmu = core_pmu;
7603 else
7604 x86_pmu = intel_pmu;
7605
7606 x86_pmu.version = version;
7607 x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
7608 x86_pmu.cntval_bits = eax.split.bit_width;
7609 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
7610
7611 x86_pmu.events_maskl = ebx.full;
7612 x86_pmu.events_mask_len = eax.split.mask_length;
7613
7614 x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
7615 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
7616 x86_pmu.config_mask = X86_RAW_EVENT_MASK;
7617
7618 /*
7619 * Quirk: v2 perfmon does not report fixed-purpose events, so
7620 * assume at least 3 events, when not running in a hypervisor:
7621 */
7622 if (version > 1 && version < 5) {
7623 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
7624
7625 x86_pmu.fixed_cntr_mask64 =
7626 GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
7627 } else if (version >= 5)
7628 x86_pmu.fixed_cntr_mask64 = fixed_mask;
7629
7630 if (boot_cpu_has(X86_FEATURE_PDCM)) {
7631 u64 capabilities;
7632
7633 rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities);
7634 x86_pmu.intel_cap.capabilities = capabilities;
7635 }
7636
7637 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
7638 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
7639 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
7640 }
7641
7642 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
7643 intel_pmu_arch_lbr_init();
7644
7645 intel_pebs_init();
7646
7647 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
7648
7649 if (version >= 5) {
7650 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
7651 if (x86_pmu.intel_cap.anythread_deprecated)
7652 pr_cont(" AnyThread deprecated, ");
7653 }
7654
7655 /* The perf side of the core PMU is ready to support the mediated vPMU. */
7656 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_MEDIATED_VPMU;
7657
7658 /*
7659 * Many features on and after V6 require dynamic constraints,
7660 * e.g., Arch PEBS and ACR.
7661 */
7662 if (version >= 6) {
7663 x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
7664 x86_pmu.late_setup = intel_pmu_late_setup;
7665 }
7666
7667 /*
7668 * Install the hw-cache-events table:
7669 */
7670 switch (boot_cpu_data.x86_vfm) {
7671 case INTEL_CORE_YONAH:
7672 pr_cont("Core events, ");
7673 name = "core";
7674 break;
7675
7676 case INTEL_CORE2_MEROM:
7677 x86_add_quirk(intel_clovertown_quirk);
7678 fallthrough;
7679
7680 case INTEL_CORE2_MEROM_L:
7681 case INTEL_CORE2_PENRYN:
7682 case INTEL_CORE2_DUNNINGTON:
7683 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
7684 sizeof(hw_cache_event_ids));
7685
7686 intel_pmu_lbr_init_core();
7687
7688 x86_pmu.event_constraints = intel_core2_event_constraints;
7689 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
7690 pr_cont("Core2 events, ");
7691 name = "core2";
7692 break;
7693
7694 case INTEL_NEHALEM:
7695 case INTEL_NEHALEM_EP:
7696 case INTEL_NEHALEM_EX:
7697 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
7698 sizeof(hw_cache_event_ids));
7699 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
7700 sizeof(hw_cache_extra_regs));
7701
7702 intel_pmu_lbr_init_nhm();
7703
7704 x86_pmu.event_constraints = intel_nehalem_event_constraints;
7705 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
7706 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
7707 x86_pmu.extra_regs = intel_nehalem_extra_regs;
7708 x86_pmu.limit_period = nhm_limit_period;
7709
7710 mem_attr = nhm_mem_events_attrs;
7711
7712 /* UOPS_ISSUED.STALLED_CYCLES */
7713 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7714 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7715 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
7716 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7717 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
7718
7719 intel_pmu_pebs_data_source_nhm();
7720 x86_add_quirk(intel_nehalem_quirk);
7721 x86_pmu.pebs_no_tlb = 1;
7722 extra_attr = nhm_format_attr;
7723
7724 pr_cont("Nehalem events, ");
7725 name = "nehalem";
7726 break;
7727
7728 case INTEL_ATOM_BONNELL:
7729 case INTEL_ATOM_BONNELL_MID:
7730 case INTEL_ATOM_SALTWELL:
7731 case INTEL_ATOM_SALTWELL_MID:
7732 case INTEL_ATOM_SALTWELL_TABLET:
7733 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
7734 sizeof(hw_cache_event_ids));
7735
7736 intel_pmu_lbr_init_atom();
7737
7738 x86_pmu.event_constraints = intel_gen_event_constraints;
7739 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
7740 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
7741 pr_cont("Atom events, ");
7742 name = "bonnell";
7743 break;
7744
7745 case INTEL_ATOM_SILVERMONT:
7746 case INTEL_ATOM_SILVERMONT_D:
7747 case INTEL_ATOM_SILVERMONT_MID:
7748 case INTEL_ATOM_AIRMONT:
7749 case INTEL_ATOM_AIRMONT_NP:
7750 case INTEL_ATOM_SILVERMONT_MID2:
7751 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
7752 sizeof(hw_cache_event_ids));
7753 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
7754 sizeof(hw_cache_extra_regs));
7755
7756 intel_pmu_lbr_init_slm();
7757
7758 x86_pmu.event_constraints = intel_slm_event_constraints;
7759 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7760 x86_pmu.extra_regs = intel_slm_extra_regs;
7761 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7762 td_attr = slm_events_attrs;
7763 extra_attr = slm_format_attr;
7764 pr_cont("Silvermont events, ");
7765 name = "silvermont";
7766 break;
7767
7768 case INTEL_ATOM_GOLDMONT:
7769 case INTEL_ATOM_GOLDMONT_D:
7770 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
7771 sizeof(hw_cache_event_ids));
7772 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
7773 sizeof(hw_cache_extra_regs));
7774
7775 intel_pmu_lbr_init_skl();
7776
7777 x86_pmu.event_constraints = intel_slm_event_constraints;
7778 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
7779 x86_pmu.extra_regs = intel_glm_extra_regs;
7780 /*
7781 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7782 * for precise cycles.
7783 * :pp is identical to :ppp
7784 */
7785 x86_pmu.pebs_aliases = NULL;
7786 x86_pmu.pebs_prec_dist = true;
7787 x86_pmu.lbr_pt_coexist = true;
7788 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7789 td_attr = glm_events_attrs;
7790 extra_attr = slm_format_attr;
7791 pr_cont("Goldmont events, ");
7792 name = "goldmont";
7793 break;
7794
7795 case INTEL_ATOM_GOLDMONT_PLUS:
7796 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7797 sizeof(hw_cache_event_ids));
7798 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
7799 sizeof(hw_cache_extra_regs));
7800
7801 intel_pmu_lbr_init_skl();
7802
7803 x86_pmu.event_constraints = intel_slm_event_constraints;
7804 x86_pmu.extra_regs = intel_glm_extra_regs;
7805 /*
7806 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7807 * for precise cycles.
7808 */
7809 x86_pmu.pebs_aliases = NULL;
7810 x86_pmu.pebs_prec_dist = true;
7811 x86_pmu.lbr_pt_coexist = true;
7812 x86_pmu.pebs_capable = ~0ULL;
7813 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7814 x86_pmu.flags |= PMU_FL_PEBS_ALL;
7815 x86_pmu.get_event_constraints = glp_get_event_constraints;
7816 td_attr = glm_events_attrs;
7817 /* Goldmont Plus has 4-wide pipeline */
7818 event_attr_td_total_slots_scale_glm.event_str = "4";
7819 extra_attr = slm_format_attr;
7820 pr_cont("Goldmont plus events, ");
7821 name = "goldmont_plus";
7822 break;
7823
7824 case INTEL_ATOM_TREMONT_D:
7825 case INTEL_ATOM_TREMONT:
7826 case INTEL_ATOM_TREMONT_L:
7827 x86_pmu.late_ack = true;
7828 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7829 sizeof(hw_cache_event_ids));
7830 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
7831 sizeof(hw_cache_extra_regs));
7832 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7833
7834 intel_pmu_lbr_init_skl();
7835
7836 x86_pmu.event_constraints = intel_slm_event_constraints;
7837 x86_pmu.extra_regs = intel_tnt_extra_regs;
7838 /*
7839 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7840 * for precise cycles.
7841 */
7842 x86_pmu.pebs_aliases = NULL;
7843 x86_pmu.pebs_prec_dist = true;
7844 x86_pmu.lbr_pt_coexist = true;
7845 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7846 x86_pmu.get_event_constraints = tnt_get_event_constraints;
7847 td_attr = tnt_events_attrs;
7848 extra_attr = slm_format_attr;
7849 pr_cont("Tremont events, ");
7850 name = "Tremont";
7851 break;
7852
7853 case INTEL_ATOM_GRACEMONT:
7854 intel_pmu_init_grt(NULL);
7855 intel_pmu_pebs_data_source_grt();
7856 x86_pmu.pebs_latency_data = grt_latency_data;
7857 x86_pmu.get_event_constraints = tnt_get_event_constraints;
7858 td_attr = tnt_events_attrs;
7859 mem_attr = grt_mem_attrs;
7860 extra_attr = nhm_format_attr;
7861 pr_cont("Gracemont events, ");
7862 name = "gracemont";
7863 break;
7864
7865 case INTEL_ATOM_CRESTMONT:
7866 case INTEL_ATOM_CRESTMONT_X:
7867 intel_pmu_init_grt(NULL);
7868 x86_pmu.extra_regs = intel_cmt_extra_regs;
7869 intel_pmu_pebs_data_source_cmt();
7870 x86_pmu.pebs_latency_data = cmt_latency_data;
7871 x86_pmu.get_event_constraints = cmt_get_event_constraints;
7872 td_attr = cmt_events_attrs;
7873 mem_attr = grt_mem_attrs;
7874 extra_attr = cmt_format_attr;
7875 pr_cont("Crestmont events, ");
7876 name = "crestmont";
7877 break;
7878
7879 case INTEL_ATOM_DARKMONT_X:
7880 intel_pmu_init_skt(NULL);
7881 intel_pmu_pebs_data_source_cmt();
7882 x86_pmu.pebs_latency_data = cmt_latency_data;
7883 x86_pmu.get_event_constraints = cmt_get_event_constraints;
7884 td_attr = skt_events_attrs;
7885 mem_attr = grt_mem_attrs;
7886 extra_attr = cmt_format_attr;
7887 pr_cont("Darkmont events, ");
7888 name = "darkmont";
7889 break;
7890
7891 case INTEL_WESTMERE:
7892 case INTEL_WESTMERE_EP:
7893 case INTEL_WESTMERE_EX:
7894 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
7895 sizeof(hw_cache_event_ids));
7896 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
7897 sizeof(hw_cache_extra_regs));
7898
7899 intel_pmu_lbr_init_nhm();
7900
7901 x86_pmu.event_constraints = intel_westmere_event_constraints;
7902 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
7903 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
7904 x86_pmu.extra_regs = intel_westmere_extra_regs;
7905 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7906
7907 mem_attr = nhm_mem_events_attrs;
7908
7909 /* UOPS_ISSUED.STALLED_CYCLES */
7910 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7911 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7912 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
7913 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7914 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
7915
7916 intel_pmu_pebs_data_source_nhm();
7917 extra_attr = nhm_format_attr;
7918 pr_cont("Westmere events, ");
7919 name = "westmere";
7920 break;
7921
7922 case INTEL_SANDYBRIDGE:
7923 case INTEL_SANDYBRIDGE_X:
7924 x86_add_quirk(intel_sandybridge_quirk);
7925 x86_add_quirk(intel_ht_bug);
7926 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7927 sizeof(hw_cache_event_ids));
7928 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7929 sizeof(hw_cache_extra_regs));
7930
7931 intel_pmu_lbr_init_snb();
7932
7933 x86_pmu.event_constraints = intel_snb_event_constraints;
7934 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
7935 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
7936 if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
7937 x86_pmu.extra_regs = intel_snbep_extra_regs;
7938 else
7939 x86_pmu.extra_regs = intel_snb_extra_regs;
7940
7941
7942 /* all extra regs are per-cpu when HT is on */
7943 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7944 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7945
7946 td_attr = snb_events_attrs;
7947 mem_attr = snb_mem_events_attrs;
7948
7949 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7950 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7951 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7952 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
7953 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7954 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
7955
7956 extra_attr = nhm_format_attr;
7957
7958 pr_cont("SandyBridge events, ");
7959 name = "sandybridge";
7960 break;
7961
7962 case INTEL_IVYBRIDGE:
7963 case INTEL_IVYBRIDGE_X:
7964 x86_add_quirk(intel_ht_bug);
7965 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7966 sizeof(hw_cache_event_ids));
7967 /* dTLB-load-misses on IVB is different than SNB */
7968 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
7969
7970 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7971 sizeof(hw_cache_extra_regs));
7972
7973 intel_pmu_lbr_init_snb();
7974
7975 x86_pmu.event_constraints = intel_ivb_event_constraints;
7976 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
7977 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7978 x86_pmu.pebs_prec_dist = true;
7979 if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
7980 x86_pmu.extra_regs = intel_snbep_extra_regs;
7981 else
7982 x86_pmu.extra_regs = intel_snb_extra_regs;
7983 /* all extra regs are per-cpu when HT is on */
7984 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7985 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7986
7987 td_attr = snb_events_attrs;
7988 mem_attr = snb_mem_events_attrs;
7989
7990 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7991 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7992 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7993
7994 extra_attr = nhm_format_attr;
7995
7996 pr_cont("IvyBridge events, ");
7997 name = "ivybridge";
7998 break;
7999
8000
8001 case INTEL_HASWELL:
8002 case INTEL_HASWELL_X:
8003 case INTEL_HASWELL_L:
8004 case INTEL_HASWELL_G:
8005 x86_add_quirk(intel_ht_bug);
8006 x86_add_quirk(intel_pebs_isolation_quirk);
8007 x86_pmu.late_ack = true;
8008 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
8009 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
8010
8011 intel_pmu_lbr_init_hsw();
8012
8013 x86_pmu.event_constraints = intel_hsw_event_constraints;
8014 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
8015 x86_pmu.extra_regs = intel_snbep_extra_regs;
8016 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
8017 x86_pmu.pebs_prec_dist = true;
8018 /* all extra regs are per-cpu when HT is on */
8019 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
8020 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
8021
8022 x86_pmu.hw_config = hsw_hw_config;
8023 x86_pmu.get_event_constraints = hsw_get_event_constraints;
8024 x86_pmu.limit_period = hsw_limit_period;
8025 x86_pmu.lbr_double_abort = true;
8026 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8027 hsw_format_attr : nhm_format_attr;
8028 td_attr = hsw_events_attrs;
8029 mem_attr = hsw_mem_events_attrs;
8030 tsx_attr = hsw_tsx_events_attrs;
8031 pr_cont("Haswell events, ");
8032 name = "haswell";
8033 break;
8034
8035 case INTEL_BROADWELL:
8036 case INTEL_BROADWELL_D:
8037 case INTEL_BROADWELL_G:
8038 case INTEL_BROADWELL_X:
8039 x86_add_quirk(intel_pebs_isolation_quirk);
8040 x86_pmu.late_ack = true;
8041 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
8042 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
8043
8044 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
8045 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
8046 BDW_L3_MISS|HSW_SNOOP_DRAM;
8047 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
8048 HSW_SNOOP_DRAM;
8049 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
8050 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
8051 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
8052 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
8053
8054 intel_pmu_lbr_init_hsw();
8055
8056 x86_pmu.event_constraints = intel_bdw_event_constraints;
8057 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
8058 x86_pmu.extra_regs = intel_snbep_extra_regs;
8059 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
8060 x86_pmu.pebs_prec_dist = true;
8061 /* all extra regs are per-cpu when HT is on */
8062 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
8063 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
8064
8065 x86_pmu.hw_config = hsw_hw_config;
8066 x86_pmu.get_event_constraints = hsw_get_event_constraints;
8067 x86_pmu.limit_period = bdw_limit_period;
8068 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8069 hsw_format_attr : nhm_format_attr;
8070 td_attr = hsw_events_attrs;
8071 mem_attr = hsw_mem_events_attrs;
8072 tsx_attr = hsw_tsx_events_attrs;
8073 pr_cont("Broadwell events, ");
8074 name = "broadwell";
8075 break;
8076
8077 case INTEL_XEON_PHI_KNL:
8078 case INTEL_XEON_PHI_KNM:
8079 memcpy(hw_cache_event_ids,
8080 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
8081 memcpy(hw_cache_extra_regs,
8082 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
8083 intel_pmu_lbr_init_knl();
8084
8085 x86_pmu.event_constraints = intel_slm_event_constraints;
8086 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
8087 x86_pmu.extra_regs = intel_knl_extra_regs;
8088
8089 /* all extra regs are per-cpu when HT is on */
8090 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
8091 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
8092 extra_attr = slm_format_attr;
8093 pr_cont("Knights Landing/Mill events, ");
8094 name = "knights-landing";
8095 break;
8096
8097 case INTEL_SKYLAKE_X:
8098 pmem = true;
8099 fallthrough;
8100 case INTEL_SKYLAKE_L:
8101 case INTEL_SKYLAKE:
8102 case INTEL_KABYLAKE_L:
8103 case INTEL_KABYLAKE:
8104 case INTEL_COMETLAKE_L:
8105 case INTEL_COMETLAKE:
8106 x86_add_quirk(intel_pebs_isolation_quirk);
8107 x86_pmu.late_ack = true;
8108 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
8109 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
8110 intel_pmu_lbr_init_skl();
8111
8112 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
8113 event_attr_td_recovery_bubbles.event_str_noht =
8114 "event=0xd,umask=0x1,cmask=1";
8115 event_attr_td_recovery_bubbles.event_str_ht =
8116 "event=0xd,umask=0x1,cmask=1,any=1";
8117
8118 x86_pmu.event_constraints = intel_skl_event_constraints;
8119 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
8120 x86_pmu.extra_regs = intel_skl_extra_regs;
8121 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
8122 x86_pmu.pebs_prec_dist = true;
8123 /* all extra regs are per-cpu when HT is on */
8124 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
8125 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
8126
8127 x86_pmu.hw_config = hsw_hw_config;
8128 x86_pmu.get_event_constraints = hsw_get_event_constraints;
8129 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8130 hsw_format_attr : nhm_format_attr;
8131 extra_skl_attr = skl_format_attr;
8132 td_attr = hsw_events_attrs;
8133 mem_attr = hsw_mem_events_attrs;
8134 tsx_attr = hsw_tsx_events_attrs;
8135 intel_pmu_pebs_data_source_skl(pmem);
8136
8137 /*
8138 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
8139 * TSX force abort hooks are not required on these systems. Only deploy
8140 * the workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
8141 */
8142 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
8143 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
8144 x86_pmu.flags |= PMU_FL_TFA;
8145 x86_pmu.get_event_constraints = tfa_get_event_constraints;
8146 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
8147 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
8148 }
8149
8150 pr_cont("Skylake events, ");
8151 name = "skylake";
8152 break;
8153
8154 case INTEL_ICELAKE_X:
8155 case INTEL_ICELAKE_D:
8156 x86_pmu.pebs_ept = 1;
8157 pmem = true;
8158 fallthrough;
8159 case INTEL_ICELAKE_L:
8160 case INTEL_ICELAKE:
8161 case INTEL_TIGERLAKE_L:
8162 case INTEL_TIGERLAKE:
8163 case INTEL_ROCKETLAKE:
8164 x86_pmu.late_ack = true;
8165 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
8166 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
8167 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
8168 intel_pmu_lbr_init_skl();
8169
8170 x86_pmu.event_constraints = intel_icl_event_constraints;
8171 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
8172 x86_pmu.extra_regs = intel_icl_extra_regs;
8173 x86_pmu.pebs_aliases = NULL;
8174 x86_pmu.pebs_prec_dist = true;
8175 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
8176 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
8177
8178 x86_pmu.hw_config = hsw_hw_config;
8179 x86_pmu.get_event_constraints = icl_get_event_constraints;
8180 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8181 hsw_format_attr : nhm_format_attr;
8182 extra_skl_attr = skl_format_attr;
8183 mem_attr = icl_events_attrs;
8184 td_attr = icl_td_events_attrs;
8185 tsx_attr = icl_tsx_events_attrs;
8186 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
8187 x86_pmu.lbr_pt_coexist = true;
8188 intel_pmu_pebs_data_source_skl(pmem);
8189 x86_pmu.num_topdown_events = 4;
8190 static_call_update(intel_pmu_update_topdown_event,
8191 &icl_update_topdown_event);
8192 static_call_update(intel_pmu_set_topdown_event_period,
8193 &icl_set_topdown_event_period);
8194 pr_cont("Icelake events, ");
8195 name = "icelake";
8196 break;
8197
8198 case INTEL_SAPPHIRERAPIDS_X:
8199 case INTEL_EMERALDRAPIDS_X:
8200 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
8201 x86_pmu.extra_regs = intel_glc_extra_regs;
8202 pr_cont("Sapphire Rapids events, ");
8203 name = "sapphire_rapids";
8204 goto glc_common;
8205
8206 case INTEL_GRANITERAPIDS_X:
8207 case INTEL_GRANITERAPIDS_D:
8208 x86_pmu.extra_regs = intel_rwc_extra_regs;
8209 pr_cont("Granite Rapids events, ");
8210 name = "granite_rapids";
8211 goto glc_common;
8212
8213 case INTEL_DIAMONDRAPIDS_X:
8214 intel_pmu_init_pnc(NULL);
8215 x86_pmu.pebs_latency_data = pnc_latency_data;
8216
8217 pr_cont("Panthercove events, ");
8218 name = "panthercove";
8219 goto glc_base;
8220
8221 glc_common:
8222 intel_pmu_init_glc(NULL);
8223 intel_pmu_pebs_data_source_skl(true);
8224
8225 glc_base:
8226 x86_pmu.pebs_ept = 1;
8227 x86_pmu.hw_config = hsw_hw_config;
8228 x86_pmu.get_event_constraints = glc_get_event_constraints;
8229 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8230 hsw_format_attr : nhm_format_attr;
8231 extra_skl_attr = skl_format_attr;
8232 mem_attr = glc_events_attrs;
8233 td_attr = glc_td_events_attrs;
8234 tsx_attr = glc_tsx_events_attrs;
8235 break;
8236
8237 case INTEL_ALDERLAKE:
8238 case INTEL_ALDERLAKE_L:
8239 case INTEL_RAPTORLAKE:
8240 case INTEL_RAPTORLAKE_P:
8241 case INTEL_RAPTORLAKE_S:
8242 /*
8243 * Alder Lake has two types of CPU cores, Core and Atom.
8244 *
8245 * Initialize the common PerfMon capabilities here.
8246 */
8247 intel_pmu_init_hybrid(hybrid_big_small);
8248
8249 x86_pmu.pebs_latency_data = grt_latency_data;
8250 x86_pmu.get_event_constraints = adl_get_event_constraints;
8251 x86_pmu.hw_config = adl_hw_config;
8252 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
8253
8254 td_attr = adl_hybrid_events_attrs;
8255 mem_attr = adl_hybrid_mem_attrs;
8256 tsx_attr = adl_hybrid_tsx_attrs;
8257 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8258 adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
8259
8260 /* Initialize big core specific PerfMon capabilities.*/
8261 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
8262 intel_pmu_init_glc(&pmu->pmu);
8263 if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
8264 pmu->cntr_mask64 <<= 2;
8265 pmu->cntr_mask64 |= 0x3;
8266 pmu->fixed_cntr_mask64 <<= 1;
8267 pmu->fixed_cntr_mask64 |= 0x1;
8268 } else {
8269 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
8270 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
8271 }
8272
8273 /*
8274 * Quirk: On some Alder Lake machines, when all E-cores are disabled in
8275 * the BIOS, leaf 0xA enumerates all counters of the P-cores. However,
8276 * X86_FEATURE_HYBRID_CPU is still set. The code above would then
8277 * mistakenly add extra counters for the P-cores. Correct the number of
8278 * counters here.
8279 */
8280 if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
8281 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
8282 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
8283 }
8284
8285 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
8286 pmu->unconstrained = (struct event_constraint)
8287 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
8288 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
8289
8290 pmu->extra_regs = intel_glc_extra_regs;
8291
8292 /* Initialize Atom core specific PerfMon capabilities.*/
8293 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
8294 intel_pmu_init_grt(&pmu->pmu);
8295
8296 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
8297 intel_pmu_pebs_data_source_adl();
8298 pr_cont("Alderlake Hybrid events, ");
8299 name = "alderlake_hybrid";
8300 break;
8301
8302 case INTEL_METEORLAKE:
8303 case INTEL_METEORLAKE_L:
8304 case INTEL_ARROWLAKE_U:
8305 intel_pmu_init_hybrid(hybrid_big_small);
8306
8307 x86_pmu.pebs_latency_data = cmt_latency_data;
8308 x86_pmu.get_event_constraints = mtl_get_event_constraints;
8309 x86_pmu.hw_config = adl_hw_config;
8310
8311 td_attr = adl_hybrid_events_attrs;
8312 mem_attr = mtl_hybrid_mem_attrs;
8313 tsx_attr = adl_hybrid_tsx_attrs;
8314 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8315 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
8316
8317 /* Initialize big core specific PerfMon capabilities.*/
8318 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
8319 intel_pmu_init_glc(&pmu->pmu);
8320 pmu->extra_regs = intel_rwc_extra_regs;
8321
8322 /* Initialize Atom core specific PerfMon capabilities.*/
8323 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
8324 intel_pmu_init_grt(&pmu->pmu);
8325 pmu->extra_regs = intel_cmt_extra_regs;
8326
8327 intel_pmu_pebs_data_source_mtl();
8328 pr_cont("Meteorlake Hybrid events, ");
8329 name = "meteorlake_hybrid";
8330 break;
8331
8332 case INTEL_PANTHERLAKE_L:
8333 case INTEL_WILDCATLAKE_L:
8334 pr_cont("Pantherlake Hybrid events, ");
8335 name = "pantherlake_hybrid";
8336 goto lnl_common;
8337
8338 case INTEL_LUNARLAKE_M:
8339 case INTEL_ARROWLAKE:
8340 pr_cont("Lunarlake Hybrid events, ");
8341 name = "lunarlake_hybrid";
8342
8343 lnl_common:
8344 intel_pmu_init_hybrid(hybrid_big_small);
8345
8346 x86_pmu.pebs_latency_data = lnl_latency_data;
8347 x86_pmu.get_event_constraints = mtl_get_event_constraints;
8348 x86_pmu.hw_config = adl_hw_config;
8349
8350 td_attr = lnl_hybrid_events_attrs;
8351 mem_attr = mtl_hybrid_mem_attrs;
8352 tsx_attr = adl_hybrid_tsx_attrs;
8353 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8354 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
8355
8356 /* Initialize big core specific PerfMon capabilities.*/
8357 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
8358 intel_pmu_init_lnc(&pmu->pmu);
8359
8360 /* Initialize Atom core specific PerfMon capabilities.*/
8361 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
8362 intel_pmu_init_skt(&pmu->pmu);
8363
8364 intel_pmu_pebs_data_source_lnl();
8365 break;
8366
8367 case INTEL_ARROWLAKE_H:
		intel_pmu_init_hybrid(hybrid_big_small_tiny);

		x86_pmu.pebs_latency_data = arl_h_latency_data;
		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
		x86_pmu.hw_config = arl_h_hw_config;

		td_attr = arl_h_hybrid_events_attrs;
		mem_attr = arl_h_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_lnc(&pmu->pmu);

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_skt(&pmu->pmu);

		/* Initialize Lower Power Atom specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_arl_h();
		pr_cont("ArrowLake-H Hybrid events, ");
		name = "arrowlake_h_hybrid";
		break;

	case INTEL_NOVALAKE:
	case INTEL_NOVALAKE_L:
		pr_cont("Novalake Hybrid events, ");
		name = "novalake_hybrid";
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = nvl_latency_data;
		x86_pmu.get_event_constraints = mtl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;

		td_attr = lnl_hybrid_events_attrs;
		mem_attr = mtl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_pnc(&pmu->pmu);

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_arw(&pmu->pmu);

		intel_pmu_pebs_data_source_lnl();
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up to
			 * 16 fixed counters. For fixed counter 4 and later, the
			 * pseudo-encoding is applied.
			 * The constraint list may be truncated to match the CPUID
			 * enumeration by inserting an EVENT_CONSTRAINT_END marker.
			 */
			if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
				x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
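			/*
			 * Setting .weight to -1 at this index is the
			 * EVENT_CONSTRAINT_END marker mentioned above: it cuts
			 * the generic v5 constraint list down to the fixed
			 * counters actually enumerated by CPUID.
			 */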
			intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

	/*
	 * The archPerfmonExt leaf (0x23) provides an enhanced, per-core
	 * enumeration of the PMU architectural features. For non-hybrid,
	 * every core has the same PMU capabilities, so it is sufficient to
	 * update x86_pmu from the booting CPU. For hybrid, x86_pmu only
	 * keeps the common capabilities, so retain the values from leaf 0xa
	 * here; the core-type specific update is done later, when a CPU of
	 * a new type comes online.
	 */
	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
		update_pmu_cap(NULL);

	if (x86_pmu.arch_pebs) {
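		/*
		 * Wire up the extended event enable/disable handlers only
		 * when architectural PEBS is available; otherwise the
		 * static calls keep their previous targets.
		 */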
		static_call_update(intel_pmu_disable_event_ext,
				   intel_pmu_disable_event_ext);
		static_call_update(intel_pmu_enable_event_ext,
				   intel_pmu_enable_event_ext);
		pr_cont("Architectural PEBS, ");
	}

	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
				      &x86_pmu.fixed_cntr_mask64,
				      &x86_pmu.intel_ctrl);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
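	/* Fall back to the baseline format list, which omits the AnyThread bit. */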
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints_all(NULL);

	/*
	 * Accessing the LBR MSRs may cause a #GP under certain circumstances.
	 * Probe all LBR MSRs here and disable LBR use if any of them cannot
	 * be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
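	/*
	 * check_msr() probes each MSR with the given test bits and restores
	 * the original value; a single inaccessible FROM/TO pair is enough
	 * to disable the whole LBR stack.
	 */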
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
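		/*
		 * The callback comparison stands in for a version check:
		 * intel_pmu_disable_all is only installed by the intel_pmu
		 * template used for perfmon v2 and later, while v1 runs on
		 * the core_pmu callbacks.
		 */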
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
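		/*
		 * Counters are programmed with the negated sample period and
		 * overflow is detected via the top bit, so the maximum period
		 * is half the counter's value range.
		 */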
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	/* Support V6+ MSR Aliasing */
	if (x86_pmu.version >= 6) {
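		/*
		 * The v6 alias region keeps each counter's CTR and CFG MSRs
		 * at a fixed stride from the GP0 base, so the address offset
		 * is a simple function of the counter index (see
		 * intel_pmu_v6_addr_offset()).
		 */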
		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
	}

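	/*
	 * GLOBAL_CTRL_EN_PERF_METRICS enables the fixed-function TopDown
	 * metrics (PERF_METRICS) via the global control value. Hybrid PMUs
	 * keep a per-type intel_ctrl and are handled separately.
	 */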
	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;

	if (x86_pmu.intel_cap.pebs_timing_info)
		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information, to check
 * whether or not HT is enabled.
 * If HT is off, disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

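	/*
	 * HT is off, so the exclusive-counter workaround is not needed:
	 * pause the perf-based hardlockup detector, drop the exclusive
	 * scheduling flags and callbacks, free the per-CPU exclusive
	 * counter state, then restart the detector.
	 */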
	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)