// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/debugreg.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
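
/*
 * Illustrative note: each entry above is a raw PERFEVTSEL encoding,
 * (unit mask << 8) | event select. E.g. 0x003c is event 0x3c, umask
 * 0x00 (UNHALTED_CORE_CYCLES) and 0x412e is event 0x2e, umask 0x41
 * (LLC misses). 0x0300 has no hardware encoding; it is a pseudo-event
 * resolved to fixed counter 2 via the FIXED_EVENT_CONSTRAINT(0x0300, 2)
 * entries below.
 */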

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
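
/*
 * Illustrative note: the second argument of INTEL_EVENT_CONSTRAINT()
 * is a counter bitmask, not a count: 0x1 pins the event to general
 * counter 0, 0x2 to counter 1, 0x3 allows counters 0-1, and so on.
 * FIXED_EVENT_CONSTRAINT(event, n), used in the tables below, pins a
 * pseudo event to fixed counter n instead.
 */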

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
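
/*
 * Illustrative note: an extra_reg entry declares that events matching
 * the (event, umask) pair - 0x01b7 is OFFCORE_RESPONSE_0 here - also
 * need an auxiliary MSR programmed with the response mask taken from
 * the event's config1, limited to the valid-bits mask (0xffff above).
 * The RSP_0/RSP_1 index identifies the register in the per-core
 * shared-regs state that coordinates concurrent users across HT
 * threads.
 */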

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code do not form a contiguous
	 * field; some bits #GP when set. These are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_glc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * Event codes 0x2E and 0x3C are exceptions and have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 have no restrictions;
	 * the exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_lnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x20, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),

	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),

	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),

	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
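
/*
 * Illustrative usage (assuming the standard perf sysfs layout): these
 * attributes surface under
 * /sys/bus/event_source/devices/cpu/events/mem-loads etc., so tooling
 * can open them symbolically, e.g. "perf stat -e cpu/mem-loads/"; the
 * ldlat=3 field supplies the default load-latency threshold used by
 * PEBS load-latency sampling.
 */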

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all counted in slots; a slot is an issue opportunity
 * in a 4-wide pipeline. Some events are already reported in slots; for
 * cycle events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2: (count_t0 + count_t1) / 2 * 4.
 */
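
/*
 * Worked example (illustrative): if thread 0 counts 100 unhalted cycles
 * and thread 1 counts 60, the summed count is 160 and total slots are
 * (100 + 60) / 2 * 4 = 320, which the HT-on ".scale" of 2 reproduces as
 * 160 * 2. With HT off the same cycle count simply scales by 4.
 */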

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1"); /* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
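
/*
 * Illustrative note: event=0x00 with umask 0x80-0x87 are not hardware
 * events; they are pseudo-encodings for the topdown metrics read from
 * the PERF_METRICS MSR, and they pair with the SLOTS pseudo event
 * (0x0400) on fixed counter 3 - see the METRIC_EVENT_CONSTRAINT()
 * entries in the constraint tables above.
 */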

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
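
/*
 * Illustrative usage: intel_pmu_event_map(PERF_COUNT_HW_INSTRUCTIONS)
 * returns 0x00c0, the raw encoding that generic PERF_TYPE_HARDWARE
 * events are translated to at event init time.
 */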

static __initconst const u64 glc_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
};
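
/*
 * Illustrative note on the cache tables in this file: a value of 0 marks
 * a (cache, op, result) combination the CPU cannot count (event creation
 * is expected to fail with -ENOENT), while -1 marks a nonsensical
 * combination such as writes to the instruction cache (-EINVAL). Other
 * values are raw (umask << 8) | event encodings, or an OFFCORE_RESPONSE
 * event whose detail comes from the matching *_hw_cache_extra_regs table.
 */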

static __initconst const u64 glc_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS) ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS) ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS) ] = 0x3fb3000001,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD BIT_ULL(0)
#define SKL_DEMAND_RFO BIT_ULL(1)
#define SKL_ANY_RESPONSE BIT_ULL(16)
#define SKL_SUPPLIER_NONE BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
#define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP0_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP1_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT BIT_ULL(30)
#define SKL_SNOOP_NONE BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
#define SKL_SNOOP_MISS BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
#define SKL_SNOOP_HITM BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM BIT_ULL(37)
#define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
		       SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
		       SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
		       SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
			SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
			SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
			SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE SKL_DEMAND_RFO
#define SKL_LLC_ACCESS SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
			    SKL_L3_MISS_REMOTE_HOP1_DRAM| \
			    SKL_L3_MISS_REMOTE_HOP2P_DRAM)
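
/*
 * Worked example (illustrative): the LL/OP_READ/RESULT_MISS entry in the
 * table below is SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|
 * SKL_SUPPLIER_NONE: demand data reads (bit 0) that had to go to local
 * or remote DRAM past the L3 (bits 26-29), with any snoop outcome
 * (bits 30-37) plus supplier-none (bit 17). The composed mask is what
 * gets programmed into MSR_OFFCORE_RSP_x.
 */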

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD (1ULL << 0)
#define SNB_DMND_RFO (1ULL << 1)
#define SNB_DMND_IFETCH (1ULL << 2)
#define SNB_DMND_WB (1ULL << 3)
#define SNB_PF_DATA_RD (1ULL << 4)
#define SNB_PF_RFO (1ULL << 5)
#define SNB_PF_IFETCH (1ULL << 6)
#define SNB_LLC_DATA_RD (1ULL << 7)
#define SNB_LLC_RFO (1ULL << 8)
#define SNB_LLC_IFETCH (1ULL << 9)
#define SNB_BUS_LOCKS (1ULL << 10)
#define SNB_STRM_ST (1ULL << 11)
#define SNB_OTHER (1ULL << 15)
#define SNB_RESP_ANY (1ULL << 16)
#define SNB_NO_SUPP (1ULL << 17)
#define SNB_LLC_HITM (1ULL << 18)
#define SNB_LLC_HITE (1ULL << 19)
#define SNB_LLC_HITS (1ULL << 20)
#define SNB_LLC_HITF (1ULL << 21)
#define SNB_LOCAL (1ULL << 22)
#define SNB_REMOTE (0xffULL << 23)
#define SNB_SNP_NONE (1ULL << 31)
#define SNB_SNP_NOT_NEEDED (1ULL << 32)
#define SNB_SNP_MISS (1ULL << 33)
#define SNB_NO_FWD (1ULL << 34)
#define SNB_SNP_FWD (1ULL << 35)
#define SNB_HITM (1ULL << 36)
#define SNB_NON_DRAM (1ULL << 37)

#define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
		     SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
		     SNB_HITM)

#define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS SNB_RESP_ANY
#define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
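
/*
 * Worked example (illustrative): SNB_DMND_READ|SNB_L3_ACCESS, used for
 * LL/OP_READ/RESULT_ACCESS below, combines SNB_DMND_DATA_RD (bit 0)
 * and SNB_LLC_DATA_RD (bit 7) with any response (bit 16); the MISS
 * variant swaps bit 16 for the full supplier/snoop mask
 * SNB_DRAM_ANY|SNB_NON_DRAM.
 */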

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD BIT_ULL(0)
#define HSW_DEMAND_RFO BIT_ULL(1)
#define HSW_ANY_RESPONSE BIT_ULL(16)
#define HSW_SUPPLIER_NONE BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
#define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
		     HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
		     HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
#define HSW_SNOOP_MISS BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
#define HSW_SNOOP_HITM BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM BIT_ULL(37)
#define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
		       HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
		       HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
		       HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
			    HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL BIT(26)
#define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
		     HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
		     HSW_L3_MISS_REMOTE_HOP2P)
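
/*
 * Illustrative note: Broadwell moved the L3-miss-to-local-DRAM response
 * bit from position 22 (HSW_L3_MISS_LOCAL_DRAM) to 26, hence the
 * separate BDW_L3_MISS_LOCAL; the remote-hop bits are shared with
 * Haswell.
 */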


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD (1 << 0)
#define NHM_DMND_RFO (1 << 1)
#define NHM_DMND_IFETCH (1 << 2)
#define NHM_DMND_WB (1 << 3)
#define NHM_PF_DATA_RD (1 << 4)
#define NHM_PF_DATA_RFO (1 << 5)
#define NHM_PF_IFETCH (1 << 6)
#define NHM_OFFCORE_OTHER (1 << 7)
#define NHM_UNCORE_HIT (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
#define NHM_OTHER_CORE_HITM (1 << 10)
/* reserved */
#define NHM_REMOTE_CACHE_FWD (1 << 12)
#define NHM_REMOTE_DRAM (1 << 13)
#define NHM_LOCAL_DRAM (1 << 14)
#define NHM_NON_DRAM (1 << 15)

#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE (NHM_REMOTE_DRAM)

#define NHM_DMND_READ (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
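
/*
 * Worked example (illustrative): NHM_DMND_READ|NHM_L3_ACCESS in the
 * table below selects demand data reads (bit 0) with any L3 outcome:
 * NHM_L3_ACCESS is the union of the three hit responses (bits 8-10)
 * and the four miss responses (bits 12-15), bit 11 being reserved.
 */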

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1427 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1428 },
1429 [ C(OP_PREFETCH) ] = {
1430 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1431 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1432 },
1433 },
1434 [ C(L1I ) ] = {
1435 [ C(OP_READ) ] = {
1436 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1437 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1438 },
1439 [ C(OP_WRITE) ] = {
1440 [ C(RESULT_ACCESS) ] = -1,
1441 [ C(RESULT_MISS) ] = -1,
1442 },
1443 [ C(OP_PREFETCH) ] = {
1444 [ C(RESULT_ACCESS) ] = 0x0,
1445 [ C(RESULT_MISS) ] = 0x0,
1446 },
1447 },
1448 [ C(LL ) ] = {
1449 [ C(OP_READ) ] = {
1450 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1451 [ C(RESULT_ACCESS) ] = 0x01b7,
1452 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1453 [ C(RESULT_MISS) ] = 0x01b7,
1454 },
1455 /*
1456 * Use RFO, not WRITEBACK, because a write miss would typically occur
1457 * on RFO.
1458 */
1459 [ C(OP_WRITE) ] = {
1460 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1461 [ C(RESULT_ACCESS) ] = 0x01b7,
1462 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1463 [ C(RESULT_MISS) ] = 0x01b7,
1464 },
1465 [ C(OP_PREFETCH) ] = {
1466 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1467 [ C(RESULT_ACCESS) ] = 0x01b7,
1468 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1469 [ C(RESULT_MISS) ] = 0x01b7,
1470 },
1471 },
1472 [ C(DTLB) ] = {
1473 [ C(OP_READ) ] = {
1474 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1475 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1476 },
1477 [ C(OP_WRITE) ] = {
1478 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1479 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1480 },
1481 [ C(OP_PREFETCH) ] = {
1482 [ C(RESULT_ACCESS) ] = 0x0,
1483 [ C(RESULT_MISS) ] = 0x0,
1484 },
1485 },
1486 [ C(ITLB) ] = {
1487 [ C(OP_READ) ] = {
1488 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1489 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1490 },
1491 [ C(OP_WRITE) ] = {
1492 [ C(RESULT_ACCESS) ] = -1,
1493 [ C(RESULT_MISS) ] = -1,
1494 },
1495 [ C(OP_PREFETCH) ] = {
1496 [ C(RESULT_ACCESS) ] = -1,
1497 [ C(RESULT_MISS) ] = -1,
1498 },
1499 },
1500 [ C(BPU ) ] = {
1501 [ C(OP_READ) ] = {
1502 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1503 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1504 },
1505 [ C(OP_WRITE) ] = {
1506 [ C(RESULT_ACCESS) ] = -1,
1507 [ C(RESULT_MISS) ] = -1,
1508 },
1509 [ C(OP_PREFETCH) ] = {
1510 [ C(RESULT_ACCESS) ] = -1,
1511 [ C(RESULT_MISS) ] = -1,
1512 },
1513 },
1514 [ C(NODE) ] = {
1515 [ C(OP_READ) ] = {
1516 [ C(RESULT_ACCESS) ] = 0x01b7,
1517 [ C(RESULT_MISS) ] = 0x01b7,
1518 },
1519 [ C(OP_WRITE) ] = {
1520 [ C(RESULT_ACCESS) ] = 0x01b7,
1521 [ C(RESULT_MISS) ] = 0x01b7,
1522 },
1523 [ C(OP_PREFETCH) ] = {
1524 [ C(RESULT_ACCESS) ] = 0x01b7,
1525 [ C(RESULT_MISS) ] = 0x01b7,
1526 },
1527 },
1528 };
1529
1530 static __initconst const u64 core2_hw_cache_event_ids
1531 [PERF_COUNT_HW_CACHE_MAX]
1532 [PERF_COUNT_HW_CACHE_OP_MAX]
1533 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1534 {
1535 [ C(L1D) ] = {
1536 [ C(OP_READ) ] = {
1537 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1538 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1539 },
1540 [ C(OP_WRITE) ] = {
1541 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1542 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1543 },
1544 [ C(OP_PREFETCH) ] = {
1545 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1546 [ C(RESULT_MISS) ] = 0,
1547 },
1548 },
1549 [ C(L1I ) ] = {
1550 [ C(OP_READ) ] = {
1551 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1552 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1553 },
1554 [ C(OP_WRITE) ] = {
1555 [ C(RESULT_ACCESS) ] = -1,
1556 [ C(RESULT_MISS) ] = -1,
1557 },
1558 [ C(OP_PREFETCH) ] = {
1559 [ C(RESULT_ACCESS) ] = 0,
1560 [ C(RESULT_MISS) ] = 0,
1561 },
1562 },
1563 [ C(LL ) ] = {
1564 [ C(OP_READ) ] = {
1565 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1566 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1567 },
1568 [ C(OP_WRITE) ] = {
1569 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1570 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1571 },
1572 [ C(OP_PREFETCH) ] = {
1573 [ C(RESULT_ACCESS) ] = 0,
1574 [ C(RESULT_MISS) ] = 0,
1575 },
1576 },
1577 [ C(DTLB) ] = {
1578 [ C(OP_READ) ] = {
1579 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1580 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1581 },
1582 [ C(OP_WRITE) ] = {
1583 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1584 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1585 },
1586 [ C(OP_PREFETCH) ] = {
1587 [ C(RESULT_ACCESS) ] = 0,
1588 [ C(RESULT_MISS) ] = 0,
1589 },
1590 },
1591 [ C(ITLB) ] = {
1592 [ C(OP_READ) ] = {
1593 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1594 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1595 },
1596 [ C(OP_WRITE) ] = {
1597 [ C(RESULT_ACCESS) ] = -1,
1598 [ C(RESULT_MISS) ] = -1,
1599 },
1600 [ C(OP_PREFETCH) ] = {
1601 [ C(RESULT_ACCESS) ] = -1,
1602 [ C(RESULT_MISS) ] = -1,
1603 },
1604 },
1605 [ C(BPU ) ] = {
1606 [ C(OP_READ) ] = {
1607 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1608 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1609 },
1610 [ C(OP_WRITE) ] = {
1611 [ C(RESULT_ACCESS) ] = -1,
1612 [ C(RESULT_MISS) ] = -1,
1613 },
1614 [ C(OP_PREFETCH) ] = {
1615 [ C(RESULT_ACCESS) ] = -1,
1616 [ C(RESULT_MISS) ] = -1,
1617 },
1618 },
1619 };
1620
1621 static __initconst const u64 atom_hw_cache_event_ids
1622 [PERF_COUNT_HW_CACHE_MAX]
1623 [PERF_COUNT_HW_CACHE_OP_MAX]
1624 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1625 {
1626 [ C(L1D) ] = {
1627 [ C(OP_READ) ] = {
1628 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1629 [ C(RESULT_MISS) ] = 0,
1630 },
1631 [ C(OP_WRITE) ] = {
1632 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1633 [ C(RESULT_MISS) ] = 0,
1634 },
1635 [ C(OP_PREFETCH) ] = {
1636 [ C(RESULT_ACCESS) ] = 0x0,
1637 [ C(RESULT_MISS) ] = 0,
1638 },
1639 },
1640 [ C(L1I ) ] = {
1641 [ C(OP_READ) ] = {
1642 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1643 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1644 },
1645 [ C(OP_WRITE) ] = {
1646 [ C(RESULT_ACCESS) ] = -1,
1647 [ C(RESULT_MISS) ] = -1,
1648 },
1649 [ C(OP_PREFETCH) ] = {
1650 [ C(RESULT_ACCESS) ] = 0,
1651 [ C(RESULT_MISS) ] = 0,
1652 },
1653 },
1654 [ C(LL ) ] = {
1655 [ C(OP_READ) ] = {
1656 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1657 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1658 },
1659 [ C(OP_WRITE) ] = {
1660 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1661 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1662 },
1663 [ C(OP_PREFETCH) ] = {
1664 [ C(RESULT_ACCESS) ] = 0,
1665 [ C(RESULT_MISS) ] = 0,
1666 },
1667 },
1668 [ C(DTLB) ] = {
1669 [ C(OP_READ) ] = {
1670 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1671 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1672 },
1673 [ C(OP_WRITE) ] = {
1674 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1675 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1676 },
1677 [ C(OP_PREFETCH) ] = {
1678 [ C(RESULT_ACCESS) ] = 0,
1679 [ C(RESULT_MISS) ] = 0,
1680 },
1681 },
1682 [ C(ITLB) ] = {
1683 [ C(OP_READ) ] = {
1684 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1685 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1686 },
1687 [ C(OP_WRITE) ] = {
1688 [ C(RESULT_ACCESS) ] = -1,
1689 [ C(RESULT_MISS) ] = -1,
1690 },
1691 [ C(OP_PREFETCH) ] = {
1692 [ C(RESULT_ACCESS) ] = -1,
1693 [ C(RESULT_MISS) ] = -1,
1694 },
1695 },
1696 [ C(BPU ) ] = {
1697 [ C(OP_READ) ] = {
1698 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1699 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1700 },
1701 [ C(OP_WRITE) ] = {
1702 [ C(RESULT_ACCESS) ] = -1,
1703 [ C(RESULT_MISS) ] = -1,
1704 },
1705 [ C(OP_PREFETCH) ] = {
1706 [ C(RESULT_ACCESS) ] = -1,
1707 [ C(RESULT_MISS) ] = -1,
1708 },
1709 },
1710 };
1711
1712 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1713 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1714 /* no_alloc_cycles.not_delivered */
1715 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1716 "event=0xca,umask=0x50");
1717 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1718 /* uops_retired.all */
1719 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1720 "event=0xc2,umask=0x10");
1721 /* uops_retired.all */
1722 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1723 "event=0xc2,umask=0x10");
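/*
 * The "2" scale on topdown-total-slots above reflects Silvermont's
 * two-wide pipeline: total slots = unhalted core cycles * 2. The
 * Goldmont variant further below uses a scale of "3" for its
 * three-wide pipeline.
 */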
1724
1725 static struct attribute *slm_events_attrs[] = {
1726 EVENT_PTR(td_total_slots_slm),
1727 EVENT_PTR(td_total_slots_scale_slm),
1728 EVENT_PTR(td_fetch_bubbles_slm),
1729 EVENT_PTR(td_fetch_bubbles_scale_slm),
1730 EVENT_PTR(td_slots_issued_slm),
1731 EVENT_PTR(td_slots_retired_slm),
1732 NULL
1733 };
1734
1735 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1736 {
1737 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1738 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1739 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1740 EVENT_EXTRA_END
1741 };
1742
1743 #define SLM_DMND_READ SNB_DMND_DATA_RD
1744 #define SLM_DMND_WRITE SNB_DMND_RFO
1745 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1746
1747 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1748 #define SLM_LLC_ACCESS SNB_RESP_ANY
1749 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
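/*
 * These composed masks are OFFCORE_RSP bit patterns. For example, the
 * table below uses SLM_DMND_READ|SLM_LLC_ACCESS, which expands to
 * SNB_DMND_DATA_RD|SNB_RESP_ANY ("demand data read, any response");
 * the value is programmed into MSR_OFFCORE_RSP_x via the 0x01b7/0x02b7
 * extra-reg entries above.
 */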
1750
1751 static __initconst const u64 slm_hw_cache_extra_regs
1752 [PERF_COUNT_HW_CACHE_MAX]
1753 [PERF_COUNT_HW_CACHE_OP_MAX]
1754 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1755 {
1756 [ C(LL ) ] = {
1757 [ C(OP_READ) ] = {
1758 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1759 [ C(RESULT_MISS) ] = 0,
1760 },
1761 [ C(OP_WRITE) ] = {
1762 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1763 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1764 },
1765 [ C(OP_PREFETCH) ] = {
1766 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1767 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1768 },
1769 },
1770 };
1771
1772 static __initconst const u64 slm_hw_cache_event_ids
1773 [PERF_COUNT_HW_CACHE_MAX]
1774 [PERF_COUNT_HW_CACHE_OP_MAX]
1775 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1776 {
1777 [ C(L1D) ] = {
1778 [ C(OP_READ) ] = {
1779 [ C(RESULT_ACCESS) ] = 0,
1780 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1781 },
1782 [ C(OP_WRITE) ] = {
1783 [ C(RESULT_ACCESS) ] = 0,
1784 [ C(RESULT_MISS) ] = 0,
1785 },
1786 [ C(OP_PREFETCH) ] = {
1787 [ C(RESULT_ACCESS) ] = 0,
1788 [ C(RESULT_MISS) ] = 0,
1789 },
1790 },
1791 [ C(L1I ) ] = {
1792 [ C(OP_READ) ] = {
1793 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1794 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1795 },
1796 [ C(OP_WRITE) ] = {
1797 [ C(RESULT_ACCESS) ] = -1,
1798 [ C(RESULT_MISS) ] = -1,
1799 },
1800 [ C(OP_PREFETCH) ] = {
1801 [ C(RESULT_ACCESS) ] = 0,
1802 [ C(RESULT_MISS) ] = 0,
1803 },
1804 },
1805 [ C(LL ) ] = {
1806 [ C(OP_READ) ] = {
1807 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1808 [ C(RESULT_ACCESS) ] = 0x01b7,
1809 [ C(RESULT_MISS) ] = 0,
1810 },
1811 [ C(OP_WRITE) ] = {
1812 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1813 [ C(RESULT_ACCESS) ] = 0x01b7,
1814 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1815 [ C(RESULT_MISS) ] = 0x01b7,
1816 },
1817 [ C(OP_PREFETCH) ] = {
1818 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1819 [ C(RESULT_ACCESS) ] = 0x01b7,
1820 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1821 [ C(RESULT_MISS) ] = 0x01b7,
1822 },
1823 },
1824 [ C(DTLB) ] = {
1825 [ C(OP_READ) ] = {
1826 [ C(RESULT_ACCESS) ] = 0,
1827 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1828 },
1829 [ C(OP_WRITE) ] = {
1830 [ C(RESULT_ACCESS) ] = 0,
1831 [ C(RESULT_MISS) ] = 0,
1832 },
1833 [ C(OP_PREFETCH) ] = {
1834 [ C(RESULT_ACCESS) ] = 0,
1835 [ C(RESULT_MISS) ] = 0,
1836 },
1837 },
1838 [ C(ITLB) ] = {
1839 [ C(OP_READ) ] = {
1840 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1841 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1842 },
1843 [ C(OP_WRITE) ] = {
1844 [ C(RESULT_ACCESS) ] = -1,
1845 [ C(RESULT_MISS) ] = -1,
1846 },
1847 [ C(OP_PREFETCH) ] = {
1848 [ C(RESULT_ACCESS) ] = -1,
1849 [ C(RESULT_MISS) ] = -1,
1850 },
1851 },
1852 [ C(BPU ) ] = {
1853 [ C(OP_READ) ] = {
1854 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1855 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1856 },
1857 [ C(OP_WRITE) ] = {
1858 [ C(RESULT_ACCESS) ] = -1,
1859 [ C(RESULT_MISS) ] = -1,
1860 },
1861 [ C(OP_PREFETCH) ] = {
1862 [ C(RESULT_ACCESS) ] = -1,
1863 [ C(RESULT_MISS) ] = -1,
1864 },
1865 },
1866 };
1867
1868 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1869 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1870 /* UOPS_NOT_DELIVERED.ANY */
1871 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1872 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1873 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1874 /* UOPS_RETIRED.ANY */
1875 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1876 /* UOPS_ISSUED.ANY */
1877 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1878
1879 static struct attribute *glm_events_attrs[] = {
1880 EVENT_PTR(td_total_slots_glm),
1881 EVENT_PTR(td_total_slots_scale_glm),
1882 EVENT_PTR(td_fetch_bubbles_glm),
1883 EVENT_PTR(td_recovery_bubbles_glm),
1884 EVENT_PTR(td_slots_issued_glm),
1885 EVENT_PTR(td_slots_retired_glm),
1886 NULL
1887 };
1888
1889 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1890 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1891 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1892 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1893 EVENT_EXTRA_END
1894 };
1895
1896 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1897 #define GLM_DEMAND_RFO BIT_ULL(1)
1898 #define GLM_ANY_RESPONSE BIT_ULL(16)
1899 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1900 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1901 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1902 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1903 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1904 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1905 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
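/*
 * Worked example: GLM_DEMAND_READ|GLM_LLC_ACCESS is
 * BIT_ULL(0)|BIT_ULL(16) == 0x10001, i.e. "demand data read, any
 * response" in the OFFCORE_RSP encoding.
 */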
1906
1907 static __initconst const u64 glm_hw_cache_event_ids
1908 [PERF_COUNT_HW_CACHE_MAX]
1909 [PERF_COUNT_HW_CACHE_OP_MAX]
1910 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1911 [C(L1D)] = {
1912 [C(OP_READ)] = {
1913 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1914 [C(RESULT_MISS)] = 0x0,
1915 },
1916 [C(OP_WRITE)] = {
1917 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1918 [C(RESULT_MISS)] = 0x0,
1919 },
1920 [C(OP_PREFETCH)] = {
1921 [C(RESULT_ACCESS)] = 0x0,
1922 [C(RESULT_MISS)] = 0x0,
1923 },
1924 },
1925 [C(L1I)] = {
1926 [C(OP_READ)] = {
1927 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1928 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1929 },
1930 [C(OP_WRITE)] = {
1931 [C(RESULT_ACCESS)] = -1,
1932 [C(RESULT_MISS)] = -1,
1933 },
1934 [C(OP_PREFETCH)] = {
1935 [C(RESULT_ACCESS)] = 0x0,
1936 [C(RESULT_MISS)] = 0x0,
1937 },
1938 },
1939 [C(LL)] = {
1940 [C(OP_READ)] = {
1941 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1942 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1943 },
1944 [C(OP_WRITE)] = {
1945 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1946 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1947 },
1948 [C(OP_PREFETCH)] = {
1949 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1950 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1951 },
1952 },
1953 [C(DTLB)] = {
1954 [C(OP_READ)] = {
1955 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1956 [C(RESULT_MISS)] = 0x0,
1957 },
1958 [C(OP_WRITE)] = {
1959 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1960 [C(RESULT_MISS)] = 0x0,
1961 },
1962 [C(OP_PREFETCH)] = {
1963 [C(RESULT_ACCESS)] = 0x0,
1964 [C(RESULT_MISS)] = 0x0,
1965 },
1966 },
1967 [C(ITLB)] = {
1968 [C(OP_READ)] = {
1969 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1970 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1971 },
1972 [C(OP_WRITE)] = {
1973 [C(RESULT_ACCESS)] = -1,
1974 [C(RESULT_MISS)] = -1,
1975 },
1976 [C(OP_PREFETCH)] = {
1977 [C(RESULT_ACCESS)] = -1,
1978 [C(RESULT_MISS)] = -1,
1979 },
1980 },
1981 [C(BPU)] = {
1982 [C(OP_READ)] = {
1983 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1984 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1985 },
1986 [C(OP_WRITE)] = {
1987 [C(RESULT_ACCESS)] = -1,
1988 [C(RESULT_MISS)] = -1,
1989 },
1990 [C(OP_PREFETCH)] = {
1991 [C(RESULT_ACCESS)] = -1,
1992 [C(RESULT_MISS)] = -1,
1993 },
1994 },
1995 };
1996
1997 static __initconst const u64 glm_hw_cache_extra_regs
1998 [PERF_COUNT_HW_CACHE_MAX]
1999 [PERF_COUNT_HW_CACHE_OP_MAX]
2000 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2001 [C(LL)] = {
2002 [C(OP_READ)] = {
2003 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2004 GLM_LLC_ACCESS,
2005 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2006 GLM_LLC_MISS,
2007 },
2008 [C(OP_WRITE)] = {
2009 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2010 GLM_LLC_ACCESS,
2011 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2012 GLM_LLC_MISS,
2013 },
2014 [C(OP_PREFETCH)] = {
2015 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
2016 GLM_LLC_ACCESS,
2017 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
2018 GLM_LLC_MISS,
2019 },
2020 },
2021 };
2022
2023 static __initconst const u64 glp_hw_cache_event_ids
2024 [PERF_COUNT_HW_CACHE_MAX]
2025 [PERF_COUNT_HW_CACHE_OP_MAX]
2026 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2027 [C(L1D)] = {
2028 [C(OP_READ)] = {
2029 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2030 [C(RESULT_MISS)] = 0x0,
2031 },
2032 [C(OP_WRITE)] = {
2033 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2034 [C(RESULT_MISS)] = 0x0,
2035 },
2036 [C(OP_PREFETCH)] = {
2037 [C(RESULT_ACCESS)] = 0x0,
2038 [C(RESULT_MISS)] = 0x0,
2039 },
2040 },
2041 [C(L1I)] = {
2042 [C(OP_READ)] = {
2043 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
2044 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
2045 },
2046 [C(OP_WRITE)] = {
2047 [C(RESULT_ACCESS)] = -1,
2048 [C(RESULT_MISS)] = -1,
2049 },
2050 [C(OP_PREFETCH)] = {
2051 [C(RESULT_ACCESS)] = 0x0,
2052 [C(RESULT_MISS)] = 0x0,
2053 },
2054 },
2055 [C(LL)] = {
2056 [C(OP_READ)] = {
2057 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2058 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2059 },
2060 [C(OP_WRITE)] = {
2061 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2062 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2063 },
2064 [C(OP_PREFETCH)] = {
2065 [C(RESULT_ACCESS)] = 0x0,
2066 [C(RESULT_MISS)] = 0x0,
2067 },
2068 },
2069 [C(DTLB)] = {
2070 [C(OP_READ)] = {
2071 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2072 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
2073 },
2074 [C(OP_WRITE)] = {
2075 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2076 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
2077 },
2078 [C(OP_PREFETCH)] = {
2079 [C(RESULT_ACCESS)] = 0x0,
2080 [C(RESULT_MISS)] = 0x0,
2081 },
2082 },
2083 [C(ITLB)] = {
2084 [C(OP_READ)] = {
2085 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2086 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2087 },
2088 [C(OP_WRITE)] = {
2089 [C(RESULT_ACCESS)] = -1,
2090 [C(RESULT_MISS)] = -1,
2091 },
2092 [C(OP_PREFETCH)] = {
2093 [C(RESULT_ACCESS)] = -1,
2094 [C(RESULT_MISS)] = -1,
2095 },
2096 },
2097 [C(BPU)] = {
2098 [C(OP_READ)] = {
2099 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2100 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2101 },
2102 [C(OP_WRITE)] = {
2103 [C(RESULT_ACCESS)] = -1,
2104 [C(RESULT_MISS)] = -1,
2105 },
2106 [C(OP_PREFETCH)] = {
2107 [C(RESULT_ACCESS)] = -1,
2108 [C(RESULT_MISS)] = -1,
2109 },
2110 },
2111 };
2112
2113 static __initconst const u64 glp_hw_cache_extra_regs
2114 [PERF_COUNT_HW_CACHE_MAX]
2115 [PERF_COUNT_HW_CACHE_OP_MAX]
2116 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2117 [C(LL)] = {
2118 [C(OP_READ)] = {
2119 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2120 GLM_LLC_ACCESS,
2121 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2122 GLM_LLC_MISS,
2123 },
2124 [C(OP_WRITE)] = {
2125 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2126 GLM_LLC_ACCESS,
2127 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2128 GLM_LLC_MISS,
2129 },
2130 [C(OP_PREFETCH)] = {
2131 [C(RESULT_ACCESS)] = 0x0,
2132 [C(RESULT_MISS)] = 0x0,
2133 },
2134 },
2135 };
2136
2137 #define TNT_LOCAL_DRAM BIT_ULL(26)
2138 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2139 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2140 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2141 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2142 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2143 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2144
2145 static __initconst const u64 tnt_hw_cache_extra_regs
2146 [PERF_COUNT_HW_CACHE_MAX]
2147 [PERF_COUNT_HW_CACHE_OP_MAX]
2148 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2149 [C(LL)] = {
2150 [C(OP_READ)] = {
2151 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2152 TNT_LLC_ACCESS,
2153 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2154 TNT_LLC_MISS,
2155 },
2156 [C(OP_WRITE)] = {
2157 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2158 TNT_LLC_ACCESS,
2159 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2160 TNT_LLC_MISS,
2161 },
2162 [C(OP_PREFETCH)] = {
2163 [C(RESULT_ACCESS)] = 0x0,
2164 [C(RESULT_MISS)] = 0x0,
2165 },
2166 },
2167 };
2168
2169 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2170 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2171 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2172 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2173
2174 static struct attribute *tnt_events_attrs[] = {
2175 EVENT_PTR(td_fe_bound_tnt),
2176 EVENT_PTR(td_retiring_tnt),
2177 EVENT_PTR(td_bad_spec_tnt),
2178 EVENT_PTR(td_be_bound_tnt),
2179 NULL,
2180 };
2181
2182 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2183 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2184 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2185 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2186 EVENT_EXTRA_END
2187 };
2188
2189 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
2190 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
2191
2192 static struct attribute *grt_mem_attrs[] = {
2193 EVENT_PTR(mem_ld_grt),
2194 EVENT_PTR(mem_st_grt),
2195 NULL
2196 };
2197
2198 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2199 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2200 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2201 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2202 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2203 EVENT_EXTRA_END
2204 };
2205
2206 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
2207 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
2208
2209 static struct attribute *cmt_events_attrs[] = {
2210 EVENT_PTR(td_fe_bound_tnt),
2211 EVENT_PTR(td_retiring_cmt),
2212 EVENT_PTR(td_bad_spec_cmt),
2213 EVENT_PTR(td_be_bound_tnt),
2214 NULL
2215 };
2216
2217 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2218 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2219 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2220 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2221 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2222 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2223 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2224 EVENT_EXTRA_END
2225 };
2226
2227 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2228 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2229 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2230 #define KNL_MCDRAM_FAR BIT_ULL(22)
2231 #define KNL_DDR_LOCAL BIT_ULL(23)
2232 #define KNL_DDR_FAR BIT_ULL(24)
2233 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2234 KNL_DDR_LOCAL | KNL_DDR_FAR)
2235 #define KNL_L2_READ SLM_DMND_READ
2236 #define KNL_L2_WRITE SLM_DMND_WRITE
2237 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2238 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2239 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2240 KNL_DRAM_ANY | SNB_SNP_ANY | \
2241 SNB_NON_DRAM)
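/*
 * KNL reuses the SLM demand/access encodings, but widens the miss
 * definition with the other-tile L2 hit bits and the MCDRAM/DDR
 * near/far memory bits defined above.
 */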
2242
2243 static __initconst const u64 knl_hw_cache_extra_regs
2244 [PERF_COUNT_HW_CACHE_MAX]
2245 [PERF_COUNT_HW_CACHE_OP_MAX]
2246 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2247 [C(LL)] = {
2248 [C(OP_READ)] = {
2249 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2250 [C(RESULT_MISS)] = 0,
2251 },
2252 [C(OP_WRITE)] = {
2253 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2254 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2255 },
2256 [C(OP_PREFETCH)] = {
2257 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2258 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2259 },
2260 },
2261 };
2262
2263 /*
2264 * Used from PMIs where the LBRs are already disabled.
2265 *
2266 * This function may be called consecutively. The PMU must remain in the
2267 * disabled state across such calls.
2268 *
2269 * During consecutive calls, the same disable value will be written to related
2270 * registers, so the PMU state remains unchanged.
2271 *
2272 * intel_bts events don't coexist with intel PMU's BTS events because of
2273 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2274 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2275 *
2276 * Avoid PEBS_ENABLE MSR access in PMIs.
2277 * Once GLOBAL_CTRL is disabled, none of the counters count anymore, so it
2278 * doesn't matter whether PEBS is enabled or not.
2279 * The PEBS status usually doesn't change in PMIs, so it's unnecessary to
2280 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2281 * However, some cases may change the PEBS status, e.g. PMI
2282 * throttling. PEBS_ENABLE should be updated where the status changes.
2283 */
2284 static __always_inline void __intel_pmu_disable_all(bool bts)
2285 {
2286 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2287
2288 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2289
2290 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2291 intel_pmu_disable_bts();
2292 }
2293
2294 static __always_inline void intel_pmu_disable_all(void)
2295 {
2296 __intel_pmu_disable_all(true);
2297 intel_pmu_pebs_disable_all();
2298 intel_pmu_lbr_disable_all();
2299 }
2300
2301 static void __intel_pmu_enable_all(int added, bool pmi)
2302 {
2303 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2304 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2305
2306 intel_pmu_lbr_enable_all(pmi);
2307
2308 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2309 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2310 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2311 }
2312
2313 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2314 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2315
2316 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2317 struct perf_event *event =
2318 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2319
2320 if (WARN_ON_ONCE(!event))
2321 return;
2322
2323 intel_pmu_enable_bts(event->hw.config);
2324 }
2325 }
2326
2327 static void intel_pmu_enable_all(int added)
2328 {
2329 intel_pmu_pebs_enable_all();
2330 __intel_pmu_enable_all(added, false);
2331 }
2332
2333 static noinline int
2334 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2335 unsigned int cnt, unsigned long flags)
2336 {
2337 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2338
2339 intel_pmu_lbr_read();
2340 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2341
2342 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2343 intel_pmu_enable_all(0);
2344 local_irq_restore(flags);
2345 return cnt;
2346 }
2347
2348 static int
2349 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2350 {
2351 unsigned long flags;
2352
2353 /* must not have branches... */
2354 local_irq_save(flags);
2355 __intel_pmu_disable_all(false); /* we don't care about BTS */
2356 __intel_pmu_lbr_disable();
2357 /* ... until here */
2358 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2359 }
2360
2361 static int
2362 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2363 {
2364 unsigned long flags;
2365
2366 /* must not have branches... */
2367 local_irq_save(flags);
2368 __intel_pmu_disable_all(false); /* we don't care about BTS */
2369 __intel_pmu_arch_lbr_disable();
2370 /* ... until here */
2371 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2372 }
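/*
 * Both snapshot variants are meant to be installed on the
 * perf_snapshot_branch_stack static call (the usual wiring is done in
 * the init code), so that e.g. BPF's bpf_get_branch_snapshot() can
 * capture the LBRs with branch recording disabled for as short a
 * window as possible.
 */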
2373
2374 /*
2375 * Workaround for:
2376 * Intel Errata AAK100 (model 26)
2377 * Intel Errata AAP53 (model 30)
2378 * Intel Errata BD53 (model 44)
2379 *
2380 * The official story:
2381 * These chips need to be 'reset' when adding counters by programming the
2382 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2383 * in sequence on the same PMC or on different PMCs.
2384 *
2385 * In practice it appears some of these events do in fact count, and
2386 * we need to program all 4 events.
2387 */
2388 static void intel_pmu_nhm_workaround(void)
2389 {
2390 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2391 static const unsigned long nhm_magic[4] = {
2392 0x4300B5,
2393 0x4300D2,
2394 0x4300B1,
2395 0x4300B1
2396 };
2397 struct perf_event *event;
2398 int i;
2399
2400 /*
2401 * The errata requires the following steps:
2402 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2403 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2404 * the corresponding PMCx;
2405 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2406 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2407 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
2408 */
2409
2410 /*
2411 * The actual steps we take differ slightly from the above:
2412 * A) To reduce MSR operations, we skip step 1) since the MSRs
2413 * are already cleared before this function is called;
2414 * B) Call x86_perf_event_update to save PMCx before configuring
2415 * PERFEVTSELx with the magic numbers;
2416 * C) In step 5), we only clear a PERFEVTSELx when it is not
2417 * currently in use;
2418 * D) Call x86_perf_event_set_period to restore PMCx;
2419 */
2420
2421 /* We always operate on 4 pairs of PERF counters */
2422 for (i = 0; i < 4; i++) {
2423 event = cpuc->events[i];
2424 if (event)
2425 static_call(x86_pmu_update)(event);
2426 }
2427
2428 for (i = 0; i < 4; i++) {
2429 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2430 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2431 }
2432
2433 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2434 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2435
2436 for (i = 0; i < 4; i++) {
2437 event = cpuc->events[i];
2438
2439 if (event) {
2440 static_call(x86_pmu_set_period)(event);
2441 __x86_pmu_enable_event(&event->hw,
2442 ARCH_PERFMON_EVENTSEL_ENABLE);
2443 } else
2444 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2445 }
2446 }
2447
2448 static void intel_pmu_nhm_enable_all(int added)
2449 {
2450 if (added)
2451 intel_pmu_nhm_workaround();
2452 intel_pmu_enable_all(added);
2453 }
2454
2455 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2456 {
2457 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2458
2459 if (cpuc->tfa_shadow != val) {
2460 cpuc->tfa_shadow = val;
2461 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2462 }
2463 }
2464
2465 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2466 {
2467 /*
2468 * We're going to use PMC3, make sure TFA is set before we touch it.
2469 */
2470 if (cntr == 3)
2471 intel_set_tfa(cpuc, true);
2472 }
2473
2474 static void intel_tfa_pmu_enable_all(int added)
2475 {
2476 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2477
2478 /*
2479 * If we find PMC3 is no longer used when we enable the PMU, we can
2480 * clear TFA.
2481 */
2482 if (!test_bit(3, cpuc->active_mask))
2483 intel_set_tfa(cpuc, false);
2484
2485 intel_pmu_enable_all(added);
2486 }
2487
2488 static inline u64 intel_pmu_get_status(void)
2489 {
2490 u64 status;
2491
2492 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2493
2494 return status;
2495 }
2496
2497 static inline void intel_pmu_ack_status(u64 ack)
2498 {
2499 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2500 }
2501
2502 static inline bool event_is_checkpointed(struct perf_event *event)
2503 {
2504 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2505 }
2506
2507 static inline void intel_set_masks(struct perf_event *event, int idx)
2508 {
2509 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2510
2511 if (event->attr.exclude_host)
2512 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2513 if (event->attr.exclude_guest)
2514 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2515 if (event_is_checkpointed(event))
2516 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2517 }
2518
2519 static inline void intel_clear_masks(struct perf_event *event, int idx)
2520 {
2521 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2522
2523 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2524 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2525 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2526 }
2527
2528 static void intel_pmu_disable_fixed(struct perf_event *event)
2529 {
2530 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2531 struct hw_perf_event *hwc = &event->hw;
2532 int idx = hwc->idx;
2533 u64 mask;
2534
2535 if (is_topdown_idx(idx)) {
2536 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2537
2538 /*
2539 * When there are other active TopDown events,
2540 * don't disable the fixed counter 3.
2541 */
2542 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2543 return;
2544 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2545 }
2546
2547 intel_clear_masks(event, idx);
2548
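/*
 * Only the cached fixed_ctrl_val is updated below; __intel_pmu_enable_all()
 * flushes it to MSR_ARCH_PERFMON_FIXED_CTR_CTRL once it differs from
 * active_fixed_ctrl_val.
 */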
2549 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2550 cpuc->fixed_ctrl_val &= ~mask;
2551 }
2552
2553 static void intel_pmu_disable_event(struct perf_event *event)
2554 {
2555 struct hw_perf_event *hwc = &event->hw;
2556 int idx = hwc->idx;
2557
2558 switch (idx) {
2559 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2560 intel_clear_masks(event, idx);
2561 x86_pmu_disable_event(event);
2562 break;
2563 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2564 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2565 intel_pmu_disable_fixed(event);
2566 break;
2567 case INTEL_PMC_IDX_FIXED_BTS:
2568 intel_pmu_disable_bts();
2569 intel_pmu_drain_bts_buffer();
2570 return;
2571 case INTEL_PMC_IDX_FIXED_VLBR:
2572 intel_clear_masks(event, idx);
2573 break;
2574 default:
2575 intel_clear_masks(event, idx);
2576 pr_warn("Failed to disable the event with invalid index %d\n",
2577 idx);
2578 return;
2579 }
2580
2581 /*
2582 * Needs to be called after x86_pmu_disable_event,
2583 * so we don't trigger the event without PEBS bit set.
2584 */
2585 if (unlikely(event->attr.precise_ip))
2586 intel_pmu_pebs_disable(event);
2587 }
2588
2589 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2590 {
2591 if (is_pebs_pt(event))
2592 perf_report_aux_output_id(event, idx);
2593 }
2594
2595 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2596 {
2597 return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2598 }
2599
2600 static void intel_pmu_del_event(struct perf_event *event)
2601 {
2602 if (intel_pmu_needs_branch_stack(event))
2603 intel_pmu_lbr_del(event);
2604 if (event->attr.precise_ip)
2605 intel_pmu_pebs_del(event);
2606 }
2607
2608 static int icl_set_topdown_event_period(struct perf_event *event)
2609 {
2610 struct hw_perf_event *hwc = &event->hw;
2611 s64 left = local64_read(&hwc->period_left);
2612
2613 /*
2614 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2615 * Software should start both registers, PERF_METRICS and fixed
2616 * counter 3, from zero.
2617 * Clear PERF_METRICS and Fixed counter 3 during initialization.
2618 * After that, both MSRs will be cleared for each read.
2619 * There is no need to clear them again here.
2620 */
2621 if (left == x86_pmu.max_period) {
2622 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2623 wrmsrl(MSR_PERF_METRICS, 0);
2624 hwc->saved_slots = 0;
2625 hwc->saved_metric = 0;
2626 }
2627
2628 if ((hwc->saved_slots) && is_slots_event(event)) {
2629 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2630 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2631 }
2632
2633 perf_event_update_userpage(event);
2634
2635 return 0;
2636 }
2637
2638 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2639
2640 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2641 {
2642 u32 val;
2643
2644 /*
2645 * The metric is reported as an 8bit integer fraction
2646 * summing up to 0xff.
2647 * slots-in-metric = (Metric / 0xff) * slots
2648 */
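/*
 * Worked example (hypothetical numbers): a metric byte of 0x40 with
 * slots = 1000 yields mul_u64_u32_div(1000, 0x40, 0xff), i.e. roughly
 * a quarter of the slots (250).
 */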
2649 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2650 return mul_u64_u32_div(slots, val, 0xff);
2651 }
2652
2653 static u64 icl_get_topdown_value(struct perf_event *event,
2654 u64 slots, u64 metrics)
2655 {
2656 int idx = event->hw.idx;
2657 u64 delta;
2658
2659 if (is_metric_idx(idx))
2660 delta = icl_get_metrics_event_value(metrics, slots, idx);
2661 else
2662 delta = slots;
2663
2664 return delta;
2665 }
2666
2667 static void __icl_update_topdown_event(struct perf_event *event,
2668 u64 slots, u64 metrics,
2669 u64 last_slots, u64 last_metrics)
2670 {
2671 u64 delta, last = 0;
2672
2673 delta = icl_get_topdown_value(event, slots, metrics);
2674 if (last_slots)
2675 last = icl_get_topdown_value(event, last_slots, last_metrics);
2676
2677 /*
2678 * The 8bit integer fraction of a metric may not be accurate,
2679 * especially when the change is very small.
2680 * For example, if only a few bad_spec events happen, the fraction
2681 * may be reduced from 1 to 0. If so, the bad_spec event value
2682 * will be 0, which is definitely less than the last value.
2683 * Avoid updating event->count in this case.
2684 */
2685 if (delta > last) {
2686 delta -= last;
2687 local64_add(delta, &event->count);
2688 }
2689 }
2690
2691 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2692 u64 metrics, int metric_end)
2693 {
2694 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2695 struct perf_event *other;
2696 int idx;
2697
2698 event->hw.saved_slots = slots;
2699 event->hw.saved_metric = metrics;
2700
2701 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2702 if (!is_topdown_idx(idx))
2703 continue;
2704 other = cpuc->events[idx];
2705 other->hw.saved_slots = slots;
2706 other->hw.saved_metric = metrics;
2707 }
2708 }
2709
2710 /*
2711 * Update all active Topdown events.
2712 *
2713 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2714 * modified by an NMI. The PMU has to be disabled before calling this function.
2715 */
2716
2717 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val)
2718 {
2719 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2720 struct perf_event *other;
2721 u64 slots, metrics;
2722 bool reset = true;
2723 int idx;
2724
2725 if (!val) {
2726 /* read Fixed counter 3 */
2727 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2728 if (!slots)
2729 return 0;
2730
2731 /* read PERF_METRICS */
2732 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
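/*
 * In the RDPMC encodings used above, bit 30
 * (INTEL_PMC_FIXED_RDPMC_BASE) selects the fixed-counter space,
 * so (3 | INTEL_PMC_FIXED_RDPMC_BASE) reads fixed counter 3.
 */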
2733 } else {
2734 slots = val[0];
2735 metrics = val[1];
2736 /*
2737 * Don't reset the PERF_METRICS and Fixed counter 3
2738 * for each PEBS record read. Utilize the RDPMC metrics
2739 * clear mode.
2740 */
2741 reset = false;
2742 }
2743
2744 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2745 if (!is_topdown_idx(idx))
2746 continue;
2747 other = cpuc->events[idx];
2748 __icl_update_topdown_event(other, slots, metrics,
2749 event ? event->hw.saved_slots : 0,
2750 event ? event->hw.saved_metric : 0);
2751 }
2752
2753 /*
2754 * Check and update this event, which may have been cleared
2755 * from active_mask, e.g. by x86_pmu_stop().
2756 */
2757 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2758 __icl_update_topdown_event(event, slots, metrics,
2759 event->hw.saved_slots,
2760 event->hw.saved_metric);
2761
2762 /*
2763 * In x86_pmu_stop(), the event is cleared from active_mask first,
2764 * and the delta is drained afterwards, which indicates a context
2765 * switch for a counting event.
2766 * Save the metric and slots for the context switch.
2767 * There is no need to reset PERF_METRICS and Fixed counter 3,
2768 * because the values will be restored on the next schedule in.
2769 */
2770 update_saved_topdown_regs(event, slots, metrics, metric_end);
2771 reset = false;
2772 }
2773
2774 if (reset) {
2775 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2776 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2777 wrmsrl(MSR_PERF_METRICS, 0);
2778 if (event)
2779 update_saved_topdown_regs(event, 0, 0, metric_end);
2780 }
2781
2782 return slots;
2783 }
2784
2785 static u64 icl_update_topdown_event(struct perf_event *event, u64 *val)
2786 {
2787 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2788 x86_pmu.num_topdown_events - 1,
2789 val);
2790 }
2791
2792 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
2793
2794 static void intel_pmu_read_event(struct perf_event *event)
2795 {
2796 if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) ||
2797 is_pebs_counter_event_group(event)) {
2798 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2799 bool pmu_enabled = cpuc->enabled;
2800
2801 /* Only need to call update_topdown_event() once for group read. */
2802 if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
2803 return;
2804
2805 cpuc->enabled = 0;
2806 if (pmu_enabled)
2807 intel_pmu_disable_all();
2808
2809 /*
2810 * If PEBS counter snapshotting is enabled,
2811 * the topdown event is available in the PEBS records.
2812 */
2813 if (is_topdown_event(event) && !is_pebs_counter_event_group(event))
2814 static_call(intel_pmu_update_topdown_event)(event, NULL);
2815 else
2816 intel_pmu_drain_pebs_buffer();
2817
2818 cpuc->enabled = pmu_enabled;
2819 if (pmu_enabled)
2820 intel_pmu_enable_all(0);
2821
2822 return;
2823 }
2824
2825 x86_perf_event_update(event);
2826 }
2827
2828 static void intel_pmu_enable_fixed(struct perf_event *event)
2829 {
2830 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2831 struct hw_perf_event *hwc = &event->hw;
2832 u64 mask, bits = 0;
2833 int idx = hwc->idx;
2834
2835 if (is_topdown_idx(idx)) {
2836 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2837 /*
2838 * When there are other active TopDown events,
2839 * don't enable the fixed counter 3 again.
2840 */
2841 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2842 return;
2843
2844 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2845
2846 if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR)
2847 bits |= INTEL_FIXED_3_METRICS_CLEAR;
2848 }
2849
2850 intel_set_masks(event, idx);
2851
2852 /*
2853 * Enable IRQ generation (0x8), if not PEBS,
2854 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2855 * if requested:
2856 */
2857 if (!event->attr.precise_ip)
2858 bits |= INTEL_FIXED_0_ENABLE_PMI;
2859 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2860 bits |= INTEL_FIXED_0_USER;
2861 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2862 bits |= INTEL_FIXED_0_KERNEL;
2863
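/*
 * Illustrative example: a non-PEBS fixed event counting in both
 * ring 0 and ring 3 reaches this point with bits == 0xb
 * (PMI | USER | KERNEL).
 */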
2864 /*
2865 * ANY bit is supported in v3 and up
2866 */
2867 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2868 bits |= INTEL_FIXED_0_ANYTHREAD;
2869
2870 idx -= INTEL_PMC_IDX_FIXED;
2871 bits = intel_fixed_bits_by_idx(idx, bits);
2872 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2873
2874 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2875 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2876 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2877 }
2878
2879 cpuc->fixed_ctrl_val &= ~mask;
2880 cpuc->fixed_ctrl_val |= bits;
2881 }
2882
2883 static void intel_pmu_enable_event(struct perf_event *event)
2884 {
2885 u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
2886 struct hw_perf_event *hwc = &event->hw;
2887 int idx = hwc->idx;
2888
2889 if (unlikely(event->attr.precise_ip))
2890 intel_pmu_pebs_enable(event);
2891
2892 switch (idx) {
2893 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2894 if (branch_sample_counters(event))
2895 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
2896 intel_set_masks(event, idx);
2897 __x86_pmu_enable_event(hwc, enable_mask);
2898 break;
2899 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2900 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2901 intel_pmu_enable_fixed(event);
2902 break;
2903 case INTEL_PMC_IDX_FIXED_BTS:
2904 if (!__this_cpu_read(cpu_hw_events.enabled))
2905 return;
2906 intel_pmu_enable_bts(hwc->config);
2907 break;
2908 case INTEL_PMC_IDX_FIXED_VLBR:
2909 intel_set_masks(event, idx);
2910 break;
2911 default:
2912 pr_warn("Failed to enable the event with invalid index %d\n",
2913 idx);
2914 }
2915 }
2916
2917 static void intel_pmu_add_event(struct perf_event *event)
2918 {
2919 if (event->attr.precise_ip)
2920 intel_pmu_pebs_add(event);
2921 if (intel_pmu_needs_branch_stack(event))
2922 intel_pmu_lbr_add(event);
2923 }
2924
2925 /*
2926 * Save and restart an expired event. Called by NMI contexts,
2927 * so it has to be careful about preempting normal event ops:
2928 */
2929 int intel_pmu_save_and_restart(struct perf_event *event)
2930 {
2931 static_call(x86_pmu_update)(event);
2932 /*
2933 * For a checkpointed counter always reset back to 0. This
2934 * avoids a situation where the counter overflows, aborts the
2935 * transaction and is then set back to shortly before the
2936 * overflow, and overflows and aborts again.
2937 */
2938 if (unlikely(event_is_checkpointed(event))) {
2939 /* No race with NMIs because the counter should not be armed */
2940 wrmsrl(event->hw.event_base, 0);
2941 local64_set(&event->hw.prev_count, 0);
2942 }
2943 return static_call(x86_pmu_set_period)(event);
2944 }
2945
2946 static int intel_pmu_set_period(struct perf_event *event)
2947 {
2948 if (unlikely(is_topdown_count(event)))
2949 return static_call(intel_pmu_set_topdown_event_period)(event);
2950
2951 return x86_perf_event_set_period(event);
2952 }
2953
2954 static u64 intel_pmu_update(struct perf_event *event)
2955 {
2956 if (unlikely(is_topdown_count(event)))
2957 return static_call(intel_pmu_update_topdown_event)(event, NULL);
2958
2959 return x86_perf_event_update(event);
2960 }
2961
2962 static void intel_pmu_reset(void)
2963 {
2964 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2965 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2966 unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
2967 unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
2968 unsigned long flags;
2969 int idx;
2970
2971 if (!*(u64 *)cntr_mask)
2972 return;
2973
2974 local_irq_save(flags);
2975
2976 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2977
2978 for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
2979 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2980 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2981 }
2982 for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
2983 if (fixed_counter_disabled(idx, cpuc->pmu))
2984 continue;
2985 wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
2986 }
2987
2988 if (ds)
2989 ds->bts_index = ds->bts_buffer_base;
2990
2991 /* Ack all overflows and disable fixed counters */
2992 if (x86_pmu.version >= 2) {
2993 intel_pmu_ack_status(intel_pmu_get_status());
2994 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2995 }
2996
2997 /* Reset LBRs and LBR freezing */
2998 if (x86_pmu.lbr_nr) {
2999 update_debugctlmsr(get_debugctlmsr() &
3000 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
3001 }
3002
3003 local_irq_restore(flags);
3004 }
3005
3006 /*
3007 * We may be running with guest PEBS events created by KVM, and the
3008 * PEBS records are logged into the guest's DS and invisible to host.
3009 *
3010 * In the case of guest PEBS overflow, we only trigger a fake event
3011 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
3012 * The guest will then vm-entry and check the guest DS area to read
3013 * the guest PEBS records.
3014 *
3015 * The contents and other behavior of the guest event do not matter.
3016 */
3017 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
3018 struct perf_sample_data *data)
3019 {
3020 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3021 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
3022 struct perf_event *event = NULL;
3023 int bit;
3024
3025 if (!unlikely(perf_guest_state()))
3026 return;
3027
3028 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3029 !guest_pebs_idxs)
3030 return;
3031
3032 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3033 event = cpuc->events[bit];
3034 if (!event->attr.precise_ip)
3035 continue;
3036
3037 perf_sample_data_init(data, 0, event->hw.last_period);
3038 if (perf_event_overflow(event, data, regs))
3039 x86_pmu_stop(event, 0);
3040
3041 /* Injecting one fake event is enough. */
3042 break;
3043 }
3044 }
3045
3046 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3047 {
3048 struct perf_sample_data data;
3049 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3050 int bit;
3051 int handled = 0;
3052
3053 inc_irq_stat(apic_perf_irqs);
3054
3055 /*
3056 * Ignore a range of extra bits in status that do not indicate
3057 * overflow by themselves.
3058 */
3059 status &= ~(GLOBAL_STATUS_COND_CHG |
3060 GLOBAL_STATUS_ASIF |
3061 GLOBAL_STATUS_LBRS_FROZEN);
3062 if (!status)
3063 return 0;
3064 /*
3065 * In case multiple PEBS events are sampled at the same time,
3066 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3067 * PEBS buffer overflow and also seeing at most 3 PEBS counters
3068 * having their bits set in the status register. This is a sign
3069 * that there was at least one PEBS record pending at the time
3070 * of the PMU interrupt. PEBS counters must only be processed
3071 * via the drain_pebs() calls and not via the regular sample
3072 * processing loop that follows, otherwise phony regular samples
3073 * may be generated in the sampling buffer that are not marked
3074 * with the EXACT tag. Another possibility is to have
3075 * one PEBS event and at least one non-PEBS event which overflows
3076 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
3077 * not be set, yet the overflow status bit for the PEBS counter will
3078 * be set on Skylake.
3079 *
3080 * To avoid this problem, we systematically ignore the PEBS-enabled
3081 * counters from the GLOBAL_STATUS mask and we always process PEBS
3082 * events via drain_pebs().
3083 */
3084 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3085
3086 /*
3087 * PEBS overflow sets bit 62 in the global status register
3088 */
3089 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3090 u64 pebs_enabled = cpuc->pebs_enabled;
3091
3092 handled++;
3093 x86_pmu_handle_guest_pebs(regs, &data);
3094 static_call(x86_pmu_drain_pebs)(regs, &data);
3095
3096 /*
3097 * PMI throttling may be triggered, which stops the PEBS event.
3098 * Although cpuc->pebs_enabled is updated accordingly,
3099 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
3100 * forced to 0 in the PMI.
3101 * Update the MSR if pebs_enabled has changed.
3102 */
3103 if (pebs_enabled != cpuc->pebs_enabled)
3104 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3105
3106 /*
3107 * The PEBS handler above (PEBS counter snapshotting) has already updated
3108 * fixed counter 3 and the perf metrics counts if they are in a counter
3109 * group; there is no need to update them again.
3110 */
3111 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3112 is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3113 status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3114 }
3115
3116 /*
3117 * Intel PT
3118 */
3119 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3120 handled++;
3121 if (!perf_guest_handle_intel_pt_intr())
3122 intel_pt_interrupt();
3123 }
3124
3125 /*
3126 * Intel Perf metrics
3127 */
3128 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3129 handled++;
3130 static_call(intel_pmu_update_topdown_event)(NULL, NULL);
3131 }
3132
3133 status &= hybrid(cpuc->pmu, intel_ctrl);
3134
3135 /*
3136 * Checkpointed counters can lead to 'spurious' PMIs because the
3137 * rollback caused by the PMI will have cleared the overflow status
3138 * bit. Therefore always force probe these counters.
3139 */
3140 status |= cpuc->intel_cp_status;
3141
3142 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3143 struct perf_event *event = cpuc->events[bit];
3144
3145 handled++;
3146
3147 if (!test_bit(bit, cpuc->active_mask))
3148 continue;
3149
3150 /*
3151 * There may be unprocessed PEBS records in the PEBS buffer,
3152 * which still stores the previous values.
3153 * Process those records first before handling the latest value.
3154 * For example,
3155 * A is a regular counter
3156 * B is a PEBS event which reads A
3157 * C is a PEBS event
3158 *
3159 * The following can happen:
3160 * B-assist A=1
3161 * C A=2
3162 * B-assist A=3
3163 * A-overflow-PMI A=4
3164 * C-assist-PMI (PEBS buffer) A=5
3165 *
3166 * The PEBS buffer has to be drained before handling the A-PMI
3167 */
3168 if (is_pebs_counter_event_group(event))
3169 x86_pmu.drain_pebs(regs, &data);
3170
3171 if (!intel_pmu_save_and_restart(event))
3172 continue;
3173
3174 perf_sample_data_init(&data, 0, event->hw.last_period);
3175
3176 if (has_branch_stack(event))
3177 intel_pmu_lbr_save_brstack(&data, cpuc, event);
3178
3179 if (perf_event_overflow(event, &data, regs))
3180 x86_pmu_stop(event, 0);
3181 }
3182
3183 return handled;
3184 }
3185
3186 /*
3187 * This handler is triggered by the local APIC, so the APIC IRQ handling
3188 * rules apply:
3189 */
3190 static int intel_pmu_handle_irq(struct pt_regs *regs)
3191 {
3192 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3193 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3194 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3195 int loops;
3196 u64 status;
3197 int handled;
3198 int pmu_enabled;
3199
3200 /*
3201 * Save the PMU state.
3202 * It needs to be restored when leaving the handler.
3203 */
3204 pmu_enabled = cpuc->enabled;
3205 /*
3206 * In general, the early ACK is only applied on old platforms.
3207 * For big cores starting from Haswell, the late ACK should be
3208 * applied.
3209 * For small cores after Tremont, the ACK has to be done right
3210 * before re-enabling counters, i.e. in the middle of the
3211 * NMI handler.
3212 */
3213 if (!late_ack && !mid_ack)
3214 apic_write(APIC_LVTPC, APIC_DM_NMI);
3215 intel_bts_disable_local();
3216 cpuc->enabled = 0;
3217 __intel_pmu_disable_all(true);
3218 handled = intel_pmu_drain_bts_buffer();
3219 handled += intel_bts_interrupt();
3220 status = intel_pmu_get_status();
3221 if (!status)
3222 goto done;
3223
3224 loops = 0;
3225 again:
3226 intel_pmu_lbr_read();
3227 intel_pmu_ack_status(status);
3228 if (++loops > 100) {
3229 static bool warned;
3230
3231 if (!warned) {
3232 WARN(1, "perfevents: irq loop stuck!\n");
3233 perf_event_print_debug();
3234 warned = true;
3235 }
3236 intel_pmu_reset();
3237 goto done;
3238 }
3239
3240 handled += handle_pmi_common(regs, status);
3241
3242 /*
3243 * Repeat if there is more work to be done:
3244 */
3245 status = intel_pmu_get_status();
3246 if (status)
3247 goto again;
3248
3249 done:
3250 if (mid_ack)
3251 apic_write(APIC_LVTPC, APIC_DM_NMI);
3252 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3253 cpuc->enabled = pmu_enabled;
3254 if (pmu_enabled)
3255 __intel_pmu_enable_all(0, true);
3256 intel_bts_enable_local();
3257
3258 /*
3259 * Only unmask the NMI after the overflow counters
3260 * have been reset. This avoids spurious NMIs on
3261 * Haswell CPUs.
3262 */
3263 if (late_ack)
3264 apic_write(APIC_LVTPC, APIC_DM_NMI);
3265 return handled;
3266 }
3267
3268 static struct event_constraint *
3269 intel_bts_constraints(struct perf_event *event)
3270 {
3271 if (unlikely(intel_pmu_has_bts(event)))
3272 return &bts_constraint;
3273
3274 return NULL;
3275 }
3276
3277 /*
3278 * Note: matches a fake event, like Fixed2.
3279 */
3280 static struct event_constraint *
3281 intel_vlbr_constraints(struct perf_event *event)
3282 {
3283 struct event_constraint *c = &vlbr_constraint;
3284
3285 if (unlikely(constraint_match(c, event->hw.config))) {
3286 event->hw.flags |= c->flags;
3287 return c;
3288 }
3289
3290 return NULL;
3291 }
3292
3293 static int intel_alt_er(struct cpu_hw_events *cpuc,
3294 int idx, u64 config)
3295 {
3296 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3297 int alt_idx = idx;
3298
3299 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3300 return idx;
3301
3302 if (idx == EXTRA_REG_RSP_0)
3303 alt_idx = EXTRA_REG_RSP_1;
3304
3305 if (idx == EXTRA_REG_RSP_1)
3306 alt_idx = EXTRA_REG_RSP_0;
3307
3308 if (config & ~extra_regs[alt_idx].valid_mask)
3309 return idx;
3310
3311 return alt_idx;
3312 }
3313
3314 static void intel_fixup_er(struct perf_event *event, int idx)
3315 {
3316 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3317 event->hw.extra_reg.idx = idx;
3318
3319 if (idx == EXTRA_REG_RSP_0) {
3320 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3321 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3322 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3323 } else if (idx == EXTRA_REG_RSP_1) {
3324 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3325 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3326 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3327 }
3328 }
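/*
 * This direct indexing by EXTRA_REG_RSP_0/1 is why the extra_regs
 * arrays above all carry the "must define OFFCORE_RSP_X first" note:
 * intel_fixup_er() rewrites the event code from the entry at that
 * fixed slot when an event is retargeted to the alternate MSR.
 */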
3329
3330 /*
3331 * manage allocation of shared extra msr for certain events
3332 *
3333 * sharing can be:
3334 * per-cpu: to be shared between the various events on a single PMU
3335 * per-core: per-cpu + shared by HT threads
3336 */
3337 static struct event_constraint *
3338 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3339 struct perf_event *event,
3340 struct hw_perf_event_extra *reg)
3341 {
3342 struct event_constraint *c = &emptyconstraint;
3343 struct er_account *era;
3344 unsigned long flags;
3345 int idx = reg->idx;
3346
3347 /*
3348 * reg->alloc can be set due to existing state, so for fake cpuc we
3349 * need to ignore this, otherwise we might fail to allocate proper fake
3350 * state for this extra reg constraint. Also see the comment below.
3351 */
3352 if (reg->alloc && !cpuc->is_fake)
3353 return NULL; /* call x86_get_event_constraint() */
3354
3355 again:
3356 era = &cpuc->shared_regs->regs[idx];
3357 /*
3358 * we use spin_lock_irqsave() to avoid lockdep issues when
3359 * passing a fake cpuc
3360 */
3361 raw_spin_lock_irqsave(&era->lock, flags);
3362
3363 if (!atomic_read(&era->ref) || era->config == reg->config) {
3364
3365 /*
3366 * If it's a fake cpuc -- as per validate_{group,event}() we
3367 * shouldn't touch event state and we can avoid doing so
3368 * since both will only call get_event_constraints() once
3369 * on each event, this avoids the need for reg->alloc.
3370 *
3371 * Not doing the ER fixup will only result in era->reg being
3372 * wrong, but since we won't actually try and program hardware
3373 * this isn't a problem either.
3374 */
3375 if (!cpuc->is_fake) {
3376 if (idx != reg->idx)
3377 intel_fixup_er(event, idx);
3378
3379 /*
3380 * x86_schedule_events() can call get_event_constraints()
3381 * multiple times on events in the case of incremental
3382 * scheduling. reg->alloc ensures we only do the ER
3383 * allocation once.
3384 */
3385 reg->alloc = 1;
3386 }
3387
3388 /* lock in msr value */
3389 era->config = reg->config;
3390 era->reg = reg->reg;
3391
3392 /* one more user */
3393 atomic_inc(&era->ref);
3394
3395 /*
3396 * need to call x86_get_event_constraints()
3397 * to check if associated event has constraints
3398 */
3399 c = NULL;
3400 } else {
3401 idx = intel_alt_er(cpuc, idx, reg->config);
3402 if (idx != reg->idx) {
3403 raw_spin_unlock_irqrestore(&era->lock, flags);
3404 goto again;
3405 }
3406 }
3407 raw_spin_unlock_irqrestore(&era->lock, flags);
3408
3409 return c;
3410 }
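/*
 * Illustrative flow (a sketch, not covering every path): two events
 * that program OFFCORE_RSP_0 with the same config share the MSR and
 * each just takes a reference on the er_account. A second event with
 * a different config is steered to OFFCORE_RSP_1 via intel_alt_er()
 * and retried; only when both MSRs are taken with conflicting configs
 * does it receive &emptyconstraint and fail to schedule.
 */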
3411
3412 static void
3413 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3414 struct hw_perf_event_extra *reg)
3415 {
3416 struct er_account *era;
3417
3418 /*
3419 * Only put constraint if extra reg was actually allocated. Also takes
3420 * care of events which do not use an extra shared reg.
3421 *
3422 * Also, if this is a fake cpuc we shouldn't touch any event state
3423 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3424 * either since it'll be thrown out.
3425 */
3426 if (!reg->alloc || cpuc->is_fake)
3427 return;
3428
3429 era = &cpuc->shared_regs->regs[reg->idx];
3430
3431 /* one fewer user */
3432 atomic_dec(&era->ref);
3433
3434 /* allocate again next time */
3435 reg->alloc = 0;
3436 }
3437
3438 static struct event_constraint *
3439 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3440 struct perf_event *event)
3441 {
3442 struct event_constraint *c = NULL, *d;
3443 struct hw_perf_event_extra *xreg, *breg;
3444
3445 xreg = &event->hw.extra_reg;
3446 if (xreg->idx != EXTRA_REG_NONE) {
3447 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3448 if (c == &emptyconstraint)
3449 return c;
3450 }
3451 breg = &event->hw.branch_reg;
3452 if (breg->idx != EXTRA_REG_NONE) {
3453 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3454 if (d == &emptyconstraint) {
3455 __intel_shared_reg_put_constraints(cpuc, xreg);
3456 c = d;
3457 }
3458 }
3459 return c;
3460 }
3461
3462 struct event_constraint *
3463 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3464 struct perf_event *event)
3465 {
3466 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3467 struct event_constraint *c;
3468
3469 if (event_constraints) {
3470 for_each_event_constraint(c, event_constraints) {
3471 if (constraint_match(c, event->hw.config)) {
3472 event->hw.flags |= c->flags;
3473 return c;
3474 }
3475 }
3476 }
3477
3478 return &hybrid_var(cpuc->pmu, unconstrained);
3479 }
3480
3481 static struct event_constraint *
3482 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3483 struct perf_event *event)
3484 {
3485 struct event_constraint *c;
3486
3487 c = intel_vlbr_constraints(event);
3488 if (c)
3489 return c;
3490
3491 c = intel_bts_constraints(event);
3492 if (c)
3493 return c;
3494
3495 c = intel_shared_regs_constraints(cpuc, event);
3496 if (c)
3497 return c;
3498
3499 c = intel_pebs_constraints(event);
3500 if (c)
3501 return c;
3502
3503 return x86_get_event_constraints(cpuc, idx, event);
3504 }
3505
3506 static void
3507 intel_start_scheduling(struct cpu_hw_events *cpuc)
3508 {
3509 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3510 struct intel_excl_states *xl;
3511 int tid = cpuc->excl_thread_id;
3512
3513 /*
3514 * nothing needed if in group validation mode
3515 */
3516 if (cpuc->is_fake || !is_ht_workaround_enabled())
3517 return;
3518
3519 /*
3520 * no exclusion needed
3521 */
3522 if (WARN_ON_ONCE(!excl_cntrs))
3523 return;
3524
3525 xl = &excl_cntrs->states[tid];
3526
3527 xl->sched_started = true;
3528 /*
3529 * lock shared state until we are done scheduling
3530 * in intel_stop_scheduling(); this makes
3531 * scheduling appear as a transaction
3532 */
3533 raw_spin_lock(&excl_cntrs->lock);
3534 }
3535
3536 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3537 {
3538 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3539 struct event_constraint *c = cpuc->event_constraint[idx];
3540 struct intel_excl_states *xl;
3541 int tid = cpuc->excl_thread_id;
3542
3543 if (cpuc->is_fake || !is_ht_workaround_enabled())
3544 return;
3545
3546 if (WARN_ON_ONCE(!excl_cntrs))
3547 return;
3548
3549 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3550 return;
3551
3552 xl = &excl_cntrs->states[tid];
3553
3554 lockdep_assert_held(&excl_cntrs->lock);
3555
3556 if (c->flags & PERF_X86_EVENT_EXCL)
3557 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3558 else
3559 xl->state[cntr] = INTEL_EXCL_SHARED;
3560 }
3561
3562 static void
3563 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3564 {
3565 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3566 struct intel_excl_states *xl;
3567 int tid = cpuc->excl_thread_id;
3568
3569 /*
3570 * nothing needed if in group validation mode
3571 */
3572 if (cpuc->is_fake || !is_ht_workaround_enabled())
3573 return;
3574 /*
3575 * no exclusion needed
3576 */
3577 if (WARN_ON_ONCE(!excl_cntrs))
3578 return;
3579
3580 xl = &excl_cntrs->states[tid];
3581
3582 xl->sched_started = false;
3583 /*
3584 * release shared state lock (acquired in intel_start_scheduling())
3585 */
3586 raw_spin_unlock(&excl_cntrs->lock);
3587 }
3588
3589 static struct event_constraint *
3590 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3591 {
3592 WARN_ON_ONCE(!cpuc->constraint_list);
3593
3594 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3595 struct event_constraint *cx;
3596
3597 /*
3598 * grab pre-allocated constraint entry
3599 */
3600 cx = &cpuc->constraint_list[idx];
3601
3602 /*
3603 * initialize dynamic constraint
3604 * with static constraint
3605 */
3606 *cx = *c;
3607
3608 /*
3609 * mark constraint as dynamic
3610 */
3611 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3612 c = cx;
3613 }
3614
3615 return c;
3616 }
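/*
 * E.g. a static constraint { .idxmsk64 = 0xf, .weight = 4 } taken from
 * a model table is copied into cpuc->constraint_list[idx] and marked
 * PERF_X86_EVENT_DYNAMIC; the scheduler can then clear bits in the
 * copy without corrupting the shared table.
 */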
3617
3618 static struct event_constraint *
3619 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3620 int idx, struct event_constraint *c)
3621 {
3622 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3623 struct intel_excl_states *xlo;
3624 int tid = cpuc->excl_thread_id;
3625 int is_excl, i, w;
3626
3627 /*
3628 * validating a group does not require
3629 * enforcing cross-thread exclusion
3630 */
3631 if (cpuc->is_fake || !is_ht_workaround_enabled())
3632 return c;
3633
3634 /*
3635 * no exclusion needed
3636 */
3637 if (WARN_ON_ONCE(!excl_cntrs))
3638 return c;
3639
3640 /*
3641 * because we modify the constraint, we need
3642 * to make a copy. Static constraints come
3643 * from static const tables.
3644 *
3645 * only needed when constraint has not yet
3646 * been cloned (marked dynamic)
3647 */
3648 c = dyn_constraint(cpuc, c, idx);
3649
3650 /*
3651 * From here on, the constraint is dynamic.
3652 * Either it was just allocated above, or it
3653 * was allocated during an earlier invocation
3654 * of this function
3655 */
3656
3657 /*
3658 * state of sibling HT
3659 */
3660 xlo = &excl_cntrs->states[tid ^ 1];
3661
3662 /*
3663 * event requires exclusive counter access
3664 * across HT threads
3665 */
3666 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3667 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3668 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3669 if (!cpuc->n_excl++)
3670 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3671 }
3672
3673 /*
3674 * Modify static constraint with current dynamic
3675 * state of thread
3676 *
3677 * EXCLUSIVE: sibling counter measuring exclusive event
3678 * SHARED : sibling counter measuring non-exclusive event
3679 * UNUSED : sibling counter unused
3680 */
3681 w = c->weight;
3682 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3683 /*
3684 * exclusive event in sibling counter
3685 * our corresponding counter cannot be used
3686 * regardless of our event
3687 */
3688 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3689 __clear_bit(i, c->idxmsk);
3690 w--;
3691 continue;
3692 }
3693 /*
3694 * if measuring an exclusive event, sibling
3695 * measuring non-exclusive, then counter cannot
3696 * be used
3697 */
3698 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3699 __clear_bit(i, c->idxmsk);
3700 w--;
3701 continue;
3702 }
3703 }
3704
3705 /*
3706 * if we return an empty mask, then switch
3707 * back to static empty constraint to avoid
3708 * the cost of freeing later on
3709 */
3710 if (!w)
3711 c = &emptyconstraint;
3712
3713 c->weight = w;
3714
3715 return c;
3716 }
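/*
 * Worked example (hypothetical state): c->idxmsk = 0xf (counters 0-3),
 * sibling state { EXCLUSIVE, UNUSED, SHARED, UNUSED } and is_excl set:
 *
 *	counter 0: sibling EXCLUSIVE	-> bit cleared
 *	counter 2: sibling SHARED	-> bit cleared (we are exclusive)
 *
 * leaving idxmsk = 0xa and weight = 2.
 */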
3717
3718 static struct event_constraint *
3719 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3720 struct perf_event *event)
3721 {
3722 struct event_constraint *c1, *c2;
3723
3724 c1 = cpuc->event_constraint[idx];
3725
3726 /*
3727 * first time only
3728 * - static constraint: no change across incremental scheduling calls
3729 * - dynamic constraint: handled by intel_get_excl_constraints()
3730 */
3731 c2 = __intel_get_event_constraints(cpuc, idx, event);
3732 if (c1) {
3733 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3734 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3735 c1->weight = c2->weight;
3736 c2 = c1;
3737 }
3738
3739 if (cpuc->excl_cntrs)
3740 return intel_get_excl_constraints(cpuc, event, idx, c2);
3741
3742 /* Not all counters support the branch counter feature. */
3743 if (branch_sample_counters(event)) {
3744 c2 = dyn_constraint(cpuc, c2, idx);
3745 c2->idxmsk64 &= x86_pmu.lbr_counters;
3746 c2->weight = hweight64(c2->idxmsk64);
3747 }
3748
3749 return c2;
3750 }
3751
3752 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3753 struct perf_event *event)
3754 {
3755 struct hw_perf_event *hwc = &event->hw;
3756 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3757 int tid = cpuc->excl_thread_id;
3758 struct intel_excl_states *xl;
3759
3760 /*
3761 * nothing needed if in group validation mode
3762 */
3763 if (cpuc->is_fake)
3764 return;
3765
3766 if (WARN_ON_ONCE(!excl_cntrs))
3767 return;
3768
3769 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3770 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3771 if (!--cpuc->n_excl)
3772 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3773 }
3774
3775 /*
3776 * If event was actually assigned, then mark the counter state as
3777 * unused now.
3778 */
3779 if (hwc->idx >= 0) {
3780 xl = &excl_cntrs->states[tid];
3781
3782 /*
3783 * put_constraint may be called from x86_schedule_events()
3784 * which already has the lock held, so make the locking
3785 * conditional here.
3786 */
3787 if (!xl->sched_started)
3788 raw_spin_lock(&excl_cntrs->lock);
3789
3790 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3791
3792 if (!xl->sched_started)
3793 raw_spin_unlock(&excl_cntrs->lock);
3794 }
3795 }
3796
3797 static void
3798 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3799 struct perf_event *event)
3800 {
3801 struct hw_perf_event_extra *reg;
3802
3803 reg = &event->hw.extra_reg;
3804 if (reg->idx != EXTRA_REG_NONE)
3805 __intel_shared_reg_put_constraints(cpuc, reg);
3806
3807 reg = &event->hw.branch_reg;
3808 if (reg->idx != EXTRA_REG_NONE)
3809 __intel_shared_reg_put_constraints(cpuc, reg);
3810 }
3811
3812 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3813 struct perf_event *event)
3814 {
3815 intel_put_shared_regs_event_constraints(cpuc, event);
3816
3817 /*
3818 * if the PMU has exclusive counter restrictions, then
3819 * all events are subject to them and must call the
3820 * put_excl_constraints() routine
3821 */
3822 if (cpuc->excl_cntrs)
3823 intel_put_excl_constraints(cpuc, event);
3824 }
3825
3826 static void intel_pebs_aliases_core2(struct perf_event *event)
3827 {
3828 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3829 /*
3830 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3831 * (0x003c) so that we can use it with PEBS.
3832 *
3833 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3834 * PEBS capable. However we can use INST_RETIRED.ANY_P
3835 * (0x00c0), which is a PEBS capable event, to get the same
3836 * count.
3837 *
3838 * INST_RETIRED.ANY_P counts the number of cycles that retire
3839 * CNTMASK instructions. By setting CNTMASK to a value (16)
3840 * larger than the maximum number of instructions that can be
3841 * retired per cycle (4) and then inverting the condition, we
3842 * count all cycles that retire 16 or fewer instructions, which
3843 * is every cycle.
3844 *
3845 * Thereby we gain a PEBS capable cycle counter.
3846 */
3847 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3848
3849 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3850 event->hw.config = alt_config;
3851 }
3852 }
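/*
 * Assuming the architectural event-select layout advertised in the
 * format attributes below (event 0-7, umask 8-15, inv bit 23,
 * cmask 24-31), the alternative encoding above works out to:
 *
 *	.event = 0xc0	-> 0x000000c0
 *	.inv   = 1	-> 0x00800000
 *	.cmask = 16	-> 0x10000000
 *	alt_config	 = 0x108000c0
 */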
3853
3854 static void intel_pebs_aliases_snb(struct perf_event *event)
3855 {
3856 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3857 /*
3858 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3859 * (0x003c) so that we can use it with PEBS.
3860 *
3861 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3862 * PEBS capable. However we can use UOPS_RETIRED.ALL
3863 * (0x01c2), which is a PEBS capable event, to get the same
3864 * count.
3865 *
3866 * UOPS_RETIRED.ALL counts the number of cycles that retire
3867 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3868 * larger than the maximum number of micro-ops that can be
3869 * retired per cycle (4) and then inverting the condition, we
3870 * count all cycles that retire 16 or fewer micro-ops, which
3871 * is every cycle.
3872 *
3873 * Thereby we gain a PEBS capable cycle counter.
3874 */
3875 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3876
3877 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3878 event->hw.config = alt_config;
3879 }
3880 }
3881
3882 static void intel_pebs_aliases_precdist(struct perf_event *event)
3883 {
3884 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3885 /*
3886 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3887 * (0x003c) so that we can use it with PEBS.
3888 *
3889 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3890 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3891 * (0x01c0), which is a PEBS capable event, to get the same
3892 * count.
3893 *
3894 * The PREC_DIST event has special support to minimize sample
3895 * shadowing effects. One drawback is that it can only be
3896 * programmed on counter 1, but that seems like an
3897 * acceptable trade off.
3898 */
3899 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3900
3901 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3902 event->hw.config = alt_config;
3903 }
3904 }
3905
3906 static void intel_pebs_aliases_ivb(struct perf_event *event)
3907 {
3908 if (event->attr.precise_ip < 3)
3909 return intel_pebs_aliases_snb(event);
3910 return intel_pebs_aliases_precdist(event);
3911 }
3912
3913 static void intel_pebs_aliases_skl(struct perf_event *event)
3914 {
3915 if (event->attr.precise_ip < 3)
3916 return intel_pebs_aliases_core2(event);
3917 return intel_pebs_aliases_precdist(event);
3918 }
3919
3920 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3921 {
3922 unsigned long flags = x86_pmu.large_pebs_flags;
3923
3924 if (event->attr.use_clockid)
3925 flags &= ~PERF_SAMPLE_TIME;
3926 if (!event->attr.exclude_kernel)
3927 flags &= ~PERF_SAMPLE_REGS_USER;
3928 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3929 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3930 return flags;
3931 }
3932
3933 static int intel_pmu_bts_config(struct perf_event *event)
3934 {
3935 struct perf_event_attr *attr = &event->attr;
3936
3937 if (unlikely(intel_pmu_has_bts(event))) {
3938 /* BTS is not supported by this architecture. */
3939 if (!x86_pmu.bts_active)
3940 return -EOPNOTSUPP;
3941
3942 /* BTS is currently only allowed for user-mode. */
3943 if (!attr->exclude_kernel)
3944 return -EOPNOTSUPP;
3945
3946 /* BTS is not allowed for precise events. */
3947 if (attr->precise_ip)
3948 return -EOPNOTSUPP;
3949
3950 /* disallow bts if conflicting events are present */
3951 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3952 return -EBUSY;
3953
3954 event->destroy = hw_perf_lbr_event_destroy;
3955 }
3956
3957 return 0;
3958 }
3959
3960 static int core_pmu_hw_config(struct perf_event *event)
3961 {
3962 int ret = x86_pmu_hw_config(event);
3963
3964 if (ret)
3965 return ret;
3966
3967 return intel_pmu_bts_config(event);
3968 }
3969
3970 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3971 ((x86_pmu.num_topdown_events - 1) << 8))
3972
3973 static bool is_available_metric_event(struct perf_event *event)
3974 {
3975 return is_metric_event(event) &&
3976 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3977 }
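/*
 * E.g. with x86_pmu.num_topdown_events == 8, metric events with configs
 * from INTEL_TD_METRIC_RETIRING up to INTEL_TD_METRIC_RETIRING + (7 << 8)
 * are considered available.
 */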
3978
3979 static inline bool is_mem_loads_event(struct perf_event *event)
3980 {
3981 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3982 }
3983
3984 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3985 {
3986 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3987 }
3988
3989 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3990 {
3991 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3992 return false;
3993
3994 if (is_hybrid())
3995 return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
3996
3997 return true;
3998 }
3999
4000 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
4001 {
4002 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
4003
4004 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
4005 }
4006
4007 static u64 intel_pmu_freq_start_period(struct perf_event *event)
4008 {
4009 int type = event->attr.type;
4010 u64 config, factor;
4011 s64 start;
4012
4013 /*
4014 * The 127 is the lowest possible recommended SAV (sample after value)
4015 * for a 4000 freq (default freq), according to the event list JSON file.
4016 * Also, assume the workload is idle 50% of the time.
4017 */
4018 factor = 64 * 4000;
4019 if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
4020 goto end;
4021
4022 /*
4023 * The estimation of the start period in the freq mode is
4024 * based on the below assumption.
4025 *
4026 * For a cycles or an instructions event, 1GHZ of the
4027 * underlying platform, 1 IPC. The workload is idle 50% of the time.
4028 * The start period = 1,000,000,000 * 1 / freq / 2.
4029 * = 500,000,000 / freq
4030 *
4031 * Usually, the branch-related events occur less often than the
4032 * instructions event. According to the Intel event list JSON
4033 * file, the SAV (sample after value) of a branch-related event
4034 * is usually 1/4 of an instruction event.
4035 * The start period of branch-related events = 125,000,000 / freq.
4036 *
4037 * The cache-related events occur even less often. The SAV is usually
4038 * 1/20 of an instruction event.
4039 * The start period of cache-related events = 25,000,000 / freq.
4040 */
4041 config = event->attr.config & PERF_HW_EVENT_MASK;
4042 if (type == PERF_TYPE_HARDWARE) {
4043 switch (config) {
4044 case PERF_COUNT_HW_CPU_CYCLES:
4045 case PERF_COUNT_HW_INSTRUCTIONS:
4046 case PERF_COUNT_HW_BUS_CYCLES:
4047 case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
4048 case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
4049 case PERF_COUNT_HW_REF_CPU_CYCLES:
4050 factor = 500000000;
4051 break;
4052 case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
4053 case PERF_COUNT_HW_BRANCH_MISSES:
4054 factor = 125000000;
4055 break;
4056 case PERF_COUNT_HW_CACHE_REFERENCES:
4057 case PERF_COUNT_HW_CACHE_MISSES:
4058 factor = 25000000;
4059 break;
4060 default:
4061 goto end;
4062 }
4063 }
4064
4065 if (type == PERF_TYPE_HW_CACHE)
4066 factor = 25000000;
4067 end:
4068 /*
4069 * Usually, a prime or a number with fewer factors (close to a prime)
4070 * is chosen as an SAV, which makes it less likely that the sampling
4071 * period synchronizes with some periodic event in the workload.
4072 * Subtract 1 so that, at least for the default freq, the period
4073 * avoids values near powers of two.
4074 */
4075 start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
4076
4077 if (start > x86_pmu.max_period)
4078 start = x86_pmu.max_period;
4079
4080 if (x86_pmu.limit_period)
4081 x86_pmu.limit_period(event, &start);
4082
4083 return start;
4084 }
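/*
 * Worked example: for a cycles event at the default 4000 Hz sampling
 * frequency, the estimated start period is
 *
 *	500,000,000 / 4000 - 1 = 124,999
 *
 * before the max_period clamp and limit_period adjustment above.
 */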
4085
4086 static int intel_pmu_hw_config(struct perf_event *event)
4087 {
4088 int ret = x86_pmu_hw_config(event);
4089
4090 if (ret)
4091 return ret;
4092
4093 ret = intel_pmu_bts_config(event);
4094 if (ret)
4095 return ret;
4096
4097 if (event->attr.freq && event->attr.sample_freq) {
4098 event->hw.sample_period = intel_pmu_freq_start_period(event);
4099 event->hw.last_period = event->hw.sample_period;
4100 local64_set(&event->hw.period_left, event->hw.sample_period);
4101 }
4102
4103 if (event->attr.precise_ip) {
4104 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
4105 return -EINVAL;
4106
4107 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
4108 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
4109 if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) &&
4110 !has_aux_action(event)) {
4111 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
4112 event->attach_state |= PERF_ATTACH_SCHED_CB;
4113 }
4114 }
4115 if (x86_pmu.pebs_aliases)
4116 x86_pmu.pebs_aliases(event);
4117 }
4118
4119 if (needs_branch_stack(event)) {
4120 /* Avoid branch stack setup for counting events in SAMPLE READ */
4121 if (is_sampling_event(event) ||
4122 !(event->attr.sample_type & PERF_SAMPLE_READ))
4123 event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4124 }
4125
4126 if (branch_sample_counters(event)) {
4127 struct perf_event *leader, *sibling;
4128 int num = 0;
4129
4130 if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
4131 (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
4132 return -EINVAL;
4133
4134 /*
4135 * The branch counter logging is not supported in the call stack
4136 * mode yet, since we cannot simply flush the LBR during e.g.,
4137 * multiplexing. Also, there is no obvious usage with the call
4138 * stack mode. Simply forbid it for now.
4139 *
4140 * If any events in the group enable the branch counter logging
4141 * feature, the group is treated as a branch counter logging
4142 * group, which requires the extra space to store the counters.
4143 */
4144 leader = event->group_leader;
4145 if (branch_sample_call_stack(leader))
4146 return -EINVAL;
4147 if (branch_sample_counters(leader))
4148 num++;
4149 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4150
4151 for_each_sibling_event(sibling, leader) {
4152 if (branch_sample_call_stack(sibling))
4153 return -EINVAL;
4154 if (branch_sample_counters(sibling))
4155 num++;
4156 }
4157
4158 if (num > fls(x86_pmu.lbr_counters))
4159 return -EINVAL;
4160 /*
4161 * Applying only the PERF_SAMPLE_BRANCH_COUNTERS doesn't
4162 * require any branch stack setup.
4163 * Clear the bit to avoid unnecessary branch stack setup.
4164 */
4165 if (0 == (event->attr.branch_sample_type &
4166 ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4167 PERF_SAMPLE_BRANCH_COUNTERS)))
4168 event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4169
4170 /*
4171 * Force the leader to be a LBR event. So LBRs can be reset
4172 * with the leader event. See intel_pmu_lbr_del() for details.
4173 */
4174 if (!intel_pmu_needs_branch_stack(leader))
4175 return -EINVAL;
4176 }
4177
4178 if (intel_pmu_needs_branch_stack(event)) {
4179 ret = intel_pmu_setup_lbr_filter(event);
4180 if (ret)
4181 return ret;
4182 event->attach_state |= PERF_ATTACH_SCHED_CB;
4183
4184 /*
4185 * BTS is set up earlier in this path, so don't account twice
4186 */
4187 if (!unlikely(intel_pmu_has_bts(event))) {
4188 /* disallow lbr if conflicting events are present */
4189 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4190 return -EBUSY;
4191
4192 event->destroy = hw_perf_lbr_event_destroy;
4193 }
4194 }
4195
4196 if (event->attr.aux_output) {
4197 if (!event->attr.precise_ip)
4198 return -EINVAL;
4199
4200 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4201 }
4202
4203 if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
4204 (x86_pmu.intel_cap.pebs_format >= 6) &&
4205 x86_pmu.intel_cap.pebs_baseline &&
4206 is_sampling_event(event) &&
4207 event->attr.precise_ip)
4208 event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
4209
4210 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4211 (event->attr.type == PERF_TYPE_HW_CACHE))
4212 return 0;
4213
4214 /*
4215 * Config Topdown slots and metric events
4216 *
4217 * The slots event on Fixed Counter 3 can support sampling,
4218 * which will be handled normally in x86_perf_event_update().
4219 *
4220 * Metric events don't support sampling and require being paired
4221 * with a slots event as group leader. When the slots event
4222 * is used in a metrics group, it too cannot support sampling.
4223 */
4224 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4225 /* The metrics_clear can only be set for the slots event */
4226 if (event->attr.config1 &&
4227 (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR)))
4228 return -EINVAL;
4229
4230 if (event->attr.config2)
4231 return -EINVAL;
4232
4233 /*
4234 * The TopDown metrics events and slots event don't
4235 * support any filters.
4236 */
4237 if (event->attr.config & X86_ALL_EVENT_FLAGS)
4238 return -EINVAL;
4239
4240 if (is_available_metric_event(event)) {
4241 struct perf_event *leader = event->group_leader;
4242
4243 /* The metric events don't support sampling. */
4244 if (is_sampling_event(event))
4245 return -EINVAL;
4246
4247 /* The metric events require a slots group leader. */
4248 if (!is_slots_event(leader))
4249 return -EINVAL;
4250
4251 /*
4252 * The leader/SLOTS must not be a sampling event for
4253 * metric use; hardware requires it starts at 0 when used
4254 * in conjunction with MSR_PERF_METRICS.
4255 */
4256 if (is_sampling_event(leader))
4257 return -EINVAL;
4258
4259 event->event_caps |= PERF_EV_CAP_SIBLING;
4260 /*
4261 * Only once we have a METRICs sibling do we
4262 * need TopDown magic.
4263 */
4264 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4265 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4266 }
4267 }
4268
4269 /*
4270 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4271 * doesn't function quite right. As a work-around it needs to always be
4272 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4273 * The actual count of this second event is irrelevant; it just needs
4274 * to be active to make the first event function correctly.
4275 *
4276 * In a group, the auxiliary event must be in front of the load latency
4277 * event. This rule simplifies the implementation of the check,
4278 * since perf cannot see the complete group at this point.
4279 */
4280 if (require_mem_loads_aux_event(event) &&
4281 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4282 is_mem_loads_event(event)) {
4283 struct perf_event *leader = event->group_leader;
4284 struct perf_event *sibling = NULL;
4285
4286 /*
4287 * When this memload event is also the first event (no group
4288 * exists yet), then there is no aux event before it.
4289 */
4290 if (leader == event)
4291 return -ENODATA;
4292
4293 if (!is_mem_loads_aux_event(leader)) {
4294 for_each_sibling_event(sibling, leader) {
4295 if (is_mem_loads_aux_event(sibling))
4296 break;
4297 }
4298 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4299 return -ENODATA;
4300 }
4301 }
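	/*
	 * From the perf tool, such a group is typically requested with
	 * the auxiliary event opened first, e.g. (raw-event syntax,
	 * roughly):
	 *
	 *	{cpu/event=0x03,umask=0x82/,cpu/event=0xcd,umask=0x01,ldlat=3/}
	 *
	 * so that the aux event precedes the load latency event in the
	 * sibling list checked above.
	 */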
4302
4303 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4304 return 0;
4305
4306 if (x86_pmu.version < 3)
4307 return -EINVAL;
4308
4309 ret = perf_allow_cpu();
4310 if (ret)
4311 return ret;
4312
4313 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4314
4315 return 0;
4316 }
4317
4318 /*
4319 * Currently, the only caller of this function is atomic_switch_perf_msrs().
4320 * The host perf context helps to prepare the values of the real hardware for
4321 * a set of msrs that need to be switched atomically in a vmx transaction.
4322 *
4323 * For example, the pseudocode needed to add a new msr should look like:
4324 *
4325 * arr[(*nr)++] = (struct perf_guest_switch_msr){
4326 * .msr = the hardware msr address,
4327 * .host = the value the hardware has when it doesn't run a guest,
4328 * .guest = the value the hardware has when it runs a guest,
4329 * };
4330 *
4331 * These values have nothing to do with the emulated values the guest sees
4332 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4333 * specifically in the intel_pmu_{get,set}_msr().
4334 */
4335 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4336 {
4337 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4338 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4339 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4340 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4341 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4342 int global_ctrl, pebs_enable;
4343
4344 /*
4345 * In addition to obeying exclude_guest/exclude_host, remove bits being
4346 * used for PEBS when running a guest, because PEBS writes to virtual
4347 * addresses (not physical addresses).
4348 */
4349 *nr = 0;
4350 global_ctrl = (*nr)++;
4351 arr[global_ctrl] = (struct perf_guest_switch_msr){
4352 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
4353 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4354 .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
4355 };
4356
4357 if (!x86_pmu.pebs)
4358 return arr;
4359
4360 /*
4361 * If a PMU counter has PEBS enabled, it is not enough to
4362 * disable the counter on guest entry, since a PEBS memory
4363 * write can overshoot guest entry and corrupt guest
4364 * memory. Disabling PEBS solves the problem.
4365 *
4366 * Don't do this if the CPU already enforces it.
4367 */
4368 if (x86_pmu.pebs_no_isolation) {
4369 arr[(*nr)++] = (struct perf_guest_switch_msr){
4370 .msr = MSR_IA32_PEBS_ENABLE,
4371 .host = cpuc->pebs_enabled,
4372 .guest = 0,
4373 };
4374 return arr;
4375 }
4376
4377 if (!kvm_pmu || !x86_pmu.pebs_ept)
4378 return arr;
4379
4380 arr[(*nr)++] = (struct perf_guest_switch_msr){
4381 .msr = MSR_IA32_DS_AREA,
4382 .host = (unsigned long)cpuc->ds,
4383 .guest = kvm_pmu->ds_area,
4384 };
4385
4386 if (x86_pmu.intel_cap.pebs_baseline) {
4387 arr[(*nr)++] = (struct perf_guest_switch_msr){
4388 .msr = MSR_PEBS_DATA_CFG,
4389 .host = cpuc->active_pebs_data_cfg,
4390 .guest = kvm_pmu->pebs_data_cfg,
4391 };
4392 }
4393
4394 pebs_enable = (*nr)++;
4395 arr[pebs_enable] = (struct perf_guest_switch_msr){
4396 .msr = MSR_IA32_PEBS_ENABLE,
4397 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4398 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4399 };
4400
4401 if (arr[pebs_enable].host) {
4402 /* Disable guest PEBS if host PEBS is enabled. */
4403 arr[pebs_enable].guest = 0;
4404 } else {
4405 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4406 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4407 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4408 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4409 arr[global_ctrl].guest |= arr[pebs_enable].guest;
4410 }
4411
4412 return arr;
4413 }
4414
4415 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4416 {
4417 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4418 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4419 int idx;
4420
4421 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4422 struct perf_event *event = cpuc->events[idx];
4423
4424 arr[idx].msr = x86_pmu_config_addr(idx);
4425 arr[idx].host = arr[idx].guest = 0;
4426
4427 if (!test_bit(idx, cpuc->active_mask))
4428 continue;
4429
4430 arr[idx].host = arr[idx].guest =
4431 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4432
4433 if (event->attr.exclude_host)
4434 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4435 else if (event->attr.exclude_guest)
4436 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4437 }
4438
4439 *nr = x86_pmu_max_num_counters(cpuc->pmu);
4440 return arr;
4441 }
4442
4443 static void core_pmu_enable_event(struct perf_event *event)
4444 {
4445 if (!event->attr.exclude_host)
4446 x86_pmu_enable_event(event);
4447 }
4448
4449 static void core_pmu_enable_all(int added)
4450 {
4451 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4452 int idx;
4453
4454 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4455 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4456
4457 if (!test_bit(idx, cpuc->active_mask) ||
4458 cpuc->events[idx]->attr.exclude_host)
4459 continue;
4460
4461 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4462 }
4463 }
4464
4465 static int hsw_hw_config(struct perf_event *event)
4466 {
4467 int ret = intel_pmu_hw_config(event);
4468
4469 if (ret)
4470 return ret;
4471 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4472 return 0;
4473 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4474
4475 /*
4476 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4477 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4478 * this combination.
4479 */
4480 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4481 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4482 event->attr.precise_ip > 0))
4483 return -EOPNOTSUPP;
4484
4485 if (event_is_checkpointed(event)) {
4486 /*
4487 * Sampling of checkpointed events can cause situations where
4488 * the CPU constantly aborts because of an overflow, which is
4489 * then checkpointed back and ignored. Forbid checkpointing
4490 * for sampling.
4491 *
4492 * But still allow a long sampling period, so that perf stat
4493 * from KVM works.
4494 */
4495 if (event->attr.sample_period > 0 &&
4496 event->attr.sample_period < 0x7fffffff)
4497 return -EOPNOTSUPP;
4498 }
4499 return 0;
4500 }
4501
4502 static struct event_constraint counter0_constraint =
4503 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4504
4505 static struct event_constraint counter1_constraint =
4506 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4507
4508 static struct event_constraint counter0_1_constraint =
4509 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4510
4511 static struct event_constraint counter2_constraint =
4512 EVENT_CONSTRAINT(0, 0x4, 0);
4513
4514 static struct event_constraint fixed0_constraint =
4515 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4516
4517 static struct event_constraint fixed0_counter0_constraint =
4518 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4519
4520 static struct event_constraint fixed0_counter0_1_constraint =
4521 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4522
4523 static struct event_constraint counters_1_7_constraint =
4524 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
4525
4526 static struct event_constraint *
4527 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4528 struct perf_event *event)
4529 {
4530 struct event_constraint *c;
4531
4532 c = intel_get_event_constraints(cpuc, idx, event);
4533
4534 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4535 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4536 if (c->idxmsk64 & (1U << 2))
4537 return &counter2_constraint;
4538 return &emptyconstraint;
4539 }
4540
4541 return c;
4542 }
4543
4544 static struct event_constraint *
4545 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4546 struct perf_event *event)
4547 {
4548 /*
4549 * Fixed counter 0 has less skid.
4550 * Force instruction:ppp in Fixed counter 0
4551 */
4552 if ((event->attr.precise_ip == 3) &&
4553 constraint_match(&fixed0_constraint, event->hw.config))
4554 return &fixed0_constraint;
4555
4556 return hsw_get_event_constraints(cpuc, idx, event);
4557 }
4558
4559 static struct event_constraint *
4560 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4561 struct perf_event *event)
4562 {
4563 struct event_constraint *c;
4564
4565 c = icl_get_event_constraints(cpuc, idx, event);
4566
4567 /*
4568 * The :ppp indicates the Precise Distribution (PDist) facility, which
4569 * is only supported on the GP counter 0. If a :ppp event is not
4570 * available on the GP counter 0, error out.
4571 * Exception: Instruction PDIR is only available on the fixed counter 0.
4572 */
4573 if ((event->attr.precise_ip == 3) &&
4574 !constraint_match(&fixed0_constraint, event->hw.config)) {
4575 if (c->idxmsk64 & BIT_ULL(0))
4576 return &counter0_constraint;
4577
4578 return &emptyconstraint;
4579 }
4580
4581 return c;
4582 }
4583
4584 static struct event_constraint *
4585 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4586 struct perf_event *event)
4587 {
4588 struct event_constraint *c;
4589
4590 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4591 if (event->attr.precise_ip == 3)
4592 return &counter0_constraint;
4593
4594 c = intel_get_event_constraints(cpuc, idx, event);
4595
4596 return c;
4597 }
4598
4599 static struct event_constraint *
4600 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4601 struct perf_event *event)
4602 {
4603 struct event_constraint *c;
4604
4605 c = intel_get_event_constraints(cpuc, idx, event);
4606
4607 /*
4608 * :ppp means to do reduced skid PEBS,
4609 * which is available on PMC0 and fixed counter 0.
4610 */
4611 if (event->attr.precise_ip == 3) {
4612 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4613 if (constraint_match(&fixed0_constraint, event->hw.config))
4614 return &fixed0_counter0_constraint;
4615
4616 return &counter0_constraint;
4617 }
4618
4619 return c;
4620 }
4621
4622 static bool allow_tsx_force_abort = true;
4623
4624 static struct event_constraint *
4625 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4626 struct perf_event *event)
4627 {
4628 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4629
4630 /*
4631 * Without TFA we must not use PMC3.
4632 */
4633 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4634 c = dyn_constraint(cpuc, c, idx);
4635 c->idxmsk64 &= ~(1ULL << 3);
4636 c->weight--;
4637 }
4638
4639 return c;
4640 }
4641
4642 static struct event_constraint *
4643 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4644 struct perf_event *event)
4645 {
4646 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4647
4648 if (pmu->pmu_type == hybrid_big)
4649 return glc_get_event_constraints(cpuc, idx, event);
4650 else if (pmu->pmu_type == hybrid_small)
4651 return tnt_get_event_constraints(cpuc, idx, event);
4652
4653 WARN_ON(1);
4654 return &emptyconstraint;
4655 }
4656
4657 static struct event_constraint *
4658 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4659 struct perf_event *event)
4660 {
4661 struct event_constraint *c;
4662
4663 c = intel_get_event_constraints(cpuc, idx, event);
4664
4665 /*
4666 * The :ppp indicates the Precise Distribution (PDist) facility, which
4667 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4668 * If a :ppp event is not available on the above eligible counters,
4669 * error out.
4670 */
4671 if (event->attr.precise_ip == 3) {
4672 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4673 if (constraint_match(&fixed0_constraint, event->hw.config)) {
4674 /* The fixed counter 0 doesn't support LBR event logging. */
4675 if (branch_sample_counters(event))
4676 return &counter0_1_constraint;
4677 else
4678 return &fixed0_counter0_1_constraint;
4679 }
4680
4681 switch (c->idxmsk64 & 0x3ull) {
4682 case 0x1:
4683 return &counter0_constraint;
4684 case 0x2:
4685 return &counter1_constraint;
4686 case 0x3:
4687 return &counter0_1_constraint;
4688 }
4689 return &emptyconstraint;
4690 }
4691
4692 return c;
4693 }
4694
4695 static struct event_constraint *
4696 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4697 struct perf_event *event)
4698 {
4699 struct event_constraint *c;
4700
4701 c = glc_get_event_constraints(cpuc, idx, event);
4702
4703 /* The Retire Latency is not supported by the fixed counter 0. */
4704 if (event->attr.precise_ip &&
4705 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4706 constraint_match(&fixed0_constraint, event->hw.config)) {
4707 /*
4708 * The Instruction PDIR is only available
4709 * on the fixed counter 0. Error out for this case.
4710 */
4711 if (event->attr.precise_ip == 3)
4712 return &emptyconstraint;
4713 return &counters_1_7_constraint;
4714 }
4715
4716 return c;
4717 }
4718
4719 static struct event_constraint *
4720 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4721 struct perf_event *event)
4722 {
4723 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4724
4725 if (pmu->pmu_type == hybrid_big)
4726 return rwc_get_event_constraints(cpuc, idx, event);
4727 if (pmu->pmu_type == hybrid_small)
4728 return cmt_get_event_constraints(cpuc, idx, event);
4729
4730 WARN_ON(1);
4731 return &emptyconstraint;
4732 }
4733
4734 static int adl_hw_config(struct perf_event *event)
4735 {
4736 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4737
4738 if (pmu->pmu_type == hybrid_big)
4739 return hsw_hw_config(event);
4740 else if (pmu->pmu_type == hybrid_small)
4741 return intel_pmu_hw_config(event);
4742
4743 WARN_ON(1);
4744 return -EOPNOTSUPP;
4745 }
4746
4747 static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
4748 {
4749 return INTEL_CPU_TYPE_CORE;
4750 }
4751
4752 static inline bool erratum_hsw11(struct perf_event *event)
4753 {
4754 return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4755 X86_CONFIG(.event=0xc0, .umask=0x01);
4756 }
4757
4758 static struct event_constraint *
4759 arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4760 struct perf_event *event)
4761 {
4762 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4763
4764 if (pmu->pmu_type == hybrid_tiny)
4765 return cmt_get_event_constraints(cpuc, idx, event);
4766
4767 return mtl_get_event_constraints(cpuc, idx, event);
4768 }
4769
4770 static int arl_h_hw_config(struct perf_event *event)
4771 {
4772 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4773
4774 if (pmu->pmu_type == hybrid_tiny)
4775 return intel_pmu_hw_config(event);
4776
4777 return adl_hw_config(event);
4778 }
4779
4780 /*
4781 * Erratum HSW11 requires a period larger than 100, the same as BDM11.
4782 * A minimum period of 128 is enforced as well for INST_RETIRED.ALL.
4783 *
4784 * The message 'interrupt took too long' can be observed on any counter which
4785 * was armed with a period < 32 and two events expired in the same NMI.
4786 * A minimum period of 32 is enforced for the rest of the events.
4787 */
4788 static void hsw_limit_period(struct perf_event *event, s64 *left)
4789 {
4790 *left = max(*left, erratum_hsw11(event) ? 128 : 32);
4791 }
4792
4793 /*
4794 * Broadwell:
4795 *
4796 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4797 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4798 * the two to enforce a minimum period of 128 (the smallest value that has bits
4799 * 0-5 cleared and >= 100).
4800 *
4801 * Because of how the code in x86_perf_event_set_period() works, the truncation
4802 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4803 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4804 *
4805 * Therefore the effective (average) period matches the requested period,
4806 * despite coarser hardware granularity.
4807 */
4808 static void bdw_limit_period(struct perf_event *event, s64 *left)
4809 {
4810 if (erratum_hsw11(event)) {
4811 if (*left < 128)
4812 *left = 128;
4813 *left &= ~0x3fULL;
4814 }
4815 }
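/*
 * E.g. a requested INST_RETIRED.ALL period of 200 first passes the
 * >= 128 check and is then truncated to 200 & ~0x3f = 192; anything
 * below 128 is raised to exactly 128.
 */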
4816
4817 static void nhm_limit_period(struct perf_event *event, s64 *left)
4818 {
4819 *left = max(*left, 32LL);
4820 }
4821
4822 static void glc_limit_period(struct perf_event *event, s64 *left)
4823 {
4824 if (event->attr.precise_ip == 3)
4825 *left = max(*left, 128LL);
4826 }
4827
4828 PMU_FORMAT_ATTR(event, "config:0-7" );
4829 PMU_FORMAT_ATTR(umask, "config:8-15" );
4830 PMU_FORMAT_ATTR(edge, "config:18" );
4831 PMU_FORMAT_ATTR(pc, "config:19" );
4832 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4833 PMU_FORMAT_ATTR(inv, "config:23" );
4834 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4835 PMU_FORMAT_ATTR(in_tx, "config:32" );
4836 PMU_FORMAT_ATTR(in_tx_cp, "config:33" );
4837 PMU_FORMAT_ATTR(eq, "config:36" ); /* v6 + */
4838
4839 PMU_FORMAT_ATTR(metrics_clear, "config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
4840
4841 static ssize_t umask2_show(struct device *dev,
4842 struct device_attribute *attr,
4843 char *page)
4844 {
4845 u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
4846
4847 if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
4848 return sprintf(page, "config:8-15,40-47\n");
4849
4850 /* Roll back to the old format if umask2 is not supported. */
4851 return sprintf(page, "config:8-15\n");
4852 }
4853
4854 static struct device_attribute format_attr_umask2 =
4855 __ATTR(umask, 0444, umask2_show, NULL);
4856
4857 static struct attribute *format_evtsel_ext_attrs[] = {
4858 &format_attr_umask2.attr,
4859 &format_attr_eq.attr,
4860 &format_attr_metrics_clear.attr,
4861 NULL
4862 };
4863
4864 static umode_t
4865 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4866 {
4867 struct device *dev = kobj_to_dev(kobj);
4868 u64 mask;
4869
4870 /*
4871 * The umask and umask2 have different formats but share the
4872 * same attr name. In update mode, the previous value of the
4873 * umask is unconditionally removed before is_visible. If
4874 * umask2 format is not enumerated, it's impossible to roll
4875 * back to the old format.
4876 * Does the check in umask2_show rather than is_visible.
4877 */
4878 if (i == 0)
4879 return attr->mode;
4880
4881 mask = hybrid(dev_get_drvdata(dev), config_mask);
4882 if (i == 1)
4883 return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
4884
4885 /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
4886 if (i == 2) {
4887 union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
4888
4889 return intel_cap.rdpmc_metrics_clear ? attr->mode : 0;
4890 }
4891
4892 return 0;
4893 }
4894
4895 static struct attribute *intel_arch_formats_attr[] = {
4896 &format_attr_event.attr,
4897 &format_attr_umask.attr,
4898 &format_attr_edge.attr,
4899 &format_attr_pc.attr,
4900 &format_attr_inv.attr,
4901 &format_attr_cmask.attr,
4902 NULL,
4903 };
4904
4905 ssize_t intel_event_sysfs_show(char *page, u64 config)
4906 {
4907 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4908
4909 return x86_event_sysfs_show(page, config, event);
4910 }
4911
4912 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4913 {
4914 struct intel_shared_regs *regs;
4915 int i;
4916
4917 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4918 GFP_KERNEL, cpu_to_node(cpu));
4919 if (regs) {
4920 /*
4921 * initialize the locks to keep lockdep happy
4922 */
4923 for (i = 0; i < EXTRA_REG_MAX; i++)
4924 raw_spin_lock_init(&regs->regs[i].lock);
4925
4926 regs->core_id = -1;
4927 }
4928 return regs;
4929 }
4930
4931 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4932 {
4933 struct intel_excl_cntrs *c;
4934
4935 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4936 GFP_KERNEL, cpu_to_node(cpu));
4937 if (c) {
4938 raw_spin_lock_init(&c->lock);
4939 c->core_id = -1;
4940 }
4941 return c;
4942 }
4943
4944
4945 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4946 {
4947 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4948
4949 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4950 cpuc->shared_regs = allocate_shared_regs(cpu);
4951 if (!cpuc->shared_regs)
4952 goto err;
4953 }
4954
4955 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
4956 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4957
4958 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4959 if (!cpuc->constraint_list)
4960 goto err_shared_regs;
4961 }
4962
4963 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4964 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4965 if (!cpuc->excl_cntrs)
4966 goto err_constraint_list;
4967
4968 cpuc->excl_thread_id = 0;
4969 }
4970
4971 return 0;
4972
4973 err_constraint_list:
4974 kfree(cpuc->constraint_list);
4975 cpuc->constraint_list = NULL;
4976
4977 err_shared_regs:
4978 kfree(cpuc->shared_regs);
4979 cpuc->shared_regs = NULL;
4980
4981 err:
4982 return -ENOMEM;
4983 }
4984
4985 static int intel_pmu_cpu_prepare(int cpu)
4986 {
4987 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4988 }
4989
4990 static void flip_smm_bit(void *data)
4991 {
4992 unsigned long set = *(unsigned long *)data;
4993
4994 if (set > 0) {
4995 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4996 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4997 } else {
4998 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4999 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5000 }
5001 }
5002
5003 static void intel_pmu_check_counters_mask(u64 *cntr_mask,
5004 u64 *fixed_cntr_mask,
5005 u64 *intel_ctrl)
5006 {
5007 unsigned int bit;
5008
5009 bit = fls64(*cntr_mask);
5010 if (bit > INTEL_PMC_MAX_GENERIC) {
5011 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5012 bit, INTEL_PMC_MAX_GENERIC);
5013 *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5014 }
5015 *intel_ctrl = *cntr_mask;
5016
5017 bit = fls64(*fixed_cntr_mask);
5018 if (bit > INTEL_PMC_MAX_FIXED) {
5019 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5020 bit, INTEL_PMC_MAX_FIXED);
5021 *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
5022 }
5023
5024 *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
5025 }
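/*
 * E.g. with 8 GP counters (*cntr_mask == 0xff) and 3 fixed counters
 * (*fixed_cntr_mask == 0x7), and assuming INTEL_PMC_IDX_FIXED == 32,
 * the resulting global control mask is:
 *
 *	*intel_ctrl = 0xff | (0x7 << 32) = 0x7000000ff
 */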
5026
5027 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5028 u64 cntr_mask,
5029 u64 fixed_cntr_mask,
5030 u64 intel_ctrl);
5031
5032 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
5033
5034 static inline bool intel_pmu_broken_perf_cap(void)
5035 {
5036 /* The Perf Metric (Bit 15) is always cleared */
5037 if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
5038 boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
5039 return true;
5040
5041 return false;
5042 }
5043
5044 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
5045 {
5046 unsigned int cntr, fixed_cntr, ecx, edx;
5047 union cpuid35_eax eax;
5048 union cpuid35_ebx ebx;
5049
5050 cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
5051
5052 if (ebx.split.umask2)
5053 pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
5054 if (ebx.split.eq)
5055 pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
5056
5057 if (eax.split.cntr_subleaf) {
5058 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
5059 &cntr, &fixed_cntr, &ecx, &edx);
5060 pmu->cntr_mask64 = cntr;
5061 pmu->fixed_cntr_mask64 = fixed_cntr;
5062 }
5063
5064 if (!intel_pmu_broken_perf_cap()) {
5065 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
5066 rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
5067 }
5068 }
5069
5070 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
5071 {
5072 intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
5073 &pmu->intel_ctrl);
5074 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
5075 pmu->unconstrained = (struct event_constraint)
5076 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
5077 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
5078
5079 if (pmu->intel_cap.perf_metrics)
5080 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5081 else
5082 pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5083
5084 intel_pmu_check_event_constraints(pmu->event_constraints,
5085 pmu->cntr_mask64,
5086 pmu->fixed_cntr_mask64,
5087 pmu->intel_ctrl);
5088
5089 intel_pmu_check_extra_regs(pmu->extra_regs);
5090 }
5091
5092 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
5093 {
5094 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
5095 enum intel_cpu_type cpu_type = c->topo.intel_type;
5096 int i;
5097
5098 /*
5099 * This is running on a CPU model that is known to have hybrid
5100 * configurations. But the CPU told us it is not hybrid, shame
5101 * on it. There should be a fixup function provided for these
5102 * troublesome CPUs (->get_hybrid_cpu_type).
5103 */
5104 if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
5105 if (x86_pmu.get_hybrid_cpu_type)
5106 cpu_type = x86_pmu.get_hybrid_cpu_type();
5107 else
5108 return NULL;
5109 }
5110
5111 /*
5112 * This essentially just maps between the 'hybrid_cpu_type'
5113 * and 'hybrid_pmu_type' enums, except for the ARL-H processor,
5114 * which needs to compare the atom uarch native id since ARL-H
5115 * contains two different atom uarchs.
5116 */
5117 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5118 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
5119 u32 native_id;
5120
5121 if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
5122 return &x86_pmu.hybrid_pmu[i];
5123 if (cpu_type == INTEL_CPU_TYPE_ATOM) {
5124 if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
5125 return &x86_pmu.hybrid_pmu[i];
5126
5127 native_id = c->topo.intel_native_model_id;
5128 if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
5129 return &x86_pmu.hybrid_pmu[i];
5130 if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
5131 return &x86_pmu.hybrid_pmu[i];
5132 }
5133 }
5134
5135 return NULL;
5136 }
5137
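/*
 * Called from intel_pmu_cpu_starting() on hybrid systems: bind the
 * onlining CPU to its matching hybrid PMU, and enumerate and check
 * that PMU's capabilities when its first CPU comes online.
 */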
5138 static bool init_hybrid_pmu(int cpu)
5139 {
5140 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5141 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
5142
5143 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
5144 cpuc->pmu = NULL;
5145 return false;
5146 }
5147
5148 /* Only check and dump the PMU information for the first CPU */
5149 if (!cpumask_empty(&pmu->supported_cpus))
5150 goto end;
5151
5152 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
5153 update_pmu_cap(pmu);
5154
5155 intel_pmu_check_hybrid_pmus(pmu);
5156
5157 if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
5158 return false;
5159
5160 pr_info("%s PMU driver: ", pmu->name);
5161
5162 pr_cont("\n");
5163
5164 x86_pmu_show_pmu_cap(&pmu->pmu);
5165
5166 end:
5167 cpumask_set_cpu(cpu, &pmu->supported_cpus);
5168 cpuc->pmu = &pmu->pmu;
5169
5170 return true;
5171 }
5172
5173 static void intel_pmu_cpu_starting(int cpu)
5174 {
5175 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5176 int core_id = topology_core_id(cpu);
5177 int i;
5178
5179 if (is_hybrid() && !init_hybrid_pmu(cpu))
5180 return;
5181
5182 init_debug_store_on_cpu(cpu);
5183 /*
5184 * Deal with CPUs that don't clear their LBRs on power-up, and that may
5185 * even boot with LBRs enabled.
5186 */
5187 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
5188 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
5189 intel_pmu_lbr_reset();
5190
5191 cpuc->lbr_sel = NULL;
5192
5193 if (x86_pmu.flags & PMU_FL_TFA) {
5194 WARN_ON_ONCE(cpuc->tfa_shadow);
5195 cpuc->tfa_shadow = ~0ULL;
5196 intel_set_tfa(cpuc, false);
5197 }
5198
5199 if (x86_pmu.version > 1)
5200 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
5201
5202 /*
5203 * Disable perf metrics if any added CPU doesn't support it.
5204 *
5205 * Skip the check on a hybrid architecture, because the
5206 * architectural MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
5207 * architectural features. Perf metrics is a model-specific
5208 * feature for now, so the corresponding bit should always be 0
5209 * on a hybrid platform, e.g., Alder Lake.
5210 */
5211 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
5212 union perf_capabilities perf_cap;
5213
5214 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
5215 if (!perf_cap.perf_metrics) {
5216 x86_pmu.intel_cap.perf_metrics = 0;
5217 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5218 }
5219 }
5220
5221 if (!cpuc->shared_regs)
5222 return;
5223
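	/*
	 * When extra registers are shared between HT siblings, adopt the
	 * sibling's already-initialized shared_regs for this core and queue
	 * our own allocation for kfree once the CPU is fully online.
	 */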
5224 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
5225 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5226 struct intel_shared_regs *pc;
5227
5228 pc = per_cpu(cpu_hw_events, i).shared_regs;
5229 if (pc && pc->core_id == core_id) {
5230 cpuc->kfree_on_online[0] = cpuc->shared_regs;
5231 cpuc->shared_regs = pc;
5232 break;
5233 }
5234 }
5235 cpuc->shared_regs->core_id = core_id;
5236 cpuc->shared_regs->refcnt++;
5237 }
5238
5239 if (x86_pmu.lbr_sel_map)
5240 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
5241
5242 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5243 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5244 struct cpu_hw_events *sibling;
5245 struct intel_excl_cntrs *c;
5246
5247 sibling = &per_cpu(cpu_hw_events, i);
5248 c = sibling->excl_cntrs;
5249 if (c && c->core_id == core_id) {
5250 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
5251 cpuc->excl_cntrs = c;
5252 if (!sibling->excl_thread_id)
5253 cpuc->excl_thread_id = 1;
5254 break;
5255 }
5256 }
5257 cpuc->excl_cntrs->core_id = core_id;
5258 cpuc->excl_cntrs->refcnt++;
5259 }
5260 }
5261
5262 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
5263 {
5264 struct intel_excl_cntrs *c;
5265
5266 c = cpuc->excl_cntrs;
5267 if (c) {
5268 if (c->core_id == -1 || --c->refcnt == 0)
5269 kfree(c);
5270 cpuc->excl_cntrs = NULL;
5271 }
5272
5273 kfree(cpuc->constraint_list);
5274 cpuc->constraint_list = NULL;
5275 }
5276
5277 static void intel_pmu_cpu_dying(int cpu)
5278 {
5279 fini_debug_store_on_cpu(cpu);
5280 }
5281
5282 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
5283 {
5284 struct intel_shared_regs *pc;
5285
5286 pc = cpuc->shared_regs;
5287 if (pc) {
5288 if (pc->core_id == -1 || --pc->refcnt == 0)
5289 kfree(pc);
5290 cpuc->shared_regs = NULL;
5291 }
5292
5293 free_excl_cntrs(cpuc);
5294 }
5295
5296 static void intel_pmu_cpu_dead(int cpu)
5297 {
5298 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5299
5300 intel_cpuc_finish(cpuc);
5301
5302 if (is_hybrid() && cpuc->pmu)
5303 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
5304 }
5305
5306 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5307 struct task_struct *task, bool sched_in)
5308 {
5309 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5310 intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
5311 }
5312
5313 static int intel_pmu_check_period(struct perf_event *event, u64 value)
5314 {
5315 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
5316 }
5317
5318 static void intel_aux_output_init(void)
5319 {
5320 /* See also intel_pmu_aux_output_match() */
5321 if (x86_pmu.intel_cap.pebs_output_pt_available)
5322 x86_pmu.assign = intel_pmu_assign_event;
5323 }
5324
5325 static int intel_pmu_aux_output_match(struct perf_event *event)
5326 {
5327 /* intel_pmu_assign_event() is needed, see intel_aux_output_init() */
5328 if (!x86_pmu.intel_cap.pebs_output_pt_available)
5329 return 0;
5330
5331 return is_intel_pt_event(event);
5332 }
5333
5334 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
5335 {
5336 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
5337
5338 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5339 }
5340
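/*
 * Format attributes map sysfs event-syntax fields onto raw config bits,
 * e.g. "ldlat" occupies config1 bits 0-15. A hypothetical invocation
 * (illustrative only; event encodings are model specific) might be:
 *
 *   perf stat -e cpu/event=0xcd,umask=0x1,ldlat=3/ -- <workload>
 */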
5341 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5342
5343 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5344
5345 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5346
5347 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5348
5349 static struct attribute *intel_arch3_formats_attr[] = {
5350 &format_attr_event.attr,
5351 &format_attr_umask.attr,
5352 &format_attr_edge.attr,
5353 &format_attr_pc.attr,
5354 &format_attr_any.attr,
5355 &format_attr_inv.attr,
5356 &format_attr_cmask.attr,
5357 NULL,
5358 };
5359
5360 static struct attribute *hsw_format_attr[] = {
5361 &format_attr_in_tx.attr,
5362 &format_attr_in_tx_cp.attr,
5363 &format_attr_offcore_rsp.attr,
5364 &format_attr_ldlat.attr,
5365 NULL
5366 };
5367
5368 static struct attribute *nhm_format_attr[] = {
5369 &format_attr_offcore_rsp.attr,
5370 &format_attr_ldlat.attr,
5371 NULL
5372 };
5373
5374 static struct attribute *slm_format_attr[] = {
5375 &format_attr_offcore_rsp.attr,
5376 NULL
5377 };
5378
5379 static struct attribute *cmt_format_attr[] = {
5380 &format_attr_offcore_rsp.attr,
5381 &format_attr_ldlat.attr,
5382 &format_attr_snoop_rsp.attr,
5383 NULL
5384 };
5385
5386 static struct attribute *skl_format_attr[] = {
5387 &format_attr_frontend.attr,
5388 NULL,
5389 };
5390
5391 static __initconst const struct x86_pmu core_pmu = {
5392 .name = "core",
5393 .handle_irq = x86_pmu_handle_irq,
5394 .disable_all = x86_pmu_disable_all,
5395 .enable_all = core_pmu_enable_all,
5396 .enable = core_pmu_enable_event,
5397 .disable = x86_pmu_disable_event,
5398 .hw_config = core_pmu_hw_config,
5399 .schedule_events = x86_schedule_events,
5400 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5401 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5402 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
5403 .event_map = intel_pmu_event_map,
5404 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5405 .apic = 1,
5406 .large_pebs_flags = LARGE_PEBS_FLAGS,
5407
5408 /*
5409 * Intel PMCs cannot be accessed sanely above 32-bit width,
5410 * so we install an artificial 1<<31 period regardless of
5411 * the generic event period:
5412 */
5413 .max_period = (1ULL<<31) - 1,
5414 .get_event_constraints = intel_get_event_constraints,
5415 .put_event_constraints = intel_put_event_constraints,
5416 .event_constraints = intel_core_event_constraints,
5417 .guest_get_msrs = core_guest_get_msrs,
5418 .format_attrs = intel_arch_formats_attr,
5419 .events_sysfs_show = intel_event_sysfs_show,
5420
5421 /*
5422 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
5423 * together with PMU version 1 and thus be using core_pmu with
5424 * shared_regs. We need the following callbacks here to allocate
5425 * it properly.
5426 */
5427 .cpu_prepare = intel_pmu_cpu_prepare,
5428 .cpu_starting = intel_pmu_cpu_starting,
5429 .cpu_dying = intel_pmu_cpu_dying,
5430 .cpu_dead = intel_pmu_cpu_dead,
5431
5432 .check_period = intel_pmu_check_period,
5433
5434 .lbr_reset = intel_pmu_lbr_reset_64,
5435 .lbr_read = intel_pmu_lbr_read_64,
5436 .lbr_save = intel_pmu_lbr_save,
5437 .lbr_restore = intel_pmu_lbr_restore,
5438 };
5439
5440 static __initconst const struct x86_pmu intel_pmu = {
5441 .name = "Intel",
5442 .handle_irq = intel_pmu_handle_irq,
5443 .disable_all = intel_pmu_disable_all,
5444 .enable_all = intel_pmu_enable_all,
5445 .enable = intel_pmu_enable_event,
5446 .disable = intel_pmu_disable_event,
5447 .add = intel_pmu_add_event,
5448 .del = intel_pmu_del_event,
5449 .read = intel_pmu_read_event,
5450 .set_period = intel_pmu_set_period,
5451 .update = intel_pmu_update,
5452 .hw_config = intel_pmu_hw_config,
5453 .schedule_events = x86_schedule_events,
5454 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5455 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5456 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
5457 .event_map = intel_pmu_event_map,
5458 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5459 .apic = 1,
5460 .large_pebs_flags = LARGE_PEBS_FLAGS,
5461 /*
5462 * Intel PMCs cannot be accessed sanely above 32 bit width,
5463 * so we install an artificial 1<<31 period regardless of
5464 * the generic event period:
5465 */
5466 .max_period = (1ULL << 31) - 1,
5467 .get_event_constraints = intel_get_event_constraints,
5468 .put_event_constraints = intel_put_event_constraints,
5469 .pebs_aliases = intel_pebs_aliases_core2,
5470
5471 .format_attrs = intel_arch3_formats_attr,
5472 .events_sysfs_show = intel_event_sysfs_show,
5473
5474 .cpu_prepare = intel_pmu_cpu_prepare,
5475 .cpu_starting = intel_pmu_cpu_starting,
5476 .cpu_dying = intel_pmu_cpu_dying,
5477 .cpu_dead = intel_pmu_cpu_dead,
5478
5479 .guest_get_msrs = intel_guest_get_msrs,
5480 .sched_task = intel_pmu_sched_task,
5481
5482 .check_period = intel_pmu_check_period,
5483
5484 .aux_output_match = intel_pmu_aux_output_match,
5485
5486 .lbr_reset = intel_pmu_lbr_reset_64,
5487 .lbr_read = intel_pmu_lbr_read_64,
5488 .lbr_save = intel_pmu_lbr_save,
5489 .lbr_restore = intel_pmu_lbr_restore,
5490
5491 /*
5492 * SMM has access to all 4 rings and while traditionally SMM code only
5493 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5494 *
5495 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5496 * between SMM or not, this results in what should be pure userspace
5497 * counters including SMM data.
5498 *
5499 * This is a clear privilege issue, therefore globally disable
5500 * counting SMM by default.
5501 */
5502 .attr_freeze_on_smi = 1,
5503 };
5504
5505 static __init void intel_clovertown_quirk(void)
5506 {
5507 /*
5508 * PEBS is unreliable due to:
5509 *
5510 * AJ67 - PEBS may experience CPL leaks
5511 * AJ68 - PEBS PMI may be delayed by one event
5512 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5513 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5514 *
5515 * AJ67 could be worked around by restricting the OS/USR flags.
5516 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5517 *
5518 * AJ106 could possibly be worked around by not allowing LBR
5519 * usage from PEBS, including the fixup.
5520 * AJ68 could possibly be worked around by always programming
5521 * a pebs_event_reset[0] value and coping with the lost events.
5522 *
5523 * But taken together it might just make sense to not enable PEBS on
5524 * these chips.
5525 */
5526 pr_warn("PEBS disabled due to CPU errata\n");
5527 x86_pmu.pebs = 0;
5528 x86_pmu.pebs_constraints = NULL;
5529 }
5530
5531 static const struct x86_cpu_id isolation_ucodes[] = {
5532 X86_MATCH_VFM_STEPS(INTEL_HASWELL, 3, 3, 0x0000001f),
5533 X86_MATCH_VFM_STEPS(INTEL_HASWELL_L, 1, 1, 0x0000001e),
5534 X86_MATCH_VFM_STEPS(INTEL_HASWELL_G, 1, 1, 0x00000015),
5535 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 2, 2, 0x00000037),
5536 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 4, 4, 0x0000000a),
5537 X86_MATCH_VFM_STEPS(INTEL_BROADWELL, 4, 4, 0x00000023),
5538 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G, 1, 1, 0x00000014),
5539 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 2, 2, 0x00000010),
5540 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 3, 3, 0x07000009),
5541 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 4, 4, 0x0f000009),
5542 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 5, 5, 0x0e000002),
5543 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X, 1, 1, 0x0b000014),
5544 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 3, 3, 0x00000021),
5545 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 4, 7, 0x00000000),
5546 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 11, 11, 0x00000000),
5547 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L, 3, 3, 0x0000007c),
5548 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE, 3, 3, 0x0000007c),
5549 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE, 9, 13, 0x0000004e),
5550 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L, 9, 12, 0x0000004e),
5551 {}
5552 };
5553
5554 static void intel_check_pebs_isolation(void)
5555 {
5556 x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
5557 }
5558
5559 static __init void intel_pebs_isolation_quirk(void)
5560 {
5561 WARN_ON_ONCE(x86_pmu.check_microcode);
5562 x86_pmu.check_microcode = intel_check_pebs_isolation;
5563 intel_check_pebs_isolation();
5564 }
5565
5566 static const struct x86_cpu_id pebs_ucodes[] = {
5567 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE, 7, 7, 0x00000028),
5568 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 6, 6, 0x00000618),
5569 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 7, 7, 0x0000070c),
5570 {}
5571 };
5572
5573 static bool intel_snb_pebs_broken(void)
5574 {
5575 return !x86_match_min_microcode_rev(pebs_ucodes);
5576 }
5577
5578 static void intel_snb_check_microcode(void)
5579 {
5580 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5581 return;
5582
5583 /*
5584 * Serialized by the microcode lock.
5585 */
5586 if (x86_pmu.pebs_broken) {
5587 pr_info("PEBS enabled due to microcode update\n");
5588 x86_pmu.pebs_broken = 0;
5589 } else {
5590 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5591 x86_pmu.pebs_broken = 1;
5592 }
5593 }
5594
5595 static bool is_lbr_from(unsigned long msr)
5596 {
5597 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5598
5599 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5600 }
5601
5602 /*
5603 * Under certain circumstances, accessing certain MSRs may cause a #GP;
5604 * this function tests whether the input MSR can be safely accessed.
5605 */
5606 static bool check_msr(unsigned long msr, u64 mask)
5607 {
5608 u64 val_old, val_new, val_tmp;
5609
5610 /*
5611 * Disable the check for real HW, so we don't
5612 * mess with potentially enabled registers:
5613 */
5614 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5615 return true;
5616
5617 /*
5618 * Read the current value, change it and read it back to see if it
5619 * matches, this is needed to detect certain hardware emulators
5620 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5621 */
5622 if (rdmsrl_safe(msr, &val_old))
5623 return false;
5624
5625 /*
5626 * Only change the bits which can be updated by wrmsrl.
5627 */
5628 val_tmp = val_old ^ mask;
5629
5630 if (is_lbr_from(msr))
5631 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5632
5633 if (wrmsrl_safe(msr, val_tmp) ||
5634 rdmsrl_safe(msr, &val_new))
5635 return false;
5636
5637 /*
5638 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5639 * should equal rdmsrl()'s even with the quirk.
5640 */
5641 if (val_new != val_tmp)
5642 return false;
5643
5644 if (is_lbr_from(msr))
5645 val_old = lbr_from_signext_quirk_wr(val_old);
5646
5647 /* At this point the MSR is known to be safe to access.
5648 * Restore the old value and return.
5649 */
5650 wrmsrl(msr, val_old);
5651
5652 return true;
5653 }
5654
5655 static __init void intel_sandybridge_quirk(void)
5656 {
5657 x86_pmu.check_microcode = intel_snb_check_microcode;
5658 cpus_read_lock();
5659 intel_snb_check_microcode();
5660 cpus_read_unlock();
5661 }
5662
5663 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5664 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5665 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5666 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5667 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5668 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5669 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5670 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5671 };
5672
5673 static __init void intel_arch_events_quirk(void)
5674 {
5675 int bit;
5676
5677 /* Disable events that CPUID reports as not present */
5678 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5679 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5680 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5681 intel_arch_events_map[bit].name);
5682 }
5683 }
5684
5685 static __init void intel_nehalem_quirk(void)
5686 {
5687 union cpuid10_ebx ebx;
5688
5689 ebx.full = x86_pmu.events_maskl;
5690 if (ebx.split.no_branch_misses_retired) {
5691 /*
5692 * Erratum AAJ80 detected, we work it around by using
5693 * the BR_MISP_EXEC.ANY event. This will over-count
5694 * branch-misses, but it's still much better than the
5695 * architectural event which is often completely bogus:
5696 */
5697 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5698 ebx.split.no_branch_misses_retired = 0;
5699 x86_pmu.events_maskl = ebx.full;
5700 pr_info("CPU erratum AAJ80 worked around\n");
5701 }
5702 }
5703
5704 /*
5705 * enable software workaround for errata:
5706 * SNB: BJ122
5707 * IVB: BV98
5708 * HSW: HSD29
5709 *
5710 * Only needed when HT is enabled. However, detecting whether
5711 * HT is enabled is difficult (model specific). So instead,
5712 * we enable the workaround during early boot and verify whether
5713 * it is needed in a later initcall phase, once valid topology
5714 * information is available to check if HT is actually enabled.
5715 */
5716 static __init void intel_ht_bug(void)
5717 {
5718 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5719
5720 x86_pmu.start_scheduling = intel_start_scheduling;
5721 x86_pmu.commit_scheduling = intel_commit_scheduling;
5722 x86_pmu.stop_scheduling = intel_stop_scheduling;
5723 }
5724
5725 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5726 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
5727
5728 /* Haswell special events */
5729 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5730 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5731 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5732 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5733 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5734 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5735 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5736 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5737 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5738 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5739 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5740 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
5741
5742 static struct attribute *hsw_events_attrs[] = {
5743 EVENT_PTR(td_slots_issued),
5744 EVENT_PTR(td_slots_retired),
5745 EVENT_PTR(td_fetch_bubbles),
5746 EVENT_PTR(td_total_slots),
5747 EVENT_PTR(td_total_slots_scale),
5748 EVENT_PTR(td_recovery_bubbles),
5749 EVENT_PTR(td_recovery_bubbles_scale),
5750 NULL
5751 };
5752
5753 static struct attribute *hsw_mem_events_attrs[] = {
5754 EVENT_PTR(mem_ld_hsw),
5755 EVENT_PTR(mem_st_hsw),
5756 NULL,
5757 };
5758
5759 static struct attribute *hsw_tsx_events_attrs[] = {
5760 EVENT_PTR(tx_start),
5761 EVENT_PTR(tx_commit),
5762 EVENT_PTR(tx_abort),
5763 EVENT_PTR(tx_capacity),
5764 EVENT_PTR(tx_conflict),
5765 EVENT_PTR(el_start),
5766 EVENT_PTR(el_commit),
5767 EVENT_PTR(el_abort),
5768 EVENT_PTR(el_capacity),
5769 EVENT_PTR(el_conflict),
5770 EVENT_PTR(cycles_t),
5771 EVENT_PTR(cycles_ct),
5772 NULL
5773 };
5774
5775 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5776 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5777 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5778 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5779
5780 static struct attribute *icl_events_attrs[] = {
5781 EVENT_PTR(mem_ld_hsw),
5782 EVENT_PTR(mem_st_hsw),
5783 NULL,
5784 };
5785
5786 static struct attribute *icl_td_events_attrs[] = {
5787 EVENT_PTR(slots),
5788 EVENT_PTR(td_retiring),
5789 EVENT_PTR(td_bad_spec),
5790 EVENT_PTR(td_fe_bound),
5791 EVENT_PTR(td_be_bound),
5792 NULL,
5793 };
5794
5795 static struct attribute *icl_tsx_events_attrs[] = {
5796 EVENT_PTR(tx_start),
5797 EVENT_PTR(tx_abort),
5798 EVENT_PTR(tx_commit),
5799 EVENT_PTR(tx_capacity_read),
5800 EVENT_PTR(tx_capacity_write),
5801 EVENT_PTR(tx_conflict),
5802 EVENT_PTR(el_start),
5803 EVENT_PTR(el_abort),
5804 EVENT_PTR(el_commit),
5805 EVENT_PTR(el_capacity_read),
5806 EVENT_PTR(el_capacity_write),
5807 EVENT_PTR(el_conflict),
5808 EVENT_PTR(cycles_t),
5809 EVENT_PTR(cycles_ct),
5810 NULL,
5811 };
5812
5813
5814 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5815 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5816
5817 static struct attribute *glc_events_attrs[] = {
5818 EVENT_PTR(mem_ld_hsw),
5819 EVENT_PTR(mem_st_spr),
5820 EVENT_PTR(mem_ld_aux),
5821 NULL,
5822 };
5823
5824 static struct attribute *glc_td_events_attrs[] = {
5825 EVENT_PTR(slots),
5826 EVENT_PTR(td_retiring),
5827 EVENT_PTR(td_bad_spec),
5828 EVENT_PTR(td_fe_bound),
5829 EVENT_PTR(td_be_bound),
5830 EVENT_PTR(td_heavy_ops),
5831 EVENT_PTR(td_br_mispredict),
5832 EVENT_PTR(td_fetch_lat),
5833 EVENT_PTR(td_mem_bound),
5834 NULL,
5835 };
5836
5837 static struct attribute *glc_tsx_events_attrs[] = {
5838 EVENT_PTR(tx_start),
5839 EVENT_PTR(tx_abort),
5840 EVENT_PTR(tx_commit),
5841 EVENT_PTR(tx_capacity_read),
5842 EVENT_PTR(tx_capacity_write),
5843 EVENT_PTR(tx_conflict),
5844 EVENT_PTR(cycles_t),
5845 EVENT_PTR(cycles_ct),
5846 NULL,
5847 };
5848
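/*
 * "freeze_on_smi" sysfs knob: writing 0 or 1 toggles freezing of the
 * counters on SMM entry across all CPUs, e.g. (path assumed, for
 * illustration):
 *
 *   echo 0 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 */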
5849 static ssize_t freeze_on_smi_show(struct device *cdev,
5850 struct device_attribute *attr,
5851 char *buf)
5852 {
5853 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5854 }
5855
5856 static DEFINE_MUTEX(freeze_on_smi_mutex);
5857
5858 static ssize_t freeze_on_smi_store(struct device *cdev,
5859 struct device_attribute *attr,
5860 const char *buf, size_t count)
5861 {
5862 unsigned long val;
5863 ssize_t ret;
5864
5865 ret = kstrtoul(buf, 0, &val);
5866 if (ret)
5867 return ret;
5868
5869 if (val > 1)
5870 return -EINVAL;
5871
5872 mutex_lock(&freeze_on_smi_mutex);
5873
5874 if (x86_pmu.attr_freeze_on_smi == val)
5875 goto done;
5876
5877 x86_pmu.attr_freeze_on_smi = val;
5878
5879 cpus_read_lock();
5880 on_each_cpu(flip_smm_bit, &val, 1);
5881 cpus_read_unlock();
5882 done:
5883 mutex_unlock(&freeze_on_smi_mutex);
5884
5885 return count;
5886 }
5887
5888 static void update_tfa_sched(void *ignored)
5889 {
5890 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5891
5892 /*
5893 * Check if PMC3 is used, and if so,
5894 * force schedule out all event types in all contexts.
5895 */
5896 if (test_bit(3, cpuc->active_mask))
5897 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5898 }
5899
5900 static ssize_t show_sysctl_tfa(struct device *cdev,
5901 struct device_attribute *attr,
5902 char *buf)
5903 {
5904 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5905 }
5906
5907 static ssize_t set_sysctl_tfa(struct device *cdev,
5908 struct device_attribute *attr,
5909 const char *buf, size_t count)
5910 {
5911 bool val;
5912 ssize_t ret;
5913
5914 ret = kstrtobool(buf, &val);
5915 if (ret)
5916 return ret;
5917
5918 /* no change */
5919 if (val == allow_tsx_force_abort)
5920 return count;
5921
5922 allow_tsx_force_abort = val;
5923
5924 cpus_read_lock();
5925 on_each_cpu(update_tfa_sched, NULL, 1);
5926 cpus_read_unlock();
5927
5928 return count;
5929 }
5930
5931
5932 static DEVICE_ATTR_RW(freeze_on_smi);
5933
5934 static ssize_t branches_show(struct device *cdev,
5935 struct device_attribute *attr,
5936 char *buf)
5937 {
5938 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5939 }
5940
5941 static DEVICE_ATTR_RO(branches);
5942
5943 static ssize_t branch_counter_nr_show(struct device *cdev,
5944 struct device_attribute *attr,
5945 char *buf)
5946 {
5947 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
5948 }
5949
5950 static DEVICE_ATTR_RO(branch_counter_nr);
5951
5952 static ssize_t branch_counter_width_show(struct device *cdev,
5953 struct device_attribute *attr,
5954 char *buf)
5955 {
5956 return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
5957 }
5958
5959 static DEVICE_ATTR_RO(branch_counter_width);
5960
5961 static struct attribute *lbr_attrs[] = {
5962 &dev_attr_branches.attr,
5963 &dev_attr_branch_counter_nr.attr,
5964 &dev_attr_branch_counter_width.attr,
5965 NULL
5966 };
5967
5968 static umode_t
5969 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5970 {
5971 /* branches */
5972 if (i == 0)
5973 return x86_pmu.lbr_nr ? attr->mode : 0;
5974
5975 return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
5976 }
5977
5978 static char pmu_name_str[30];
5979
5980 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
5981
5982 static struct attribute *intel_pmu_caps_attrs[] = {
5983 &dev_attr_pmu_name.attr.attr,
5984 NULL
5985 };
5986
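/*
 * "allow_tsx_force_abort" sysfs knob: when set, PMC3 is reclaimed for
 * general use at the cost of TSX transactions being force aborted;
 * flipping it reschedules events on all CPUs via update_tfa_sched().
 */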
5987 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5988 show_sysctl_tfa,
5989 set_sysctl_tfa);
5990
5991 static struct attribute *intel_pmu_attrs[] = {
5992 &dev_attr_freeze_on_smi.attr,
5993 &dev_attr_allow_tsx_force_abort.attr,
5994 NULL,
5995 };
5996
5997 static umode_t
5998 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5999 {
6000 if (attr == &dev_attr_allow_tsx_force_abort.attr)
6001 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
6002
6003 return attr->mode;
6004 }
6005
6006 static umode_t
6007 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6008 {
6009 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
6010 }
6011
6012 static umode_t
6013 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6014 {
6015 return x86_pmu.pebs ? attr->mode : 0;
6016 }
6017
6018 static umode_t
6019 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6020 {
6021 if (attr == &event_attr_mem_ld_aux.attr.attr)
6022 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
6023
6024 return pebs_is_visible(kobj, attr, i);
6025 }
6026
6027 static umode_t
6028 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6029 {
6030 return x86_pmu.version >= 2 ? attr->mode : 0;
6031 }
6032
6033 static umode_t
6034 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6035 {
6036 /*
6037 * Hide the perf metrics topdown events
6038 * if the feature is not enumerated.
6039 */
6040 if (x86_pmu.num_topdown_events)
6041 return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
6042
6043 return attr->mode;
6044 }
6045
6046 static struct attribute_group group_events_td = {
6047 .name = "events",
6048 .is_visible = td_is_visible,
6049 };
6050
6051 static struct attribute_group group_events_mem = {
6052 .name = "events",
6053 .is_visible = mem_is_visible,
6054 };
6055
6056 static struct attribute_group group_events_tsx = {
6057 .name = "events",
6058 .is_visible = tsx_is_visible,
6059 };
6060
6061 static struct attribute_group group_caps_gen = {
6062 .name = "caps",
6063 .attrs = intel_pmu_caps_attrs,
6064 };
6065
6066 static struct attribute_group group_caps_lbr = {
6067 .name = "caps",
6068 .attrs = lbr_attrs,
6069 .is_visible = lbr_is_visible,
6070 };
6071
6072 static struct attribute_group group_format_extra = {
6073 .name = "format",
6074 .is_visible = exra_is_visible,
6075 };
6076
6077 static struct attribute_group group_format_extra_skl = {
6078 .name = "format",
6079 .is_visible = exra_is_visible,
6080 };
6081
6082 static struct attribute_group group_format_evtsel_ext = {
6083 .name = "format",
6084 .attrs = format_evtsel_ext_attrs,
6085 .is_visible = evtsel_ext_is_visible,
6086 };
6087
6088 static struct attribute_group group_default = {
6089 .attrs = intel_pmu_attrs,
6090 .is_visible = default_is_visible,
6091 };
6092
6093 static const struct attribute_group *attr_update[] = {
6094 &group_events_td,
6095 &group_events_mem,
6096 &group_events_tsx,
6097 &group_caps_gen,
6098 &group_caps_lbr,
6099 &group_format_extra,
6100 &group_format_extra_skl,
6101 &group_format_evtsel_ext,
6102 &group_default,
6103 NULL,
6104 };
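/*
 * These groups are merged into the PMU's sysfs directory through
 * pmu::attr_update; each group's ->is_visible callback decides which
 * of its attributes are actually exposed on this system.
 */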
6105
6106 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
6107 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
6108 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
6109 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
6110 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
6111 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
6112 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
6113 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
6114 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
6115
6116 static struct attribute *adl_hybrid_events_attrs[] = {
6117 EVENT_PTR(slots_adl),
6118 EVENT_PTR(td_retiring_adl),
6119 EVENT_PTR(td_bad_spec_adl),
6120 EVENT_PTR(td_fe_bound_adl),
6121 EVENT_PTR(td_be_bound_adl),
6122 EVENT_PTR(td_heavy_ops_adl),
6123 EVENT_PTR(td_br_mis_adl),
6124 EVENT_PTR(td_fetch_lat_adl),
6125 EVENT_PTR(td_mem_bound_adl),
6126 NULL,
6127 };
6128
6129 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
6130 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
6131 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
6132
6133 static struct attribute *lnl_hybrid_events_attrs[] = {
6134 EVENT_PTR(slots_adl),
6135 EVENT_PTR(td_retiring_lnl),
6136 EVENT_PTR(td_bad_spec_adl),
6137 EVENT_PTR(td_fe_bound_lnl),
6138 EVENT_PTR(td_be_bound_lnl),
6139 EVENT_PTR(td_heavy_ops_adl),
6140 EVENT_PTR(td_br_mis_adl),
6141 EVENT_PTR(td_fetch_lat_adl),
6142 EVENT_PTR(td_mem_bound_adl),
6143 NULL
6144 };
6145
6146 /* The event string must be in PMU IDX order. */
6147 EVENT_ATTR_STR_HYBRID(topdown-retiring,
6148 td_retiring_arl_h,
6149 "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
6150 hybrid_big_small_tiny);
6151 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
6152 td_bad_spec_arl_h,
6153 "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
6154 hybrid_big_small_tiny);
6155 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
6156 td_fe_bound_arl_h,
6157 "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
6158 hybrid_big_small_tiny);
6159 EVENT_ATTR_STR_HYBRID(topdown-be-bound,
6160 td_be_bound_arl_h,
6161 "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
6162 hybrid_big_small_tiny);
6163
6164 static struct attribute *arl_h_hybrid_events_attrs[] = {
6165 EVENT_PTR(slots_adl),
6166 EVENT_PTR(td_retiring_arl_h),
6167 EVENT_PTR(td_bad_spec_arl_h),
6168 EVENT_PTR(td_fe_bound_arl_h),
6169 EVENT_PTR(td_be_bound_arl_h),
6170 EVENT_PTR(td_heavy_ops_adl),
6171 EVENT_PTR(td_br_mis_adl),
6172 EVENT_PTR(td_fetch_lat_adl),
6173 EVENT_PTR(td_mem_bound_adl),
6174 NULL,
6175 };
6176
6177 /* Must be in IDX order */
6178 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
6179 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
6180 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
6181
6182 static struct attribute *adl_hybrid_mem_attrs[] = {
6183 EVENT_PTR(mem_ld_adl),
6184 EVENT_PTR(mem_st_adl),
6185 EVENT_PTR(mem_ld_aux_adl),
6186 NULL,
6187 };
6188
6189 static struct attribute *mtl_hybrid_mem_attrs[] = {
6190 EVENT_PTR(mem_ld_adl),
6191 EVENT_PTR(mem_st_adl),
6192 NULL
6193 };
6194
6195 EVENT_ATTR_STR_HYBRID(mem-loads,
6196 mem_ld_arl_h,
6197 "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
6198 hybrid_big_small_tiny);
6199 EVENT_ATTR_STR_HYBRID(mem-stores,
6200 mem_st_arl_h,
6201 "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
6202 hybrid_big_small_tiny);
6203
6204 static struct attribute *arl_h_hybrid_mem_attrs[] = {
6205 EVENT_PTR(mem_ld_arl_h),
6206 EVENT_PTR(mem_st_arl_h),
6207 NULL,
6208 };
6209
6210 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
6211 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
6212 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
6213 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
6214 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
6215 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
6216 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
6217 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
6218
6219 static struct attribute *adl_hybrid_tsx_attrs[] = {
6220 EVENT_PTR(tx_start_adl),
6221 EVENT_PTR(tx_abort_adl),
6222 EVENT_PTR(tx_commit_adl),
6223 EVENT_PTR(tx_capacity_read_adl),
6224 EVENT_PTR(tx_capacity_write_adl),
6225 EVENT_PTR(tx_conflict_adl),
6226 EVENT_PTR(cycles_t_adl),
6227 EVENT_PTR(cycles_ct_adl),
6228 NULL,
6229 };
6230
6231 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
6232 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
6233 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
6234 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small_tiny);
6235 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
6236
6237 #define ADL_HYBRID_RTM_FORMAT_ATTR \
6238 FORMAT_HYBRID_PTR(in_tx), \
6239 FORMAT_HYBRID_PTR(in_tx_cp)
6240
6241 #define ADL_HYBRID_FORMAT_ATTR \
6242 FORMAT_HYBRID_PTR(offcore_rsp), \
6243 FORMAT_HYBRID_PTR(ldlat), \
6244 FORMAT_HYBRID_PTR(frontend)
6245
6246 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
6247 ADL_HYBRID_RTM_FORMAT_ATTR,
6248 ADL_HYBRID_FORMAT_ATTR,
6249 NULL
6250 };
6251
6252 static struct attribute *adl_hybrid_extra_attr[] = {
6253 ADL_HYBRID_FORMAT_ATTR,
6254 NULL
6255 };
6256
6257 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny);
6258
6259 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
6260 ADL_HYBRID_RTM_FORMAT_ATTR,
6261 ADL_HYBRID_FORMAT_ATTR,
6262 FORMAT_HYBRID_PTR(snoop_rsp),
6263 NULL
6264 };
6265
6266 static struct attribute *mtl_hybrid_extra_attr[] = {
6267 ADL_HYBRID_FORMAT_ATTR,
6268 FORMAT_HYBRID_PTR(snoop_rsp),
6269 NULL
6270 };
6271
6272 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
6273 {
6274 struct device *dev = kobj_to_dev(kobj);
6275 struct x86_hybrid_pmu *pmu =
6276 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6277 struct perf_pmu_events_hybrid_attr *pmu_attr =
6278 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
6279
6280 return pmu->pmu_type & pmu_attr->pmu_type;
6281 }
6282
6283 static umode_t hybrid_events_is_visible(struct kobject *kobj,
6284 struct attribute *attr, int i)
6285 {
6286 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
6287 }
6288
6289 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
6290 {
6291 int cpu = cpumask_first(&pmu->supported_cpus);
6292
6293 return (cpu >= nr_cpu_ids) ? -1 : cpu;
6294 }
6295
6296 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
6297 struct attribute *attr, int i)
6298 {
6299 struct device *dev = kobj_to_dev(kobj);
6300 struct x86_hybrid_pmu *pmu =
6301 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6302 int cpu = hybrid_find_supported_cpu(pmu);
6303
6304 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
6305 }
6306
6307 static umode_t hybrid_format_is_visible(struct kobject *kobj,
6308 struct attribute *attr, int i)
6309 {
6310 struct device *dev = kobj_to_dev(kobj);
6311 struct x86_hybrid_pmu *pmu =
6312 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6313 struct perf_pmu_format_hybrid_attr *pmu_attr =
6314 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
6315 int cpu = hybrid_find_supported_cpu(pmu);
6316
6317 return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
6318 }
6319
6320 static umode_t hybrid_td_is_visible(struct kobject *kobj,
6321 struct attribute *attr, int i)
6322 {
6323 struct device *dev = kobj_to_dev(kobj);
6324 struct x86_hybrid_pmu *pmu =
6325 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6326
6327 if (!is_attr_for_this_pmu(kobj, attr))
6328 return 0;
6329
6331 /* Only the big core supports perf metrics */
6332 if (pmu->pmu_type == hybrid_big)
6333 return pmu->intel_cap.perf_metrics ? attr->mode : 0;
6334
6335 return attr->mode;
6336 }
6337
6338 static struct attribute_group hybrid_group_events_td = {
6339 .name = "events",
6340 .is_visible = hybrid_td_is_visible,
6341 };
6342
6343 static struct attribute_group hybrid_group_events_mem = {
6344 .name = "events",
6345 .is_visible = hybrid_events_is_visible,
6346 };
6347
6348 static struct attribute_group hybrid_group_events_tsx = {
6349 .name = "events",
6350 .is_visible = hybrid_tsx_is_visible,
6351 };
6352
6353 static struct attribute_group hybrid_group_format_extra = {
6354 .name = "format",
6355 .is_visible = hybrid_format_is_visible,
6356 };
6357
6358 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
6359 struct device_attribute *attr,
6360 char *buf)
6361 {
6362 struct x86_hybrid_pmu *pmu =
6363 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6364
6365 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
6366 }
6367
6368 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
6369 static struct attribute *intel_hybrid_cpus_attrs[] = {
6370 &dev_attr_cpus.attr,
6371 NULL,
6372 };
6373
6374 static struct attribute_group hybrid_group_cpus = {
6375 .attrs = intel_hybrid_cpus_attrs,
6376 };
6377
6378 static const struct attribute_group *hybrid_attr_update[] = {
6379 &hybrid_group_events_td,
6380 &hybrid_group_events_mem,
6381 &hybrid_group_events_tsx,
6382 &group_caps_gen,
6383 &group_caps_lbr,
6384 &hybrid_group_format_extra,
6385 &group_format_evtsel_ext,
6386 &group_default,
6387 &hybrid_group_cpus,
6388 NULL,
6389 };
6390
6391 static struct attribute *empty_attrs;
6392
6393 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
6394 u64 cntr_mask,
6395 u64 fixed_cntr_mask,
6396 u64 intel_ctrl)
6397 {
6398 struct event_constraint *c;
6399
6400 if (!event_constraints)
6401 return;
6402
6403 /*
6404 * An event on fixed counter 2 (REF_CYCLES) only works on that
6405 * counter, so do not extend its mask to the generic counters.
6406 */
6407 for_each_event_constraint(c, event_constraints) {
6408 /*
6409 * Don't extend the topdown slots and metrics
6410 * events to the generic counters.
6411 */
6412 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
6413 /*
6414 * Disable topdown slots and metrics events,
6415 * if slots event is not in CPUID.
6416 */
6417 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
6418 c->idxmsk64 = 0;
6419 c->weight = hweight64(c->idxmsk64);
6420 continue;
6421 }
6422
6423 if (c->cmask == FIXED_EVENT_FLAGS) {
6424 /* Disabled fixed counters which are not in CPUID */
6425 c->idxmsk64 &= intel_ctrl;
6426
6427 /*
6428 * Don't extend the pseudo-encoding to the
6429 * generic counters
6430 */
6431 if (!use_fixed_pseudo_encoding(c->code))
6432 c->idxmsk64 |= cntr_mask;
6433 }
6434 c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
6435 c->weight = hweight64(c->idxmsk64);
6436 }
6437 }
6438
6439 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
6440 {
6441 struct extra_reg *er;
6442
6443 /*
6444 * Accessing an extra MSR may cause #GP under certain circumstances,
6445 * e.g. KVM doesn't support offcore events.
6446 * Check all extra_regs here.
6447 */
6448 if (!extra_regs)
6449 return;
6450
6451 for (er = extra_regs; er->msr; er++) {
6452 er->extra_msr_access = check_msr(er->msr, 0x11UL);
6453 /* Disable LBR select mapping */
6454 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
6455 x86_pmu.lbr_sel_map = NULL;
6456 }
6457 }
6458
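/*
 * On perfmon v6+ the counter MSRs use a uniformly strided layout, so
 * both the eventsel and the counter MSR for @index sit at the same
 * MSR_IA32_PMC_V6_STEP multiple from their respective bases.
 */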
6459 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
6460 {
6461 return MSR_IA32_PMC_V6_STEP * index;
6462 }
6463
6464 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
6465 { hybrid_small, "cpu_atom" },
6466 { hybrid_big, "cpu_core" },
6467 { hybrid_tiny, "cpu_lowpower" },
6468 };
6469
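/*
 * Allocate and pre-seed one struct x86_hybrid_pmu per PMU type set in
 * @pmus; e.g. a caller might pass hybrid_big_small on a two-type part,
 * with each set bit selecting an entry of intel_hybrid_pmu_type_map.
 */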
6470 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
6471 {
6472 unsigned long pmus_mask = pmus;
6473 struct x86_hybrid_pmu *pmu;
6474 int idx = 0, bit;
6475
6476 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
6477 x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
6478 sizeof(struct x86_hybrid_pmu),
6479 GFP_KERNEL);
6480 if (!x86_pmu.hybrid_pmu)
6481 return -ENOMEM;
6482
6483 static_branch_enable(&perf_is_hybrid);
6484 x86_pmu.filter = intel_pmu_filter;
6485
6486 for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
6487 pmu = &x86_pmu.hybrid_pmu[idx++];
6488 pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
6489 pmu->name = intel_hybrid_pmu_type_map[bit].name;
6490
6491 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
6492 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
6493 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
6494 pmu->config_mask = X86_RAW_EVENT_MASK;
6495 pmu->unconstrained = (struct event_constraint)
6496 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
6497 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
6498
6499 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6500 if (pmu->pmu_type & hybrid_small_tiny) {
6501 pmu->intel_cap.perf_metrics = 0;
6502 pmu->mid_ack = true;
6503 } else if (pmu->pmu_type & hybrid_big) {
6504 pmu->intel_cap.perf_metrics = 1;
6505 pmu->late_ack = true;
6506 }
6507 }
6508
6509 return 0;
6510 }
6511
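/*
 * If fixed counter 2 (REF_CYCLES) is not enumerated, fall back to the
 * 0x013c encoding for PERF_COUNT_HW_REF_CPU_CYCLES so the event can
 * still be scheduled on a general-purpose counter.
 */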
6512 static __always_inline void intel_pmu_ref_cycles_ext(void)
6513 {
6514 if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
6515 intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
6516 }
6517
6518 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
6519 {
6520 x86_pmu.late_ack = true;
6521 x86_pmu.limit_period = glc_limit_period;
6522 x86_pmu.pebs_aliases = NULL;
6523 x86_pmu.pebs_prec_dist = true;
6524 x86_pmu.pebs_block = true;
6525 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6526 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6527 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6528 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6529 x86_pmu.lbr_pt_coexist = true;
6530 x86_pmu.num_topdown_events = 8;
6531 static_call_update(intel_pmu_update_topdown_event,
6532 &icl_update_topdown_event);
6533 static_call_update(intel_pmu_set_topdown_event_period,
6534 &icl_set_topdown_event_period);
6535
6536 memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6537 memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6538 hybrid(pmu, event_constraints) = intel_glc_event_constraints;
6539 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
6540
6541 intel_pmu_ref_cycles_ext();
6542 }
6543
6544 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
6545 {
6546 x86_pmu.mid_ack = true;
6547 x86_pmu.limit_period = glc_limit_period;
6548 x86_pmu.pebs_aliases = NULL;
6549 x86_pmu.pebs_prec_dist = true;
6550 x86_pmu.pebs_block = true;
6551 x86_pmu.lbr_pt_coexist = true;
6552 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6553 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6554
6555 memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6556 memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6557 hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6558 hybrid(pmu, event_constraints) = intel_grt_event_constraints;
6559 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
6560 hybrid(pmu, extra_regs) = intel_grt_extra_regs;
6561
6562 intel_pmu_ref_cycles_ext();
6563 }
6564
6565 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
6566 {
6567 intel_pmu_init_glc(pmu);
6568 hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
6569 hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
6570 hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
6571 }
6572
6573 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
6574 {
6575 intel_pmu_init_grt(pmu);
6576 hybrid(pmu, event_constraints) = intel_skt_event_constraints;
6577 hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
6578 }
6579
6580 __init int intel_pmu_init(void)
6581 {
6582 struct attribute **extra_skl_attr = &empty_attrs;
6583 struct attribute **extra_attr = &empty_attrs;
6584 struct attribute **td_attr = &empty_attrs;
6585 struct attribute **mem_attr = &empty_attrs;
6586 struct attribute **tsx_attr = &empty_attrs;
6587 union cpuid10_edx edx;
6588 union cpuid10_eax eax;
6589 union cpuid10_ebx ebx;
6590 unsigned int fixed_mask;
6591 bool pmem = false;
6592 int version, i;
6593 char *name;
6594 struct x86_hybrid_pmu *pmu;
6595
6596 /* Architectural Perfmon was introduced starting with Core "Yonah" */
6597 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
6598 switch (boot_cpu_data.x86) {
6599 case 6:
6600 if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH)
6601 return p6_pmu_init();
6602 break;
6603 case 11:
6604 return knc_pmu_init();
6605 case 15:
6606 return p4_pmu_init();
6607 }
6608
6609 pr_cont("unsupported CPU family %d model %d ",
6610 boot_cpu_data.x86, boot_cpu_data.x86_model);
6611 return -ENODEV;
6612 }
6613
6614 /*
6615 * Check whether the Architectural PerfMon supports
6616 * the Branch Misses Retired hw_event.
6617 */
6618 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
6619 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
6620 return -ENODEV;
6621
6622 version = eax.split.version_id;
6623 if (version < 2)
6624 x86_pmu = core_pmu;
6625 else
6626 x86_pmu = intel_pmu;
6627
6628 x86_pmu.version = version;
6629 x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
6630 x86_pmu.cntval_bits = eax.split.bit_width;
6631 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
6632
6633 x86_pmu.events_maskl = ebx.full;
6634 x86_pmu.events_mask_len = eax.split.mask_length;
6635
6636 x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
6637 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
6638
6639 /*
6640 * Quirk: v2 perfmon does not report fixed-purpose events, so
6641 * assume at least 3 events, when not running in a hypervisor:
6642 */
6643 if (version > 1 && version < 5) {
6644 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
6645
6646 x86_pmu.fixed_cntr_mask64 =
6647 GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
6648 } else if (version >= 5)
6649 x86_pmu.fixed_cntr_mask64 = fixed_mask;
6650
6651 if (boot_cpu_has(X86_FEATURE_PDCM)) {
6652 u64 capabilities;
6653
6654 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
6655 x86_pmu.intel_cap.capabilities = capabilities;
6656 }
6657
6658 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
6659 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
6660 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
6661 }
6662
6663 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
6664 intel_pmu_arch_lbr_init();
6665
6666 intel_ds_init();
6667
6668 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
6669
6670 if (version >= 5) {
6671 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
6672 if (x86_pmu.intel_cap.anythread_deprecated)
6673 pr_cont(" AnyThread deprecated, ");
6674 }
6675
6676 /*
6677 * Install the hw-cache-events table:
6678 */
6679 switch (boot_cpu_data.x86_vfm) {
6680 case INTEL_CORE_YONAH:
6681 pr_cont("Core events, ");
6682 name = "core";
6683 break;
6684
6685 case INTEL_CORE2_MEROM:
6686 x86_add_quirk(intel_clovertown_quirk);
6687 fallthrough;
6688
6689 case INTEL_CORE2_MEROM_L:
6690 case INTEL_CORE2_PENRYN:
6691 case INTEL_CORE2_DUNNINGTON:
6692 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
6693 sizeof(hw_cache_event_ids));
6694
6695 intel_pmu_lbr_init_core();
6696
6697 x86_pmu.event_constraints = intel_core2_event_constraints;
6698 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
6699 pr_cont("Core2 events, ");
6700 name = "core2";
6701 break;
6702
6703 case INTEL_NEHALEM:
6704 case INTEL_NEHALEM_EP:
6705 case INTEL_NEHALEM_EX:
6706 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
6707 sizeof(hw_cache_event_ids));
6708 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6709 sizeof(hw_cache_extra_regs));
6710
6711 intel_pmu_lbr_init_nhm();
6712
6713 x86_pmu.event_constraints = intel_nehalem_event_constraints;
6714 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
6715 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6716 x86_pmu.extra_regs = intel_nehalem_extra_regs;
6717 x86_pmu.limit_period = nhm_limit_period;
6718
6719 mem_attr = nhm_mem_events_attrs;
6720
6721 /* UOPS_ISSUED.STALLED_CYCLES */
6722 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6723 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6724 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6725 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6726 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6727
6728 intel_pmu_pebs_data_source_nhm();
6729 x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;

	case INTEL_ATOM_BONNELL:
	case INTEL_ATOM_BONNELL_MID:
	case INTEL_ATOM_SALTWELL:
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;

	case INTEL_ATOM_SILVERMONT:
	case INTEL_ATOM_SILVERMONT_D:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT:
	case INTEL_ATOM_SILVERMONT_MID2:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;

	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_D:
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;

	case INTEL_ATOM_GOLDMONT_PLUS:
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
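		/* Every counter, fixed ones included, can do PEBS here. */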
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		td_attr = glm_events_attrs;
		/* Goldmont Plus has a 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;

	case INTEL_ATOM_TREMONT_D:
	case INTEL_ATOM_TREMONT:
	case INTEL_ATOM_TREMONT_L:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
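		/* Mark the generic ITLB read-access event unsupported (-1 yields -EINVAL). */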
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_tnt_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Tremont events, ");
		name = "tremont";
		break;

	case INTEL_ATOM_GRACEMONT:
		intel_pmu_init_grt(NULL);
		intel_pmu_pebs_data_source_grt();
		x86_pmu.pebs_latency_data = grt_latency_data;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = nhm_format_attr;
		pr_cont("Gracemont events, ");
		name = "gracemont";
		break;

	case INTEL_ATOM_CRESTMONT:
	case INTEL_ATOM_CRESTMONT_X:
		intel_pmu_init_grt(NULL);
		x86_pmu.extra_regs = intel_cmt_extra_regs;
		intel_pmu_pebs_data_source_cmt();
		x86_pmu.pebs_latency_data = cmt_latency_data;
		x86_pmu.get_event_constraints = cmt_get_event_constraints;
		td_attr = cmt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = cmt_format_attr;
		pr_cont("Crestmont events, ");
		name = "crestmont";
		break;

	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;

	case INTEL_SANDYBRIDGE:
	case INTEL_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
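		/* The server parts have a wider set of valid OFFCORE_RSP bits. */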
		if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;

	case INTEL_IVYBRIDGE:
	case INTEL_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different from SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;

	case INTEL_HASWELL:
	case INTEL_HASWELL_X:
	case INTEL_HASWELL_L:
	case INTEL_HASWELL_G:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.limit_period = hsw_limit_period;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;

	case INTEL_BROADWELL:
	case INTEL_BROADWELL_D:
	case INTEL_BROADWELL_G:
	case INTEL_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.limit_period = bdw_limit_period;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;

	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;

	case INTEL_SKYLAKE_X:
		pmem = true;
		fallthrough;
	case INTEL_SKYLAKE_L:
	case INTEL_SKYLAKE:
	case INTEL_KABYLAKE_L:
	case INTEL_KABYLAKE:
	case INTEL_COMETLAKE_L:
	case INTEL_COMETLAKE:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(pmem);

		/*
		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
		 * TSX force abort hooks are not required on these systems. Only deploy
		 * the workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
		 */
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
		    !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}
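		/*
		 * With PMU_FL_TFA set, the allow_tsx_force_abort sysfs
		 * attribute selects whether PMC3 may be used at the cost
		 * of forcing RTM transactions to abort.
		 */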

		pr_cont("Skylake events, ");
		name = "skylake";
		break;

	case INTEL_ICELAKE_X:
	case INTEL_ICELAKE_D:
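		/* PEBS output is EPT-friendly on these parts, so KVM can expose it to guests. */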
		x86_pmu.pebs_ept = 1;
		pmem = true;
		fallthrough;
	case INTEL_ICELAKE_L:
	case INTEL_ICELAKE:
	case INTEL_TIGERLAKE_L:
	case INTEL_TIGERLAKE:
	case INTEL_ROCKETLAKE:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		td_attr = icl_td_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 4;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
		pr_cont("Icelake events, ");
		name = "icelake";
		break;

	case INTEL_SAPPHIRERAPIDS_X:
	case INTEL_EMERALDRAPIDS_X:
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		x86_pmu.extra_regs = intel_glc_extra_regs;
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		goto glc_common;

	case INTEL_GRANITERAPIDS_X:
	case INTEL_GRANITERAPIDS_D:
		x86_pmu.extra_regs = intel_rwc_extra_regs;
		pr_cont("Granite Rapids events, ");
		name = "granite_rapids";

	glc_common:
		intel_pmu_init_glc(NULL);
		x86_pmu.pebs_ept = 1;
		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = glc_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = glc_events_attrs;
		td_attr = glc_td_events_attrs;
		tsx_attr = glc_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(true);
		break;

	case INTEL_ALDERLAKE:
	case INTEL_ALDERLAKE_L:
	case INTEL_RAPTORLAKE:
	case INTEL_RAPTORLAKE_P:
	case INTEL_RAPTORLAKE_S:
		/*
		 * Alder Lake has two CPU types, Core and Atom.
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = grt_latency_data;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->cntr_mask64 <<= 2;
			pmu->cntr_mask64 |= 0x3;
			pmu->fixed_cntr_mask64 <<= 1;
			pmu->fixed_cntr_mask64 |= 0x1;
		} else {
			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
		}
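		/*
		 * On a hybrid part, CPUID leaf 0xA enumerates only the counters
		 * common to both core types; the shifts above add back the two
		 * GP and one fixed counter that only the P-cores have.
		 */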

		/*
		 * Quirk: for some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, CPUID leaf 0xA enumerates all counters
		 * of the P-cores but X86_FEATURE_HYBRID_CPU is still set. The
		 * code above would then mistakenly add extra counters for the
		 * P-cores. Correct the number of counters here.
		 */
		if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
		}

		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);

		pmu->extra_regs = intel_glc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);

		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		intel_pmu_pebs_data_source_adl();
		pr_cont("Alderlake Hybrid events, ");
		name = "alderlake_hybrid";
		break;

	case INTEL_METEORLAKE:
	case INTEL_METEORLAKE_L:
	case INTEL_ARROWLAKE_U:
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = cmt_latency_data;
		x86_pmu.get_event_constraints = mtl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = mtl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		pmu->extra_regs = intel_rwc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_mtl();
		pr_cont("Meteorlake Hybrid events, ");
		name = "meteorlake_hybrid";
		break;

	case INTEL_PANTHERLAKE_L:
		pr_cont("Pantherlake Hybrid events, ");
		name = "pantherlake_hybrid";
		goto lnl_common;

	case INTEL_LUNARLAKE_M:
	case INTEL_ARROWLAKE:
		pr_cont("Lunarlake Hybrid events, ");
		name = "lunarlake_hybrid";

	lnl_common:
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = lnl_latency_data;
		x86_pmu.get_event_constraints = mtl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;

		td_attr = lnl_hybrid_events_attrs;
		mem_attr = mtl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_lnc(&pmu->pmu);

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_skt(&pmu->pmu);

		intel_pmu_pebs_data_source_lnl();
		break;

	case INTEL_ARROWLAKE_H:
		intel_pmu_init_hybrid(hybrid_big_small_tiny);

		x86_pmu.pebs_latency_data = arl_h_latency_data;
		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
		x86_pmu.hw_config = arl_h_hw_config;

		td_attr = arl_h_hybrid_events_attrs;
		mem_attr = arl_h_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_lnc(&pmu->pmu);

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_skt(&pmu->pmu);
		/* Initialize low power Atom specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_arl_h();
		pr_cont("ArrowLake-H Hybrid events, ");
		name = "arrowlake_h_hybrid";
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up to
			 * 16 fixed counters. For fixed counter 4 and above, the
			 * pseudo-encoding is applied.
			 * The constraint list is cut to match the CPUID enumeration
			 * by inserting EVENT_CONSTRAINT_END (weight = -1) below.
			 */
			if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
				x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
			intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

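	/* Clip the counter masks to the supported maxima and derive intel_ctrl from them. */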
	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
				      &x86_pmu.fixed_cntr_mask64,
				      &x86_pmu.intel_ctrl);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.cntr_mask64,
					  x86_pmu.fixed_cntr_mask64,
					  x86_pmu.intel_ctrl);
	/*
	 * Accessing LBR MSRs may cause a #GP under certain circumstances.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR cannot be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
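	/*
	 * (check_msr() XORs the probe mask into the MSR, verifies the value
	 * reads back and restores the original contents, so a faulting or
	 * read-only MSR disqualifies the whole LBR stack above.)
	 */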

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
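		/*
		 * Keep the top bit clear: counters are programmed with the
		 * negated period, so the counter value is effectively signed
		 * and the MSB must stay usable for overflow detection.
		 */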
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	/* Support V6+ MSR Aliasing */
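	/*
	 * In the V6 alias range each counter's CFG/CTR MSR pair sits at a
	 * fixed stride from GP0; intel_pmu_v6_addr_offset() resolves it.
	 */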
	if (x86_pmu.version >= 6) {
		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
	}

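	/* Include the PERF_METRICS enable bit in the global control mask. */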
	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

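	/* The PEBS timing info field supplies a per-sample retire latency. */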
	if (x86_pmu.intel_cap.pebs_timing_info)
		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug);