// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/debugreg.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off, these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off, these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note that the low 8 bits of the eventsel code are not a contiguous
	 * field; they contain some bits which would #GP if set. These are
	 * masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0),	/* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_glc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * Event codes 0x2E and 0x3C are exceptions and have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 are likely to have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_lnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x20, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),

	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),

	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),

	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all measured in slots, where a slot is an issue
 * opportunity in a 4-wide pipeline. Some events are already reported
 * in slots; for cycle events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is additionally scaled to the pipeline
 * width, so we use a scale factor of 2: (count_t0 + count_t1) / 2 * 4.
 */
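/*
 * A worked example of the scaling above (illustrative numbers, not taken
 * from any particular machine): if thread 0 counts 300 cycles and thread 1
 * counts 100, the per-core average is (300 + 100) / 2 = 200 cycles, which
 * corresponds to 200 * 4 = 800 slots. Since the summed count (400) is what
 * gets exposed, the HT variants of the .scale attributes below use "2"
 * (400 * 2 == 800) where the non-HT variants use "4".
 */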

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots,			slots,			"event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring,	td_retiring,		"event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec,	td_bad_spec,		"event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound,	td_fe_bound,		"event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound,	td_be_bound,		"event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops,	td_heavy_ops,		"event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict,	td_br_mispredict,	"event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat,	td_fetch_lat,		"event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound,	td_mem_bound,		"event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off, these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
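/*
 * For illustration (values from intel_perfmon_event_map above): a generic
 * hardware event index is translated to a raw event encoding, e.g.
 * intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES) returns 0x003c and
 * intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES) returns 0x412e. Callers
 * are assumed to pass a valid index below PERF_COUNT_HW_MAX.
 */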

static __initconst const u64 glc_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS)   ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS)   ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
 },
};

static __initconst const u64 glc_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS)   ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS)   ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS)   ] = 0x3fb3000001,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
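/*
 * For illustration (derived from the bit definitions above): the LL
 * read-access encoding used in skl_hw_cache_extra_regs further below,
 * SKL_DEMAND_READ|SKL_LLC_ACCESS|SKL_ANY_SNOOP, expands to
 * bit 0 | bit 16 | bits 30-37 == 0x3fc0010001ULL, i.e. demand data
 * reads with any response and any snoop outcome.
 */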

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
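/*
 * For illustration (derived from the bit definitions above): the LL
 * read-access encoding used in snb_hw_cache_extra_regs below,
 * SNB_DMND_READ|SNB_L3_ACCESS, expands to (bit 0 | bit 7) | bit 16
 * == 0x10081ULL, i.e. demand (and LLC-prefetch) data reads with any
 * response type.
 */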

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
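/*
 * For illustration: unlike SKL_SNOOP_DRAM above, which is spelled out as
 * an explicit OR, HSW_SNOOP_DRAM is derived by masking:
 * HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM == bits 31-36, which the NODE
 * encodings in hsw_hw_cache_extra_regs below use to exclude non-DRAM
 * responses.
 */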


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
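/*
 * For illustration (derived from the bit definitions above): a demand-read
 * L3 miss is encoded as NHM_DMND_READ|NHM_L3_MISS == bit 0 | bits 12-15
 * == 0xf001, i.e. demand data reads answered by remote cache forward,
 * remote DRAM, local DRAM or non-DRAM (anything but an L3 hit).
 */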

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1643 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1644 	},
1645 	[ C(OP_WRITE) ] = {
1646 		[ C(RESULT_ACCESS) ] = -1,
1647 		[ C(RESULT_MISS)   ] = -1,
1648 	},
1649 	[ C(OP_PREFETCH) ] = {
1650 		[ C(RESULT_ACCESS) ] = 0,
1651 		[ C(RESULT_MISS)   ] = 0,
1652 	},
1653  },
1654  [ C(LL  ) ] = {
1655 	[ C(OP_READ) ] = {
1656 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1657 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1658 	},
1659 	[ C(OP_WRITE) ] = {
1660 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1661 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1662 	},
1663 	[ C(OP_PREFETCH) ] = {
1664 		[ C(RESULT_ACCESS) ] = 0,
1665 		[ C(RESULT_MISS)   ] = 0,
1666 	},
1667  },
1668  [ C(DTLB) ] = {
1669 	[ C(OP_READ) ] = {
1670 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1671 		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1672 	},
1673 	[ C(OP_WRITE) ] = {
1674 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1675 		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1676 	},
1677 	[ C(OP_PREFETCH) ] = {
1678 		[ C(RESULT_ACCESS) ] = 0,
1679 		[ C(RESULT_MISS)   ] = 0,
1680 	},
1681  },
1682  [ C(ITLB) ] = {
1683 	[ C(OP_READ) ] = {
1684 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1685 		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1686 	},
1687 	[ C(OP_WRITE) ] = {
1688 		[ C(RESULT_ACCESS) ] = -1,
1689 		[ C(RESULT_MISS)   ] = -1,
1690 	},
1691 	[ C(OP_PREFETCH) ] = {
1692 		[ C(RESULT_ACCESS) ] = -1,
1693 		[ C(RESULT_MISS)   ] = -1,
1694 	},
1695  },
1696  [ C(BPU ) ] = {
1697 	[ C(OP_READ) ] = {
1698 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1699 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1700 	},
1701 	[ C(OP_WRITE) ] = {
1702 		[ C(RESULT_ACCESS) ] = -1,
1703 		[ C(RESULT_MISS)   ] = -1,
1704 	},
1705 	[ C(OP_PREFETCH) ] = {
1706 		[ C(RESULT_ACCESS) ] = -1,
1707 		[ C(RESULT_MISS)   ] = -1,
1708 	},
1709  },
1710 };
1711 
1712 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1713 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1714 /* no_alloc_cycles.not_delivered */
1715 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1716 	       "event=0xca,umask=0x50");
1717 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1718 /* uops_retired.all */
1719 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1720 	       "event=0xc2,umask=0x10");
1721 /* uops_retired.all */
1722 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1723 	       "event=0xc2,umask=0x10");
1724 
1725 static struct attribute *slm_events_attrs[] = {
1726 	EVENT_PTR(td_total_slots_slm),
1727 	EVENT_PTR(td_total_slots_scale_slm),
1728 	EVENT_PTR(td_fetch_bubbles_slm),
1729 	EVENT_PTR(td_fetch_bubbles_scale_slm),
1730 	EVENT_PTR(td_slots_issued_slm),
1731 	EVENT_PTR(td_slots_retired_slm),
1732 	NULL
1733 };
1734 
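/*
 * Editorial note (hedged): EVENT_ATTR_STR() exports each string above as
 * a named event alias under /sys/bus/event_source/devices/cpu/events/,
 * with the ".scale" entries exposed as the matching scale files, so
 * tooling can resolve the topdown events by name, e.g.:
 *
 *	perf stat -e '{topdown-total-slots,topdown-fetch-bubbles}' -a -- sleep 1
 */
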
1735 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1736 {
1737 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1738 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1739 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1740 	EVENT_EXTRA_END
1741 };
1742 
1743 #define SLM_DMND_READ		SNB_DMND_DATA_RD
1744 #define SLM_DMND_WRITE		SNB_DMND_RFO
1745 #define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)
1746 
1747 #define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1748 #define SLM_LLC_ACCESS		SNB_RESP_ANY
1749 #define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
1750 
1751 static __initconst const u64 slm_hw_cache_extra_regs
1752 				[PERF_COUNT_HW_CACHE_MAX]
1753 				[PERF_COUNT_HW_CACHE_OP_MAX]
1754 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1755 {
1756  [ C(LL  ) ] = {
1757 	[ C(OP_READ) ] = {
1758 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1759 		[ C(RESULT_MISS)   ] = 0,
1760 	},
1761 	[ C(OP_WRITE) ] = {
1762 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1763 		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1764 	},
1765 	[ C(OP_PREFETCH) ] = {
1766 		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1767 		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1768 	},
1769  },
1770 };
1771 
1772 static __initconst const u64 slm_hw_cache_event_ids
1773 				[PERF_COUNT_HW_CACHE_MAX]
1774 				[PERF_COUNT_HW_CACHE_OP_MAX]
1775 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1776 {
1777  [ C(L1D) ] = {
1778 	[ C(OP_READ) ] = {
1779 		[ C(RESULT_ACCESS) ] = 0,
1780 		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1781 	},
1782 	[ C(OP_WRITE) ] = {
1783 		[ C(RESULT_ACCESS) ] = 0,
1784 		[ C(RESULT_MISS)   ] = 0,
1785 	},
1786 	[ C(OP_PREFETCH) ] = {
1787 		[ C(RESULT_ACCESS) ] = 0,
1788 		[ C(RESULT_MISS)   ] = 0,
1789 	},
1790  },
1791  [ C(L1I ) ] = {
1792 	[ C(OP_READ) ] = {
1793 		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1794 		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1795 	},
1796 	[ C(OP_WRITE) ] = {
1797 		[ C(RESULT_ACCESS) ] = -1,
1798 		[ C(RESULT_MISS)   ] = -1,
1799 	},
1800 	[ C(OP_PREFETCH) ] = {
1801 		[ C(RESULT_ACCESS) ] = 0,
1802 		[ C(RESULT_MISS)   ] = 0,
1803 	},
1804  },
1805  [ C(LL  ) ] = {
1806 	[ C(OP_READ) ] = {
1807 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1808 		[ C(RESULT_ACCESS) ] = 0x01b7,
1809 		[ C(RESULT_MISS)   ] = 0,
1810 	},
1811 	[ C(OP_WRITE) ] = {
1812 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1813 		[ C(RESULT_ACCESS) ] = 0x01b7,
1814 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1815 		[ C(RESULT_MISS)   ] = 0x01b7,
1816 	},
1817 	[ C(OP_PREFETCH) ] = {
1818 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1819 		[ C(RESULT_ACCESS) ] = 0x01b7,
1820 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1821 		[ C(RESULT_MISS)   ] = 0x01b7,
1822 	},
1823  },
1824  [ C(DTLB) ] = {
1825 	[ C(OP_READ) ] = {
1826 		[ C(RESULT_ACCESS) ] = 0,
1827 		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1828 	},
1829 	[ C(OP_WRITE) ] = {
1830 		[ C(RESULT_ACCESS) ] = 0,
1831 		[ C(RESULT_MISS)   ] = 0,
1832 	},
1833 	[ C(OP_PREFETCH) ] = {
1834 		[ C(RESULT_ACCESS) ] = 0,
1835 		[ C(RESULT_MISS)   ] = 0,
1836 	},
1837  },
1838  [ C(ITLB) ] = {
1839 	[ C(OP_READ) ] = {
1840 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1841 		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1842 	},
1843 	[ C(OP_WRITE) ] = {
1844 		[ C(RESULT_ACCESS) ] = -1,
1845 		[ C(RESULT_MISS)   ] = -1,
1846 	},
1847 	[ C(OP_PREFETCH) ] = {
1848 		[ C(RESULT_ACCESS) ] = -1,
1849 		[ C(RESULT_MISS)   ] = -1,
1850 	},
1851  },
1852  [ C(BPU ) ] = {
1853 	[ C(OP_READ) ] = {
1854 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1855 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1856 	},
1857 	[ C(OP_WRITE) ] = {
1858 		[ C(RESULT_ACCESS) ] = -1,
1859 		[ C(RESULT_MISS)   ] = -1,
1860 	},
1861 	[ C(OP_PREFETCH) ] = {
1862 		[ C(RESULT_ACCESS) ] = -1,
1863 		[ C(RESULT_MISS)   ] = -1,
1864 	},
1865  },
1866 };
1867 
1868 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1869 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1870 /* UOPS_NOT_DELIVERED.ANY */
1871 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1872 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1873 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1874 /* UOPS_RETIRED.ANY */
1875 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1876 /* UOPS_ISSUED.ANY */
1877 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1878 
1879 static struct attribute *glm_events_attrs[] = {
1880 	EVENT_PTR(td_total_slots_glm),
1881 	EVENT_PTR(td_total_slots_scale_glm),
1882 	EVENT_PTR(td_fetch_bubbles_glm),
1883 	EVENT_PTR(td_recovery_bubbles_glm),
1884 	EVENT_PTR(td_slots_issued_glm),
1885 	EVENT_PTR(td_slots_retired_glm),
1886 	NULL
1887 };
1888 
1889 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1890 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1891 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1892 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1893 	EVENT_EXTRA_END
1894 };
1895 
1896 #define GLM_DEMAND_DATA_RD		BIT_ULL(0)
1897 #define GLM_DEMAND_RFO			BIT_ULL(1)
1898 #define GLM_ANY_RESPONSE		BIT_ULL(16)
1899 #define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
1900 #define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
1901 #define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
1902 #define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
1903 #define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
1904 #define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1905 #define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
1906 
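/*
 * Editorial sketch (hedged): the GLM_* masks above are OFFCORE_RESPONSE
 * MSR bits, not event encodings. A raw perf event selects
 * OFFCORE_RESPONSE_0 (event 0xb7, umask 0x01) and passes the response
 * bits in attr.config1, which is validated against the valid_mask in
 * intel_glm_extra_regs[] and written to MSR_OFFCORE_RSP_0, for example:
 *
 *	u64 rsp = GLM_DEMAND_READ | GLM_LLC_MISS; // demand loads missing LLC
 *	// attr.type    = PERF_TYPE_RAW;
 *	// attr.config  = 0x01b7;	// OFFCORE_RESPONSE.0
 *	// attr.config1 = rsp;		// lands in MSR_OFFCORE_RSP_0
 */
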
1907 static __initconst const u64 glm_hw_cache_event_ids
1908 				[PERF_COUNT_HW_CACHE_MAX]
1909 				[PERF_COUNT_HW_CACHE_OP_MAX]
1910 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1911 	[C(L1D)] = {
1912 		[C(OP_READ)] = {
1913 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1914 			[C(RESULT_MISS)]	= 0x0,
1915 		},
1916 		[C(OP_WRITE)] = {
1917 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1918 			[C(RESULT_MISS)]	= 0x0,
1919 		},
1920 		[C(OP_PREFETCH)] = {
1921 			[C(RESULT_ACCESS)]	= 0x0,
1922 			[C(RESULT_MISS)]	= 0x0,
1923 		},
1924 	},
1925 	[C(L1I)] = {
1926 		[C(OP_READ)] = {
1927 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
1928 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
1929 		},
1930 		[C(OP_WRITE)] = {
1931 			[C(RESULT_ACCESS)]	= -1,
1932 			[C(RESULT_MISS)]	= -1,
1933 		},
1934 		[C(OP_PREFETCH)] = {
1935 			[C(RESULT_ACCESS)]	= 0x0,
1936 			[C(RESULT_MISS)]	= 0x0,
1937 		},
1938 	},
1939 	[C(LL)] = {
1940 		[C(OP_READ)] = {
1941 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1942 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1943 		},
1944 		[C(OP_WRITE)] = {
1945 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1946 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1947 		},
1948 		[C(OP_PREFETCH)] = {
1949 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1950 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1951 		},
1952 	},
1953 	[C(DTLB)] = {
1954 		[C(OP_READ)] = {
1955 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1956 			[C(RESULT_MISS)]	= 0x0,
1957 		},
1958 		[C(OP_WRITE)] = {
1959 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1960 			[C(RESULT_MISS)]	= 0x0,
1961 		},
1962 		[C(OP_PREFETCH)] = {
1963 			[C(RESULT_ACCESS)]	= 0x0,
1964 			[C(RESULT_MISS)]	= 0x0,
1965 		},
1966 	},
1967 	[C(ITLB)] = {
1968 		[C(OP_READ)] = {
1969 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
1970 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
1971 		},
1972 		[C(OP_WRITE)] = {
1973 			[C(RESULT_ACCESS)]	= -1,
1974 			[C(RESULT_MISS)]	= -1,
1975 		},
1976 		[C(OP_PREFETCH)] = {
1977 			[C(RESULT_ACCESS)]	= -1,
1978 			[C(RESULT_MISS)]	= -1,
1979 		},
1980 	},
1981 	[C(BPU)] = {
1982 		[C(OP_READ)] = {
1983 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
1984 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
1985 		},
1986 		[C(OP_WRITE)] = {
1987 			[C(RESULT_ACCESS)]	= -1,
1988 			[C(RESULT_MISS)]	= -1,
1989 		},
1990 		[C(OP_PREFETCH)] = {
1991 			[C(RESULT_ACCESS)]	= -1,
1992 			[C(RESULT_MISS)]	= -1,
1993 		},
1994 	},
1995 };
1996 
1997 static __initconst const u64 glm_hw_cache_extra_regs
1998 				[PERF_COUNT_HW_CACHE_MAX]
1999 				[PERF_COUNT_HW_CACHE_OP_MAX]
2000 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2001 	[C(LL)] = {
2002 		[C(OP_READ)] = {
2003 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2004 						  GLM_LLC_ACCESS,
2005 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2006 						  GLM_LLC_MISS,
2007 		},
2008 		[C(OP_WRITE)] = {
2009 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2010 						  GLM_LLC_ACCESS,
2011 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2012 						  GLM_LLC_MISS,
2013 		},
2014 		[C(OP_PREFETCH)] = {
2015 			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
2016 						  GLM_LLC_ACCESS,
2017 			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
2018 						  GLM_LLC_MISS,
2019 		},
2020 	},
2021 };
2022 
2023 static __initconst const u64 glp_hw_cache_event_ids
2024 				[PERF_COUNT_HW_CACHE_MAX]
2025 				[PERF_COUNT_HW_CACHE_OP_MAX]
2026 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2027 	[C(L1D)] = {
2028 		[C(OP_READ)] = {
2029 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2030 			[C(RESULT_MISS)]	= 0x0,
2031 		},
2032 		[C(OP_WRITE)] = {
2033 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2034 			[C(RESULT_MISS)]	= 0x0,
2035 		},
2036 		[C(OP_PREFETCH)] = {
2037 			[C(RESULT_ACCESS)]	= 0x0,
2038 			[C(RESULT_MISS)]	= 0x0,
2039 		},
2040 	},
2041 	[C(L1I)] = {
2042 		[C(OP_READ)] = {
2043 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
2044 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
2045 		},
2046 		[C(OP_WRITE)] = {
2047 			[C(RESULT_ACCESS)]	= -1,
2048 			[C(RESULT_MISS)]	= -1,
2049 		},
2050 		[C(OP_PREFETCH)] = {
2051 			[C(RESULT_ACCESS)]	= 0x0,
2052 			[C(RESULT_MISS)]	= 0x0,
2053 		},
2054 	},
2055 	[C(LL)] = {
2056 		[C(OP_READ)] = {
2057 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2058 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2059 		},
2060 		[C(OP_WRITE)] = {
2061 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2062 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2063 		},
2064 		[C(OP_PREFETCH)] = {
2065 			[C(RESULT_ACCESS)]	= 0x0,
2066 			[C(RESULT_MISS)]	= 0x0,
2067 		},
2068 	},
2069 	[C(DTLB)] = {
2070 		[C(OP_READ)] = {
2071 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2072 			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
2073 		},
2074 		[C(OP_WRITE)] = {
2075 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2076 			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
2077 		},
2078 		[C(OP_PREFETCH)] = {
2079 			[C(RESULT_ACCESS)]	= 0x0,
2080 			[C(RESULT_MISS)]	= 0x0,
2081 		},
2082 	},
2083 	[C(ITLB)] = {
2084 		[C(OP_READ)] = {
2085 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
2086 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
2087 		},
2088 		[C(OP_WRITE)] = {
2089 			[C(RESULT_ACCESS)]	= -1,
2090 			[C(RESULT_MISS)]	= -1,
2091 		},
2092 		[C(OP_PREFETCH)] = {
2093 			[C(RESULT_ACCESS)]	= -1,
2094 			[C(RESULT_MISS)]	= -1,
2095 		},
2096 	},
2097 	[C(BPU)] = {
2098 		[C(OP_READ)] = {
2099 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
2100 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
2101 		},
2102 		[C(OP_WRITE)] = {
2103 			[C(RESULT_ACCESS)]	= -1,
2104 			[C(RESULT_MISS)]	= -1,
2105 		},
2106 		[C(OP_PREFETCH)] = {
2107 			[C(RESULT_ACCESS)]	= -1,
2108 			[C(RESULT_MISS)]	= -1,
2109 		},
2110 	},
2111 };
2112 
2113 static __initconst const u64 glp_hw_cache_extra_regs
2114 				[PERF_COUNT_HW_CACHE_MAX]
2115 				[PERF_COUNT_HW_CACHE_OP_MAX]
2116 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2117 	[C(LL)] = {
2118 		[C(OP_READ)] = {
2119 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2120 						  GLM_LLC_ACCESS,
2121 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2122 						  GLM_LLC_MISS,
2123 		},
2124 		[C(OP_WRITE)] = {
2125 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2126 						  GLM_LLC_ACCESS,
2127 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2128 						  GLM_LLC_MISS,
2129 		},
2130 		[C(OP_PREFETCH)] = {
2131 			[C(RESULT_ACCESS)]	= 0x0,
2132 			[C(RESULT_MISS)]	= 0x0,
2133 		},
2134 	},
2135 };
2136 
2137 #define TNT_LOCAL_DRAM			BIT_ULL(26)
2138 #define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD
2139 #define TNT_DEMAND_WRITE		GLM_DEMAND_RFO
2140 #define TNT_LLC_ACCESS			GLM_ANY_RESPONSE
2141 #define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2142 					 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2143 #define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2144 
2145 static __initconst const u64 tnt_hw_cache_extra_regs
2146 				[PERF_COUNT_HW_CACHE_MAX]
2147 				[PERF_COUNT_HW_CACHE_OP_MAX]
2148 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2149 	[C(LL)] = {
2150 		[C(OP_READ)] = {
2151 			[C(RESULT_ACCESS)]	= TNT_DEMAND_READ|
2152 						  TNT_LLC_ACCESS,
2153 			[C(RESULT_MISS)]	= TNT_DEMAND_READ|
2154 						  TNT_LLC_MISS,
2155 		},
2156 		[C(OP_WRITE)] = {
2157 			[C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE|
2158 						  TNT_LLC_ACCESS,
2159 			[C(RESULT_MISS)]	= TNT_DEMAND_WRITE|
2160 						  TNT_LLC_MISS,
2161 		},
2162 		[C(OP_PREFETCH)] = {
2163 			[C(RESULT_ACCESS)]	= 0x0,
2164 			[C(RESULT_MISS)]	= 0x0,
2165 		},
2166 	},
2167 };
2168 
2169 EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
2170 EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
2171 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
2172 EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");
2173 
2174 static struct attribute *tnt_events_attrs[] = {
2175 	EVENT_PTR(td_fe_bound_tnt),
2176 	EVENT_PTR(td_retiring_tnt),
2177 	EVENT_PTR(td_bad_spec_tnt),
2178 	EVENT_PTR(td_be_bound_tnt),
2179 	NULL,
2180 };
2181 
2182 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2183 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2184 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2185 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2186 	EVENT_EXTRA_END
2187 };
2188 
2189 EVENT_ATTR_STR(mem-loads,	mem_ld_grt,	"event=0xd0,umask=0x5,ldlat=3");
2190 EVENT_ATTR_STR(mem-stores,	mem_st_grt,	"event=0xd0,umask=0x6");
2191 
2192 static struct attribute *grt_mem_attrs[] = {
2193 	EVENT_PTR(mem_ld_grt),
2194 	EVENT_PTR(mem_st_grt),
2195 	NULL
2196 };
2197 
2198 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2199 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2200 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2201 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2202 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2203 	EVENT_EXTRA_END
2204 };
2205 
2206 EVENT_ATTR_STR(topdown-retiring,       td_retiring_cmt,        "event=0x72,umask=0x0");
2207 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_cmt,        "event=0x73,umask=0x0");
2208 
2209 static struct attribute *cmt_events_attrs[] = {
2210 	EVENT_PTR(td_fe_bound_tnt),
2211 	EVENT_PTR(td_retiring_cmt),
2212 	EVENT_PTR(td_bad_spec_cmt),
2213 	EVENT_PTR(td_be_bound_tnt),
2214 	NULL
2215 };
2216 
2217 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2218 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2219 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2220 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2221 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2222 	INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2223 	INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2224 	EVENT_EXTRA_END
2225 };
2226 
2227 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit E-state */
2228 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit F-state */
2229 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
2230 #define KNL_MCDRAM_FAR		BIT_ULL(22)
2231 #define KNL_DDR_LOCAL		BIT_ULL(23)
2232 #define KNL_DDR_FAR		BIT_ULL(24)
2233 #define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2234 				    KNL_DDR_LOCAL | KNL_DDR_FAR)
2235 #define KNL_L2_READ		SLM_DMND_READ
2236 #define KNL_L2_WRITE		SLM_DMND_WRITE
2237 #define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
2238 #define KNL_L2_ACCESS		SLM_LLC_ACCESS
2239 #define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2240 				   KNL_DRAM_ANY | SNB_SNP_ANY | \
2241 						  SNB_NON_DRAM)
2242 
2243 static __initconst const u64 knl_hw_cache_extra_regs
2244 				[PERF_COUNT_HW_CACHE_MAX]
2245 				[PERF_COUNT_HW_CACHE_OP_MAX]
2246 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2247 	[C(LL)] = {
2248 		[C(OP_READ)] = {
2249 			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2250 			[C(RESULT_MISS)]   = 0,
2251 		},
2252 		[C(OP_WRITE)] = {
2253 			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2254 			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
2255 		},
2256 		[C(OP_PREFETCH)] = {
2257 			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2258 			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
2259 		},
2260 	},
2261 };
2262 
2263 /*
2264  * Used from PMIs where the LBRs are already disabled.
2265  *
2266  * This function may be called consecutively. The PMU is required to
2267  * remain in the disabled state across such calls.
2268  *
2269  * During consecutive calls, the same disable value will be written to related
2270  * registers, so the PMU state remains unchanged.
2271  *
2272  * intel_bts events don't coexist with intel PMU's BTS events because of
2273  * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2274  * disabled around intel PMU's event batching etc, only inside the PMI handler.
2275  *
2276  * Avoid PEBS_ENABLE MSR access in PMIs.
2277  * GLOBAL_CTRL has already been disabled, so none of the counters count
2278  * anymore; it doesn't matter whether PEBS is enabled or not.
2279  * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2280  * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2281  * However, some cases may change the PEBS status, e.g. PMI throttling.
2282  * PEBS_ENABLE should be updated wherever the status changes.
2283  */
2284 static __always_inline void __intel_pmu_disable_all(bool bts)
2285 {
2286 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2287 
2288 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2289 
2290 	if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2291 		intel_pmu_disable_bts();
2292 }
2293 
2294 static __always_inline void intel_pmu_disable_all(void)
2295 {
2296 	__intel_pmu_disable_all(true);
2297 	intel_pmu_pebs_disable_all();
2298 	intel_pmu_lbr_disable_all();
2299 }
2300 
2301 static void __intel_pmu_enable_all(int added, bool pmi)
2302 {
2303 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2304 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2305 
2306 	intel_pmu_lbr_enable_all(pmi);
2307 
2308 	if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2309 		wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2310 		cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2311 	}
2312 
2313 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2314 	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2315 
2316 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2317 		struct perf_event *event =
2318 			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2319 
2320 		if (WARN_ON_ONCE(!event))
2321 			return;
2322 
2323 		intel_pmu_enable_bts(event->hw.config);
2324 	}
2325 }
2326 
2327 static void intel_pmu_enable_all(int added)
2328 {
2329 	intel_pmu_pebs_enable_all();
2330 	__intel_pmu_enable_all(added, false);
2331 }
2332 
2333 static noinline int
2334 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2335 				  unsigned int cnt, unsigned long flags)
2336 {
2337 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2338 
2339 	intel_pmu_lbr_read();
2340 	cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2341 
2342 	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2343 	intel_pmu_enable_all(0);
2344 	local_irq_restore(flags);
2345 	return cnt;
2346 }
2347 
2348 static int
2349 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2350 {
2351 	unsigned long flags;
2352 
2353 	/* must not have branches... */
2354 	local_irq_save(flags);
2355 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2356 	__intel_pmu_lbr_disable();
2357 	/*            ... until here */
2358 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2359 }
2360 
2361 static int
2362 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2363 {
2364 	unsigned long flags;
2365 
2366 	/* must not have branches... */
2367 	local_irq_save(flags);
2368 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2369 	__intel_pmu_arch_lbr_disable();
2370 	/*            ... until here */
2371 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2372 }
2373 
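/*
 * Editorial note (hedged): the two snapshot helpers above are not called
 * directly; during PMU init one of them is installed as the
 * perf_snapshot_branch_stack static call, which callers such as the BPF
 * branch-snapshot helper then invoke, roughly:
 *
 *	cnt = static_call(perf_snapshot_branch_stack)(entries, cnt);
 *
 * The irqs-off, branch-free window between disabling the LBRs and
 * reading them keeps the snapshot from being polluted by branches in
 * the handler itself.
 */
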
2374 /*
2375  * Workaround for:
2376  *   Intel Errata AAK100 (model 26)
2377  *   Intel Errata AAP53  (model 30)
2378  *   Intel Errata BD53   (model 44)
2379  *
2380  * The official story:
2381  *   These chips need to be 'reset' when adding counters by programming the
2382  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2383  *   in sequence on the same PMC or on different PMCs.
2384  *
2385  * In practice it appears some of these events do in fact count, and
2386  * we need to program all 4 events.
2387  */
2388 static void intel_pmu_nhm_workaround(void)
2389 {
2390 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2391 	static const unsigned long nhm_magic[4] = {
2392 		0x4300B5,
2393 		0x4300D2,
2394 		0x4300B1,
2395 		0x4300B1
2396 	};
2397 	struct perf_event *event;
2398 	int i;
2399 
2400 	/*
2401 	 * The errata requires the following steps:
2402 	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2403 	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2404 	 *    the corresponding PMCx;
2405 	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
2406 	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2407 	 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
2408 	 */
2409 
2410 	/*
2411 	 * The real steps we choose are a little different from above.
2412 	 * A) To reduce MSR operations, we don't run step 1) as those MSRs
2413 	 *    are already cleared before this function is called;
2414 	 * B) Call x86_perf_event_update to save PMCx before configuring
2415 	 *    PERFEVTSELx with the magic numbers;
2416 	 * C) With step 5), we clear a PERFEVTSELx only when it is not
2417 	 *    currently in use;
2418 	 * D) Call x86_perf_event_set_period to restore PMCx.
2419 	 */
2420 
2421 	/* We always operate on 4 pairs of perf counters */
2422 	for (i = 0; i < 4; i++) {
2423 		event = cpuc->events[i];
2424 		if (event)
2425 			static_call(x86_pmu_update)(event);
2426 	}
2427 
2428 	for (i = 0; i < 4; i++) {
2429 		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2430 		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2431 	}
2432 
2433 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2434 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2435 
2436 	for (i = 0; i < 4; i++) {
2437 		event = cpuc->events[i];
2438 
2439 		if (event) {
2440 			static_call(x86_pmu_set_period)(event);
2441 			__x86_pmu_enable_event(&event->hw,
2442 					ARCH_PERFMON_EVENTSEL_ENABLE);
2443 		} else
2444 			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2445 	}
2446 }
2447 
2448 static void intel_pmu_nhm_enable_all(int added)
2449 {
2450 	if (added)
2451 		intel_pmu_nhm_workaround();
2452 	intel_pmu_enable_all(added);
2453 }
2454 
2455 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2456 {
2457 	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2458 
2459 	if (cpuc->tfa_shadow != val) {
2460 		cpuc->tfa_shadow = val;
2461 		wrmsrl(MSR_TSX_FORCE_ABORT, val);
2462 	}
2463 }
2464 
2465 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2466 {
2467 	/*
2468 	 * We're going to use PMC3; make sure TFA is set before we touch it.
2469 	 */
2470 	if (cntr == 3)
2471 		intel_set_tfa(cpuc, true);
2472 }
2473 
2474 static void intel_tfa_pmu_enable_all(int added)
2475 {
2476 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2477 
2478 	/*
2479 	 * If we find PMC3 is no longer used when we enable the PMU, we can
2480 	 * clear TFA.
2481 	 */
2482 	if (!test_bit(3, cpuc->active_mask))
2483 		intel_set_tfa(cpuc, false);
2484 
2485 	intel_pmu_enable_all(added);
2486 }
2487 
2488 static inline u64 intel_pmu_get_status(void)
2489 {
2490 	u64 status;
2491 
2492 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2493 
2494 	return status;
2495 }
2496 
2497 static inline void intel_pmu_ack_status(u64 ack)
2498 {
2499 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2500 }
2501 
2502 static inline bool event_is_checkpointed(struct perf_event *event)
2503 {
2504 	return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2505 }
2506 
2507 static inline void intel_set_masks(struct perf_event *event, int idx)
2508 {
2509 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2510 
2511 	if (event->attr.exclude_host)
2512 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2513 	if (event->attr.exclude_guest)
2514 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2515 	if (event_is_checkpointed(event))
2516 		__set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2517 }
2518 
2519 static inline void intel_clear_masks(struct perf_event *event, int idx)
2520 {
2521 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2522 
2523 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2524 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2525 	__clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2526 }
2527 
2528 static void intel_pmu_disable_fixed(struct perf_event *event)
2529 {
2530 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2531 	struct hw_perf_event *hwc = &event->hw;
2532 	int idx = hwc->idx;
2533 	u64 mask;
2534 
2535 	if (is_topdown_idx(idx)) {
2536 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2537 
2538 		/*
2539 		 * When there are other active TopDown events,
2540 		 * don't disable fixed counter 3.
2541 		 */
2542 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2543 			return;
2544 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2545 	}
2546 
2547 	intel_clear_masks(event, idx);
2548 
2549 	mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2550 	cpuc->fixed_ctrl_val &= ~mask;
2551 }
2552 
2553 static void intel_pmu_disable_event(struct perf_event *event)
2554 {
2555 	struct hw_perf_event *hwc = &event->hw;
2556 	int idx = hwc->idx;
2557 
2558 	switch (idx) {
2559 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
2560 		intel_clear_masks(event, idx);
2561 		x86_pmu_disable_event(event);
2562 		break;
2563 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2564 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2565 		intel_pmu_disable_fixed(event);
2566 		break;
2567 	case INTEL_PMC_IDX_FIXED_BTS:
2568 		intel_pmu_disable_bts();
2569 		intel_pmu_drain_bts_buffer();
2570 		return;
2571 	case INTEL_PMC_IDX_FIXED_VLBR:
2572 		intel_clear_masks(event, idx);
2573 		break;
2574 	default:
2575 		intel_clear_masks(event, idx);
2576 		pr_warn("Failed to disable the event with invalid index %d\n",
2577 			idx);
2578 		return;
2579 	}
2580 
2581 	/*
2582 	 * Needs to be called after x86_pmu_disable_event,
2583 	 * so we don't trigger the event without PEBS bit set.
2584 	 */
2585 	if (unlikely(event->attr.precise_ip))
2586 		intel_pmu_pebs_disable(event);
2587 }
2588 
2589 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2590 {
2591 	if (is_pebs_pt(event))
2592 		perf_report_aux_output_id(event, idx);
2593 }
2594 
2595 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2596 {
2597 	return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2598 }
2599 
2600 static void intel_pmu_del_event(struct perf_event *event)
2601 {
2602 	if (intel_pmu_needs_branch_stack(event))
2603 		intel_pmu_lbr_del(event);
2604 	if (event->attr.precise_ip)
2605 		intel_pmu_pebs_del(event);
2606 }
2607 
2608 static int icl_set_topdown_event_period(struct perf_event *event)
2609 {
2610 	struct hw_perf_event *hwc = &event->hw;
2611 	s64 left = local64_read(&hwc->period_left);
2612 
2613 	/*
2614 	 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2615 	 * Software should start both registers, PERF_METRICS and fixed
2616 	 * counter 3, from zero.
2617 	 * Clear PERF_METRICS and fixed counter 3 at initialization.
2618 	 * After that, both MSRs will be cleared after each read, so there
2619 	 * is no need to clear them again.
2620 	 */
2621 	if (left == x86_pmu.max_period) {
2622 		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2623 		wrmsrl(MSR_PERF_METRICS, 0);
2624 		hwc->saved_slots = 0;
2625 		hwc->saved_metric = 0;
2626 	}
2627 
2628 	if ((hwc->saved_slots) && is_slots_event(event)) {
2629 		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2630 		wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2631 	}
2632 
2633 	perf_event_update_userpage(event);
2634 
2635 	return 0;
2636 }
2637 
2638 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2639 
2640 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2641 {
2642 	u32 val;
2643 
2644 	/*
2645 	 * The metric is reported as an 8-bit integer fraction
2646 	 * summing up to 0xff.
2647 	 * slots-in-metric = (Metric / 0xff) * slots
2648 	 */
2649 	val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2650 	return  mul_u64_u32_div(slots, val, 0xff);
2651 }
2652 
2653 static u64 icl_get_topdown_value(struct perf_event *event,
2654 				       u64 slots, u64 metrics)
2655 {
2656 	int idx = event->hw.idx;
2657 	u64 delta;
2658 
2659 	if (is_metric_idx(idx))
2660 		delta = icl_get_metrics_event_value(metrics, slots, idx);
2661 	else
2662 		delta = slots;
2663 
2664 	return delta;
2665 }
2666 
2667 static void __icl_update_topdown_event(struct perf_event *event,
2668 				       u64 slots, u64 metrics,
2669 				       u64 last_slots, u64 last_metrics)
2670 {
2671 	u64 delta, last = 0;
2672 
2673 	delta = icl_get_topdown_value(event, slots, metrics);
2674 	if (last_slots)
2675 		last = icl_get_topdown_value(event, last_slots, last_metrics);
2676 
2677 	/*
2678 	 * The 8-bit integer fraction of the metric may not be accurate,
2679 	 * especially when the change is very small.
2680 	 * For example, if only a few bad_spec events happen, the fraction
2681 	 * may drop from 1 to 0. If so, the bad_spec event value
2682 	 * will be 0, which is definitely less than the last value.
2683 	 * Avoid updating event->count in this case.
2684 	 */
2685 	if (delta > last) {
2686 		delta -= last;
2687 		local64_add(delta, &event->count);
2688 	}
2689 }
2690 
2691 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2692 				      u64 metrics, int metric_end)
2693 {
2694 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2695 	struct perf_event *other;
2696 	int idx;
2697 
2698 	event->hw.saved_slots = slots;
2699 	event->hw.saved_metric = metrics;
2700 
2701 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2702 		if (!is_topdown_idx(idx))
2703 			continue;
2704 		other = cpuc->events[idx];
2705 		other->hw.saved_slots = slots;
2706 		other->hw.saved_metric = metrics;
2707 	}
2708 }
2709 
2710 /*
2711  * Update all active Topdown events.
2712  *
2713  * PERF_METRICS and fixed counter 3 are read separately. The values may be
2714  * modified by an NMI. The PMU has to be disabled before calling this function.
2715  */
2716 
2717 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2718 {
2719 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2720 	struct perf_event *other;
2721 	u64 slots, metrics;
2722 	bool reset = true;
2723 	int idx;
2724 
2725 	/* read Fixed counter 3 */
2726 	rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2727 	if (!slots)
2728 		return 0;
2729 
2730 	/* read PERF_METRICS */
2731 	rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2732 
2733 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2734 		if (!is_topdown_idx(idx))
2735 			continue;
2736 		other = cpuc->events[idx];
2737 		__icl_update_topdown_event(other, slots, metrics,
2738 					   event ? event->hw.saved_slots : 0,
2739 					   event ? event->hw.saved_metric : 0);
2740 	}
2741 
2742 	/*
2743 	 * Check and update this event, which may have been cleared
2744 	 * in active_mask, e.g. by x86_pmu_stop().
2745 	 */
2746 	if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2747 		__icl_update_topdown_event(event, slots, metrics,
2748 					   event->hw.saved_slots,
2749 					   event->hw.saved_metric);
2750 
2751 		/*
2752 		 * In x86_pmu_stop(), the event is cleared in active_mask first,
2753 		 * then the delta is drained, which indicates a context switch
2754 		 * for counting.
2755 		 * Save the metric and slots values for the context switch.
2756 		 * There is no need to reset PERF_METRICS and fixed counter 3,
2757 		 * because the values will be restored on the next schedule-in.
2758 		 */
2759 		update_saved_topdown_regs(event, slots, metrics, metric_end);
2760 		reset = false;
2761 	}
2762 
2763 	if (reset) {
2764 		/* Fixed counter 3 has to be written before PERF_METRICS. */
2765 		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2766 		wrmsrl(MSR_PERF_METRICS, 0);
2767 		if (event)
2768 			update_saved_topdown_regs(event, 0, 0, metric_end);
2769 	}
2770 
2771 	return slots;
2772 }
2773 
2774 static u64 icl_update_topdown_event(struct perf_event *event)
2775 {
2776 	return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2777 						 x86_pmu.num_topdown_events - 1);
2778 }
2779 
2780 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
2781 
2782 static void intel_pmu_read_topdown_event(struct perf_event *event)
2783 {
2784 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2785 
2786 	/* Only need to call update_topdown_event() once for group read. */
2787 	if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2788 	    !is_slots_event(event))
2789 		return;
2790 
2791 	perf_pmu_disable(event->pmu);
2792 	static_call(intel_pmu_update_topdown_event)(event);
2793 	perf_pmu_enable(event->pmu);
2794 }
2795 
2796 static void intel_pmu_read_event(struct perf_event *event)
2797 {
2798 	if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2799 		intel_pmu_auto_reload_read(event);
2800 	else if (is_topdown_count(event))
2801 		intel_pmu_read_topdown_event(event);
2802 	else
2803 		x86_perf_event_update(event);
2804 }
2805 
2806 static void intel_pmu_enable_fixed(struct perf_event *event)
2807 {
2808 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2809 	struct hw_perf_event *hwc = &event->hw;
2810 	u64 mask, bits = 0;
2811 	int idx = hwc->idx;
2812 
2813 	if (is_topdown_idx(idx)) {
2814 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2815 		/*
2816 		 * When there are other active TopDown events,
2817 		 * don't enable fixed counter 3 again.
2818 		 */
2819 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2820 			return;
2821 
2822 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2823 
2824 		if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR)
2825 			bits |= INTEL_FIXED_3_METRICS_CLEAR;
2826 	}
2827 
2828 	intel_set_masks(event, idx);
2829 
2830 	/*
2831 	 * Enable IRQ generation (0x8), if not PEBS,
2832 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2833 	 * if requested:
2834 	 */
2835 	if (!event->attr.precise_ip)
2836 		bits |= INTEL_FIXED_0_ENABLE_PMI;
2837 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2838 		bits |= INTEL_FIXED_0_USER;
2839 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2840 		bits |= INTEL_FIXED_0_KERNEL;
2841 
2842 	/*
2843 	 * ANY bit is supported in v3 and up
2844 	 */
2845 	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2846 		bits |= INTEL_FIXED_0_ANYTHREAD;
2847 
2848 	idx -= INTEL_PMC_IDX_FIXED;
2849 	bits = intel_fixed_bits_by_idx(idx, bits);
2850 	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2851 
2852 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2853 		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2854 		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2855 	}
2856 
2857 	cpuc->fixed_ctrl_val &= ~mask;
2858 	cpuc->fixed_ctrl_val |= bits;
2859 }
2860 
2861 static void intel_pmu_enable_event(struct perf_event *event)
2862 {
2863 	u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
2864 	struct hw_perf_event *hwc = &event->hw;
2865 	int idx = hwc->idx;
2866 
2867 	if (unlikely(event->attr.precise_ip))
2868 		intel_pmu_pebs_enable(event);
2869 
2870 	switch (idx) {
2871 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
2872 		if (branch_sample_counters(event))
2873 			enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
2874 		intel_set_masks(event, idx);
2875 		__x86_pmu_enable_event(hwc, enable_mask);
2876 		break;
2877 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2878 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2879 		intel_pmu_enable_fixed(event);
2880 		break;
2881 	case INTEL_PMC_IDX_FIXED_BTS:
2882 		if (!__this_cpu_read(cpu_hw_events.enabled))
2883 			return;
2884 		intel_pmu_enable_bts(hwc->config);
2885 		break;
2886 	case INTEL_PMC_IDX_FIXED_VLBR:
2887 		intel_set_masks(event, idx);
2888 		break;
2889 	default:
2890 		pr_warn("Failed to enable the event with invalid index %d\n",
2891 			idx);
2892 	}
2893 }
2894 
2895 static void intel_pmu_add_event(struct perf_event *event)
2896 {
2897 	if (event->attr.precise_ip)
2898 		intel_pmu_pebs_add(event);
2899 	if (intel_pmu_needs_branch_stack(event))
2900 		intel_pmu_lbr_add(event);
2901 }
2902 
2903 /*
2904  * Save and restart an expired event. Called from NMI context,
2905  * so it has to be careful about preempting normal event ops:
2906  */
2907 int intel_pmu_save_and_restart(struct perf_event *event)
2908 {
2909 	static_call(x86_pmu_update)(event);
2910 	/*
2911 	 * For a checkpointed counter always reset back to 0.  This
2912 	 * avoids a situation where the counter overflows, aborts the
2913 	 * transaction and is then set back to shortly before the
2914 	 * overflow, and overflows and aborts again.
2915 	 */
2916 	if (unlikely(event_is_checkpointed(event))) {
2917 		/* No race with NMIs because the counter should not be armed */
2918 		wrmsrl(event->hw.event_base, 0);
2919 		local64_set(&event->hw.prev_count, 0);
2920 	}
2921 	return static_call(x86_pmu_set_period)(event);
2922 }
2923 
2924 static int intel_pmu_set_period(struct perf_event *event)
2925 {
2926 	if (unlikely(is_topdown_count(event)))
2927 		return static_call(intel_pmu_set_topdown_event_period)(event);
2928 
2929 	return x86_perf_event_set_period(event);
2930 }
2931 
2932 static u64 intel_pmu_update(struct perf_event *event)
2933 {
2934 	if (unlikely(is_topdown_count(event)))
2935 		return static_call(intel_pmu_update_topdown_event)(event);
2936 
2937 	return x86_perf_event_update(event);
2938 }
2939 
2940 static void intel_pmu_reset(void)
2941 {
2942 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2943 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2944 	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
2945 	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
2946 	unsigned long flags;
2947 	int idx;
2948 
2949 	if (!*(u64 *)cntr_mask)
2950 		return;
2951 
2952 	local_irq_save(flags);
2953 
2954 	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2955 
2956 	for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
2957 		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2958 		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
2959 	}
2960 	for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
2961 		if (fixed_counter_disabled(idx, cpuc->pmu))
2962 			continue;
2963 		wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
2964 	}
2965 
2966 	if (ds)
2967 		ds->bts_index = ds->bts_buffer_base;
2968 
2969 	/* Ack all overflows and disable fixed counters */
2970 	if (x86_pmu.version >= 2) {
2971 		intel_pmu_ack_status(intel_pmu_get_status());
2972 		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2973 	}
2974 
2975 	/* Reset LBRs and LBR freezing */
2976 	if (x86_pmu.lbr_nr) {
2977 		update_debugctlmsr(get_debugctlmsr() &
2978 			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2979 	}
2980 
2981 	local_irq_restore(flags);
2982 }
2983 
2984 /*
2985  * We may be running with guest PEBS events created by KVM, and the
2986  * PEBS records are logged into the guest's DS and invisible to host.
2987  *
2988  * In the case of guest PEBS overflow, we only trigger a fake event
2989  * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
2990  * The guest will then vm-entry and check the guest DS area to read
2991  * the guest PEBS records.
2992  *
2993  * The contents and other behavior of the guest event do not matter.
2994  */
2995 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
2996 				      struct perf_sample_data *data)
2997 {
2998 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2999 	u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
3000 	struct perf_event *event = NULL;
3001 	int bit;
3002 
3003 	if (!unlikely(perf_guest_state()))
3004 		return;
3005 
3006 	if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3007 	    !guest_pebs_idxs)
3008 		return;
3009 
3010 	for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3011 		event = cpuc->events[bit];
3012 		if (!event->attr.precise_ip)
3013 			continue;
3014 
3015 		perf_sample_data_init(data, 0, event->hw.last_period);
3016 		if (perf_event_overflow(event, data, regs))
3017 			x86_pmu_stop(event, 0);
3018 
3019 		/* Injecting one fake event is enough. */
3020 		break;
3021 	}
3022 }
3023 
3024 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3025 {
3026 	struct perf_sample_data data;
3027 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3028 	int bit;
3029 	int handled = 0;
3030 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3031 
3032 	inc_irq_stat(apic_perf_irqs);
3033 
3034 	/*
3035 	 * Ignore a range of extra bits in status that do not indicate
3036 	 * overflow by themselves.
3037 	 */
3038 	status &= ~(GLOBAL_STATUS_COND_CHG |
3039 		    GLOBAL_STATUS_ASIF |
3040 		    GLOBAL_STATUS_LBRS_FROZEN);
3041 	if (!status)
3042 		return 0;
3043 	/*
3044 	 * In case multiple PEBS events are sampled at the same time,
3045 	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3046 	 * PEBS buffer overflow and also to see at most 3 PEBS counters
3047 	 * having their bits set in the status register. This is a sign
3048 	 * that there was at least one PEBS record pending at the time
3049 	 * of the PMU interrupt. PEBS counters must only be processed
3050 	 * via the drain_pebs() calls and not via the regular sample
3051 	 * processing loop that follows, otherwise phony regular
3052 	 * samples may be generated in the sampling buffer, not marked
3053 	 * with the EXACT tag. Another possibility is to have one PEBS
3054 	 * event and at least one non-PEBS event which overflows while
3055 	 * PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
3056 	 * not be set, yet the overflow status bit for the PEBS counter
3057 	 * will be set on Skylake.
3058 	 *
3059 	 * To avoid this problem, we systematically ignore the PEBS-enabled
3060 	 * counters from the GLOBAL_STATUS mask and we always process PEBS
3061 	 * events via drain_pebs().
3062 	 */
3063 	status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3064 
3065 	/*
3066 	 * PEBS overflow sets bit 62 in the global status register
3067 	 */
3068 	if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3069 		u64 pebs_enabled = cpuc->pebs_enabled;
3070 
3071 		handled++;
3072 		x86_pmu_handle_guest_pebs(regs, &data);
3073 		x86_pmu.drain_pebs(regs, &data);
3074 		status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
3075 
3076 		/*
3077 		 * PMI throttling may be triggered, which stops the PEBS event.
3078 		 * Although cpuc->pebs_enabled is updated accordingly,
3079 		 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has
3080 		 * been forced to 0 in the PMI.
3081 		 * Update the MSR if pebs_enabled has changed.
3082 		 */
3083 		if (pebs_enabled != cpuc->pebs_enabled)
3084 			wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3085 	}
3086 
3087 	/*
3088 	 * Intel PT
3089 	 */
3090 	if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3091 		handled++;
3092 		if (!perf_guest_handle_intel_pt_intr())
3093 			intel_pt_interrupt();
3094 	}
3095 
3096 	/*
3097 	 * Intel Perf metrics
3098 	 */
3099 	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3100 		handled++;
3101 		static_call(intel_pmu_update_topdown_event)(NULL);
3102 	}
3103 
3104 	/*
3105 	 * Checkpointed counters can lead to 'spurious' PMIs because the
3106 	 * rollback caused by the PMI will have cleared the overflow status
3107 	 * bit. Therefore always force probe these counters.
3108 	 */
3109 	status |= cpuc->intel_cp_status;
3110 
3111 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3112 		struct perf_event *event = cpuc->events[bit];
3113 
3114 		handled++;
3115 
3116 		if (!test_bit(bit, cpuc->active_mask))
3117 			continue;
3118 
3119 		if (!intel_pmu_save_and_restart(event))
3120 			continue;
3121 
3122 		perf_sample_data_init(&data, 0, event->hw.last_period);
3123 
3124 		if (has_branch_stack(event))
3125 			intel_pmu_lbr_save_brstack(&data, cpuc, event);
3126 
3127 		if (perf_event_overflow(event, &data, regs))
3128 			x86_pmu_stop(event, 0);
3129 	}
3130 
3131 	return handled;
3132 }
3133 
3134 /*
3135  * This handler is triggered by the local APIC, so the APIC IRQ handling
3136  * rules apply:
3137  */
3138 static int intel_pmu_handle_irq(struct pt_regs *regs)
3139 {
3140 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3141 	bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3142 	bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3143 	int loops;
3144 	u64 status;
3145 	int handled;
3146 	int pmu_enabled;
3147 
3148 	/*
3149 	 * Save the PMU state.
3150 	 * It needs to be restored when leaving the handler.
3151 	 */
3152 	pmu_enabled = cpuc->enabled;
3153 	/*
3154 	 * In general, the early ACK is only applied on old platforms.
3155 	 * For big cores starting with Haswell, the late ACK should be
3156 	 * applied.
3157 	 * For small cores after Tremont, we have to do the ACK right
3158 	 * before re-enabling counters, which is in the middle of the
3159 	 * NMI handler.
3160 	 */
3161 	if (!late_ack && !mid_ack)
3162 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3163 	intel_bts_disable_local();
3164 	cpuc->enabled = 0;
3165 	__intel_pmu_disable_all(true);
3166 	handled = intel_pmu_drain_bts_buffer();
3167 	handled += intel_bts_interrupt();
3168 	status = intel_pmu_get_status();
3169 	if (!status)
3170 		goto done;
3171 
3172 	loops = 0;
3173 again:
3174 	intel_pmu_lbr_read();
3175 	intel_pmu_ack_status(status);
3176 	if (++loops > 100) {
3177 		static bool warned;
3178 
3179 		if (!warned) {
3180 			WARN(1, "perfevents: irq loop stuck!\n");
3181 			perf_event_print_debug();
3182 			warned = true;
3183 		}
3184 		intel_pmu_reset();
3185 		goto done;
3186 	}
3187 
3188 	handled += handle_pmi_common(regs, status);
3189 
3190 	/*
3191 	 * Repeat if there is more work to be done:
3192 	 */
3193 	status = intel_pmu_get_status();
3194 	if (status)
3195 		goto again;
3196 
3197 done:
3198 	if (mid_ack)
3199 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3200 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
3201 	cpuc->enabled = pmu_enabled;
3202 	if (pmu_enabled)
3203 		__intel_pmu_enable_all(0, true);
3204 	intel_bts_enable_local();
3205 
3206 	/*
3207 	 * Only unmask the NMI after the overflow counters
3208 	 * have been reset. This avoids spurious NMIs on
3209 	 * Haswell CPUs.
3210 	 */
3211 	if (late_ack)
3212 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3213 	return handled;
3214 }
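
/*
 * For reference, the three APIC_LVTPC ack flavours handled above, as
 * implemented by this function (a summary of the code, not extra semantics):
 *
 *	early ack: !late_ack && !mid_ack; written at the top of the
 *		   handler, before the counters are disabled.
 *	mid ack:   written at the 'done' label, right before the
 *		   counters are re-enabled (small core after Tremont).
 *	late ack:  written last, after the counters have been
 *		   re-enabled, to avoid spurious NMIs on Haswell.
 */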
3215 
3216 static struct event_constraint *
3217 intel_bts_constraints(struct perf_event *event)
3218 {
3219 	if (unlikely(intel_pmu_has_bts(event)))
3220 		return &bts_constraint;
3221 
3222 	return NULL;
3223 }
3224 
3225 /*
3226  * Note: matches a fake event, like Fixed2.
3227  */
3228 static struct event_constraint *
3229 intel_vlbr_constraints(struct perf_event *event)
3230 {
3231 	struct event_constraint *c = &vlbr_constraint;
3232 
3233 	if (unlikely(constraint_match(c, event->hw.config))) {
3234 		event->hw.flags |= c->flags;
3235 		return c;
3236 	}
3237 
3238 	return NULL;
3239 }
3240 
3241 static int intel_alt_er(struct cpu_hw_events *cpuc,
3242 			int idx, u64 config)
3243 {
3244 	struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3245 	int alt_idx = idx;
3246 
3247 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3248 		return idx;
3249 
3250 	if (idx == EXTRA_REG_RSP_0)
3251 		alt_idx = EXTRA_REG_RSP_1;
3252 
3253 	if (idx == EXTRA_REG_RSP_1)
3254 		alt_idx = EXTRA_REG_RSP_0;
3255 
3256 	if (config & ~extra_regs[alt_idx].valid_mask)
3257 		return idx;
3258 
3259 	return alt_idx;
3260 }
3261 
3262 static void intel_fixup_er(struct perf_event *event, int idx)
3263 {
3264 	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3265 	event->hw.extra_reg.idx = idx;
3266 
3267 	if (idx == EXTRA_REG_RSP_0) {
3268 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3269 		event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3270 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3271 	} else if (idx == EXTRA_REG_RSP_1) {
3272 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3273 		event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3274 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3275 	}
3276 }
3277 
3278 /*
3279  * manage allocation of shared extra msr for certain events
3280  *
3281  * sharing can be:
3282  * per-cpu: to be shared between the various events on a single PMU
3283  * per-core: per-cpu + shared by HT threads
3284  */
3285 static struct event_constraint *
3286 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3287 				   struct perf_event *event,
3288 				   struct hw_perf_event_extra *reg)
3289 {
3290 	struct event_constraint *c = &emptyconstraint;
3291 	struct er_account *era;
3292 	unsigned long flags;
3293 	int idx = reg->idx;
3294 
3295 	/*
3296 	 * reg->alloc can be set due to existing state, so for fake cpuc we
3297 	 * need to ignore this, otherwise we might fail to allocate proper fake
3298 	 * state for this extra reg constraint. Also see the comment below.
3299 	 */
3300 	if (reg->alloc && !cpuc->is_fake)
3301 		return NULL; /* call x86_get_event_constraint() */
3302 
3303 again:
3304 	era = &cpuc->shared_regs->regs[idx];
3305 	/*
3306 	 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
3307 	 * passing a fake cpuc
3308 	 */
3309 	raw_spin_lock_irqsave(&era->lock, flags);
3310 
3311 	if (!atomic_read(&era->ref) || era->config == reg->config) {
3312 
3313 		/*
3314 		 * If it's a fake cpuc -- as per validate_{group,event}() we
3315 		 * shouldn't touch event state and we can avoid doing so
3316 		 * since both will only call get_event_constraints() once
3317 		 * on each event, this avoids the need for reg->alloc.
3318 		 *
3319 		 * Not doing the ER fixup will only result in era->reg being
3320 		 * wrong, but since we won't actually try and program hardware
3321 		 * this isn't a problem either.
3322 		 */
3323 		if (!cpuc->is_fake) {
3324 			if (idx != reg->idx)
3325 				intel_fixup_er(event, idx);
3326 
3327 			/*
3328 			 * x86_schedule_events() can call get_event_constraints()
3329 			 * multiple times on events in the case of incremental
3330 			 * scheduling. reg->alloc ensures we only do the ER
3331 			 * allocation once.
3332 			 */
3333 			reg->alloc = 1;
3334 		}
3335 
3336 		/* lock in msr value */
3337 		era->config = reg->config;
3338 		era->reg = reg->reg;
3339 
3340 		/* one more user */
3341 		atomic_inc(&era->ref);
3342 
3343 		/*
3344 		 * need to call x86_get_event_constraint()
3345 		 * to check if associated event has constraints
3346 		 */
3347 		c = NULL;
3348 	} else {
3349 		idx = intel_alt_er(cpuc, idx, reg->config);
3350 		if (idx != reg->idx) {
3351 			raw_spin_unlock_irqrestore(&era->lock, flags);
3352 			goto again;
3353 		}
3354 	}
3355 	raw_spin_unlock_irqrestore(&era->lock, flags);
3356 
3357 	return c;
3358 }
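
/*
 * An illustration of the protocol above (the config values are made up):
 * events may share an extra MSR only if they request the same value.
 *
 *	event A: OFFCORE_RSP_0 config 0x10001 -> era->ref = 1, value locked
 *	event B: OFFCORE_RSP_0 config 0x10001 -> shares it, era->ref = 2
 *	event C: OFFCORE_RSP_0 config 0x20002 -> conflict, intel_alt_er()
 *		 retries the allocation with OFFCORE_RSP_1
 */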
3359 
3360 static void
3361 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3362 				   struct hw_perf_event_extra *reg)
3363 {
3364 	struct er_account *era;
3365 
3366 	/*
3367 	 * Only put constraint if extra reg was actually allocated. Also takes
3368 	 * care of events which do not use an extra shared reg.
3369 	 *
3370 	 * Also, if this is a fake cpuc we shouldn't touch any event state
3371 	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3372 	 * either since it'll be thrown out.
3373 	 */
3374 	if (!reg->alloc || cpuc->is_fake)
3375 		return;
3376 
3377 	era = &cpuc->shared_regs->regs[reg->idx];
3378 
3379 	/* one fewer user */
3380 	atomic_dec(&era->ref);
3381 
3382 	/* allocate again next time */
3383 	reg->alloc = 0;
3384 }
3385 
3386 static struct event_constraint *
3387 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3388 			      struct perf_event *event)
3389 {
3390 	struct event_constraint *c = NULL, *d;
3391 	struct hw_perf_event_extra *xreg, *breg;
3392 
3393 	xreg = &event->hw.extra_reg;
3394 	if (xreg->idx != EXTRA_REG_NONE) {
3395 		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3396 		if (c == &emptyconstraint)
3397 			return c;
3398 	}
3399 	breg = &event->hw.branch_reg;
3400 	if (breg->idx != EXTRA_REG_NONE) {
3401 		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3402 		if (d == &emptyconstraint) {
3403 			__intel_shared_reg_put_constraints(cpuc, xreg);
3404 			c = d;
3405 		}
3406 	}
3407 	return c;
3408 }
3409 
3410 struct event_constraint *
3411 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3412 			  struct perf_event *event)
3413 {
3414 	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3415 	struct event_constraint *c;
3416 
3417 	if (event_constraints) {
3418 		for_each_event_constraint(c, event_constraints) {
3419 			if (constraint_match(c, event->hw.config)) {
3420 				event->hw.flags |= c->flags;
3421 				return c;
3422 			}
3423 		}
3424 	}
3425 
3426 	return &hybrid_var(cpuc->pmu, unconstrained);
3427 }
3428 
3429 static struct event_constraint *
3430 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3431 			    struct perf_event *event)
3432 {
3433 	struct event_constraint *c;
3434 
3435 	c = intel_vlbr_constraints(event);
3436 	if (c)
3437 		return c;
3438 
3439 	c = intel_bts_constraints(event);
3440 	if (c)
3441 		return c;
3442 
3443 	c = intel_shared_regs_constraints(cpuc, event);
3444 	if (c)
3445 		return c;
3446 
3447 	c = intel_pebs_constraints(event);
3448 	if (c)
3449 		return c;
3450 
3451 	return x86_get_event_constraints(cpuc, idx, event);
3452 }
3453 
3454 static void
3455 intel_start_scheduling(struct cpu_hw_events *cpuc)
3456 {
3457 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3458 	struct intel_excl_states *xl;
3459 	int tid = cpuc->excl_thread_id;
3460 
3461 	/*
3462 	 * nothing needed if in group validation mode
3463 	 */
3464 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3465 		return;
3466 
3467 	/*
3468 	 * no exclusion needed
3469 	 */
3470 	if (WARN_ON_ONCE(!excl_cntrs))
3471 		return;
3472 
3473 	xl = &excl_cntrs->states[tid];
3474 
3475 	xl->sched_started = true;
3476 	/*
3477 	 * lock shared state until we are done scheduling
3478 	 * lock shared state until we are done scheduling,
3479 	 * i.e. until intel_stop_scheduling();
3480 	 * this makes scheduling appear as a transaction
3481 	raw_spin_lock(&excl_cntrs->lock);
3482 }
3483 
3484 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3485 {
3486 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3487 	struct event_constraint *c = cpuc->event_constraint[idx];
3488 	struct intel_excl_states *xl;
3489 	int tid = cpuc->excl_thread_id;
3490 
3491 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3492 		return;
3493 
3494 	if (WARN_ON_ONCE(!excl_cntrs))
3495 		return;
3496 
3497 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3498 		return;
3499 
3500 	xl = &excl_cntrs->states[tid];
3501 
3502 	lockdep_assert_held(&excl_cntrs->lock);
3503 
3504 	if (c->flags & PERF_X86_EVENT_EXCL)
3505 		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3506 	else
3507 		xl->state[cntr] = INTEL_EXCL_SHARED;
3508 }
3509 
3510 static void
3511 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3512 {
3513 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3514 	struct intel_excl_states *xl;
3515 	int tid = cpuc->excl_thread_id;
3516 
3517 	/*
3518 	 * nothing needed if in group validation mode
3519 	 */
3520 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3521 		return;
3522 	/*
3523 	 * no exclusion needed
3524 	 */
3525 	if (WARN_ON_ONCE(!excl_cntrs))
3526 		return;
3527 
3528 	xl = &excl_cntrs->states[tid];
3529 
3530 	xl->sched_started = false;
3531 	/*
3532 	 * release shared state lock (acquired in intel_start_scheduling())
3533 	 */
3534 	raw_spin_unlock(&excl_cntrs->lock);
3535 }
3536 
3537 static struct event_constraint *
3538 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3539 {
3540 	WARN_ON_ONCE(!cpuc->constraint_list);
3541 
3542 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3543 		struct event_constraint *cx;
3544 
3545 		/*
3546 		 * grab pre-allocated constraint entry
3547 		 */
3548 		cx = &cpuc->constraint_list[idx];
3549 
3550 		/*
3551 		 * initialize dynamic constraint
3552 		 * with static constraint
3553 		 */
3554 		*cx = *c;
3555 
3556 		/*
3557 		 * mark constraint as dynamic
3558 		 */
3559 		cx->flags |= PERF_X86_EVENT_DYNAMIC;
3560 		c = cx;
3561 	}
3562 
3563 	return c;
3564 }
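
/*
 * The copy above is what allows callers such as intel_get_excl_constraints()
 * and tfa_get_event_constraints() to clear bits in c->idxmsk and adjust
 * c->weight: they modify the per-CPU constraint_list slot, never the
 * shared static constraint tables.
 */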
3565 
3566 static struct event_constraint *
3567 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3568 			   int idx, struct event_constraint *c)
3569 {
3570 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3571 	struct intel_excl_states *xlo;
3572 	int tid = cpuc->excl_thread_id;
3573 	int is_excl, i, w;
3574 
3575 	/*
3576 	 * validating a group does not require
3577 	 * enforcing cross-thread exclusion
3578 	 */
3579 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3580 		return c;
3581 
3582 	/*
3583 	 * no exclusion needed
3584 	 */
3585 	if (WARN_ON_ONCE(!excl_cntrs))
3586 		return c;
3587 
3588 	/*
3589 	 * because we modify the constraint, we need
3590 	 * to make a copy. Static constraints come
3591 	 * from static const tables.
3592 	 *
3593 	 * only needed when constraint has not yet
3594 	 * been cloned (marked dynamic)
3595 	 */
3596 	c = dyn_constraint(cpuc, c, idx);
3597 
3598 	/*
3599 	 * From here on, the constraint is dynamic.
3600 	 * Either it was just allocated above, or it
3601 	 * was allocated during an earlier invocation
3602 	 * of this function
3603 	 */
3604 
3605 	/*
3606 	 * state of sibling HT
3607 	 */
3608 	xlo = &excl_cntrs->states[tid ^ 1];
3609 
3610 	/*
3611 	 * event requires exclusive counter access
3612 	 * across HT threads
3613 	 */
3614 	is_excl = c->flags & PERF_X86_EVENT_EXCL;
3615 	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3616 		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3617 		if (!cpuc->n_excl++)
3618 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3619 	}
3620 
3621 	/*
3622 	 * Modify static constraint with current dynamic
3623 	 * state of thread
3624 	 *
3625 	 * EXCLUSIVE: sibling counter measuring exclusive event
3626 	 * SHARED   : sibling counter measuring non-exclusive event
3627 	 * UNUSED   : sibling counter unused
3628 	 */
3629 	w = c->weight;
3630 	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3631 		/*
3632 		 * exclusive event in sibling counter:
3633 		 * our corresponding counter cannot be used
3634 		 * regardless of our event
3635 		 */
3636 		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3637 			__clear_bit(i, c->idxmsk);
3638 			w--;
3639 			continue;
3640 		}
3641 		/*
3642 		 * if we measure an exclusive event and the sibling
3643 		 * measures a non-exclusive one, then the counter
3644 		 * cannot be used
3645 		 */
3646 		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3647 			__clear_bit(i, c->idxmsk);
3648 			w--;
3649 			continue;
3650 		}
3651 	}
3652 
3653 	/*
3654 	 * if we return an empty mask, then switch
3655 	 * back to static empty constraint to avoid
3656 	 * the cost of freeing later on
3657 	 */
3658 	if (!w)
3659 		c = &emptyconstraint;
3660 
3661 	c->weight = w;
3662 
3663 	return c;
3664 }
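
/*
 * A worked example of the trimming above, with made-up state. Assume the
 * constraint allows counters 0-3 (idxmsk = 0xf, weight = 4) and the
 * sibling thread's state is:
 *
 *	xlo->state[0] = INTEL_EXCL_EXCLUSIVE -> counter 0 always removed
 *	xlo->state[1] = INTEL_EXCL_SHARED    -> removed only if is_excl
 *	xlo->state[2] = INTEL_EXCL_UNUSED    -> counter 2 stays
 *	xlo->state[3] = INTEL_EXCL_UNUSED    -> counter 3 stays
 *
 * A non-exclusive event ends up with idxmsk = 0xe (weight 3), an
 * exclusive one with idxmsk = 0xc (weight 2).
 */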
3665 
3666 static struct event_constraint *
3667 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3668 			    struct perf_event *event)
3669 {
3670 	struct event_constraint *c1, *c2;
3671 
3672 	c1 = cpuc->event_constraint[idx];
3673 
3674 	/*
3675 	 * first time only
3676 	 * - static constraint: no change across incremental scheduling calls
3677 	 * - dynamic constraint: handled by intel_get_excl_constraints()
3678 	 */
3679 	c2 = __intel_get_event_constraints(cpuc, idx, event);
3680 	if (c1) {
3681 	        WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3682 		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3683 		c1->weight = c2->weight;
3684 		c2 = c1;
3685 	}
3686 
3687 	if (cpuc->excl_cntrs)
3688 		return intel_get_excl_constraints(cpuc, event, idx, c2);
3689 
3690 	/* Not all counters support the branch counter feature. */
3691 	if (branch_sample_counters(event)) {
3692 		c2 = dyn_constraint(cpuc, c2, idx);
3693 		c2->idxmsk64 &= x86_pmu.lbr_counters;
3694 		c2->weight = hweight64(c2->idxmsk64);
3695 	}
3696 
3697 	return c2;
3698 }
3699 
3700 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3701 		struct perf_event *event)
3702 {
3703 	struct hw_perf_event *hwc = &event->hw;
3704 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3705 	int tid = cpuc->excl_thread_id;
3706 	struct intel_excl_states *xl;
3707 
3708 	/*
3709 	 * nothing needed if in group validation mode
3710 	 */
3711 	if (cpuc->is_fake)
3712 		return;
3713 
3714 	if (WARN_ON_ONCE(!excl_cntrs))
3715 		return;
3716 
3717 	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3718 		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3719 		if (!--cpuc->n_excl)
3720 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3721 	}
3722 
3723 	/*
3724 	 * If event was actually assigned, then mark the counter state as
3725 	 * unused now.
3726 	 */
3727 	if (hwc->idx >= 0) {
3728 		xl = &excl_cntrs->states[tid];
3729 
3730 		/*
3731 		 * put_constraint may be called from x86_schedule_events()
3732 		 * which already has the lock held so here make locking
3733 		 * which already has the lock held, so make the locking
3734 		 * conditional here.
3735 		if (!xl->sched_started)
3736 			raw_spin_lock(&excl_cntrs->lock);
3737 
3738 		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3739 
3740 		if (!xl->sched_started)
3741 			raw_spin_unlock(&excl_cntrs->lock);
3742 	}
3743 }
3744 
3745 static void
3746 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3747 					struct perf_event *event)
3748 {
3749 	struct hw_perf_event_extra *reg;
3750 
3751 	reg = &event->hw.extra_reg;
3752 	if (reg->idx != EXTRA_REG_NONE)
3753 		__intel_shared_reg_put_constraints(cpuc, reg);
3754 
3755 	reg = &event->hw.branch_reg;
3756 	if (reg->idx != EXTRA_REG_NONE)
3757 		__intel_shared_reg_put_constraints(cpuc, reg);
3758 }
3759 
3760 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3761 					struct perf_event *event)
3762 {
3763 	intel_put_shared_regs_event_constraints(cpuc, event);
3764 
3765 	/*
3766 	 * if the PMU has exclusive counter restrictions, then
3767 	 * all events are subject to them and must call the
3768 	 * put_excl_constraints() routine
3769 	 */
3770 	if (cpuc->excl_cntrs)
3771 		intel_put_excl_constraints(cpuc, event);
3772 }
3773 
3774 static void intel_pebs_aliases_core2(struct perf_event *event)
3775 {
3776 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3777 		/*
3778 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3779 		 * (0x003c) so that we can use it with PEBS.
3780 		 *
3781 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3782 		 * PEBS capable. However we can use INST_RETIRED.ANY_P
3783 		 * (0x00c0), which is a PEBS capable event, to get the same
3784 		 * count.
3785 		 *
3786 		 * INST_RETIRED.ANY_P counts the number of cycles that retire
3787 		 * CNTMASK instructions. By setting CNTMASK to a value (16)
3788 		 * larger than the maximum number of instructions that can be
3789 		 * retired per cycle (4) and then inverting the condition, we
3790 		 * count all cycles that retire 16 or fewer instructions, which
3791 		 * is every cycle.
3792 		 *
3793 		 * Thereby we gain a PEBS capable cycle counter.
3794 		 */
3795 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3796 
3797 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3798 		event->hw.config = alt_config;
3799 	}
3800 }
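
/*
 * For reference, the alternative encoding above expands, per the
 * PERFEVTSEL layout used by the format attributes below (inv is
 * config bit 23, cmask bits 24-31), to:
 *
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 *		= 0xc0 | (1ULL << 23) | (16ULL << 24)
 *		= 0x108000c0
 */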
3801 
3802 static void intel_pebs_aliases_snb(struct perf_event *event)
3803 {
3804 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3805 		/*
3806 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3807 		 * (0x003c) so that we can use it with PEBS.
3808 		 *
3809 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3810 		 * PEBS capable. However we can use UOPS_RETIRED.ALL
3811 		 * (0x01c2), which is a PEBS capable event, to get the same
3812 		 * count.
3813 		 *
3814 		 * UOPS_RETIRED.ALL counts the number of cycles that retire
3815 		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3816 		 * larger than the maximum number of micro-ops that can be
3817 		 * retired per cycle (4) and then inverting the condition, we
3818 		 * count all cycles that retire 16 or fewer micro-ops, which
3819 		 * is every cycle.
3820 		 *
3821 		 * Thereby we gain a PEBS capable cycle counter.
3822 		 */
3823 		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3824 
3825 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3826 		event->hw.config = alt_config;
3827 	}
3828 }
3829 
3830 static void intel_pebs_aliases_precdist(struct perf_event *event)
3831 {
3832 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3833 		/*
3834 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3835 		 * (0x003c) so that we can use it with PEBS.
3836 		 *
3837 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3838 		 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3839 		 * (0x01c0), which is a PEBS capable event, to get the same
3840 		 * count.
3841 		 *
3842 		 * The PREC_DIST event has special support to minimize sample
3843 		 * shadowing effects. One drawback is that it can only be
3844 		 * programmed on counter 1, but that seems like an
3845 		 * acceptable trade-off.
3846 		 */
3847 		u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3848 
3849 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3850 		event->hw.config = alt_config;
3851 	}
3852 }
3853 
3854 static void intel_pebs_aliases_ivb(struct perf_event *event)
3855 {
3856 	if (event->attr.precise_ip < 3)
3857 		return intel_pebs_aliases_snb(event);
3858 	return intel_pebs_aliases_precdist(event);
3859 }
3860 
3861 static void intel_pebs_aliases_skl(struct perf_event *event)
3862 {
3863 	if (event->attr.precise_ip < 3)
3864 		return intel_pebs_aliases_core2(event);
3865 	return intel_pebs_aliases_precdist(event);
3866 }
3867 
3868 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3869 {
3870 	unsigned long flags = x86_pmu.large_pebs_flags;
3871 
3872 	if (event->attr.use_clockid)
3873 		flags &= ~PERF_SAMPLE_TIME;
3874 	if (!event->attr.exclude_kernel)
3875 		flags &= ~PERF_SAMPLE_REGS_USER;
3876 	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3877 		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3878 	return flags;
3879 }
3880 
3881 static int intel_pmu_bts_config(struct perf_event *event)
3882 {
3883 	struct perf_event_attr *attr = &event->attr;
3884 
3885 	if (unlikely(intel_pmu_has_bts(event))) {
3886 		/* BTS is not supported by this architecture. */
3887 		if (!x86_pmu.bts_active)
3888 			return -EOPNOTSUPP;
3889 
3890 		/* BTS is currently only allowed for user-mode. */
3891 		if (!attr->exclude_kernel)
3892 			return -EOPNOTSUPP;
3893 
3894 		/* BTS is not allowed for precise events. */
3895 		if (attr->precise_ip)
3896 			return -EOPNOTSUPP;
3897 
3898 		/* disallow bts if conflicting events are present */
3899 		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3900 			return -EBUSY;
3901 
3902 		event->destroy = hw_perf_lbr_event_destroy;
3903 	}
3904 
3905 	return 0;
3906 }
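
/*
 * For illustration, an event that takes the BTS path above is a sampling
 * branch-instructions event with a fixed period of 1, user only and
 * without precise_ip, e.g. (perf tool syntax, assuming BTS is active on
 * the machine):
 *
 *	perf record -e branches:u -c 1 -- workload
 */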
3907 
3908 static int core_pmu_hw_config(struct perf_event *event)
3909 {
3910 	int ret = x86_pmu_hw_config(event);
3911 
3912 	if (ret)
3913 		return ret;
3914 
3915 	return intel_pmu_bts_config(event);
3916 }
3917 
3918 #define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \
3919 					 ((x86_pmu.num_topdown_events - 1) << 8))
3920 
3921 static bool is_available_metric_event(struct perf_event *event)
3922 {
3923 	return is_metric_event(event) &&
3924 		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3925 }
3926 
3927 static inline bool is_mem_loads_event(struct perf_event *event)
3928 {
3929 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3930 }
3931 
3932 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3933 {
3934 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3935 }
3936 
3937 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3938 {
3939 	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3940 		return false;
3941 
3942 	if (is_hybrid())
3943 		return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
3944 
3945 	return true;
3946 }
3947 
3948 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3949 {
3950 	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3951 
3952 	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3953 }
3954 
3955 static u64 intel_pmu_freq_start_period(struct perf_event *event)
3956 {
3957 	int type = event->attr.type;
3958 	u64 config, factor;
3959 	s64 start;
3960 
3961 	/*
3962 	 * 127 is the lowest possible recommended SAV (sample-after value)
3963 	 * for the default freq of 4000, according to the event list JSON file.
3964 	 * Also, assume the workload is idle 50% of the time.
3965 	 */
3966 	factor = 64 * 4000;
3967 	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
3968 		goto end;
3969 
3970 	/*
3971 	 * The estimation of the start period in the freq mode is
3972 	 * based on the assumptions below.
3973 	 *
3974 	 * For a cycles or an instructions event, assume a 1GHz
3975 	 * platform, 1 IPC, and a workload that is idle 50% of the time.
3976 	 * The start period = 1,000,000,000 * 1 / freq / 2
3977 	 *		    = 500,000,000 / freq
3978 	 *
3979 	 * Usually, branch-related events occur less often than the
3980 	 * instructions event. According to the Intel event list JSON
3981 	 * file, the SAV (sample-after value) of a branch-related event
3982 	 * is usually 1/4 of that of an instructions event.
3983 	 * The start period of branch-related events = 125,000,000 / freq.
3984 	 *
3985 	 * Cache-related events occur even less often. The SAV is usually
3986 	 * 1/20 of that of an instructions event.
3987 	 * The start period of cache-related events = 25,000,000 / freq.
3988 	 */
3989 	config = event->attr.config & PERF_HW_EVENT_MASK;
3990 	if (type == PERF_TYPE_HARDWARE) {
3991 		switch (config) {
3992 		case PERF_COUNT_HW_CPU_CYCLES:
3993 		case PERF_COUNT_HW_INSTRUCTIONS:
3994 		case PERF_COUNT_HW_BUS_CYCLES:
3995 		case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
3996 		case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
3997 		case PERF_COUNT_HW_REF_CPU_CYCLES:
3998 			factor = 500000000;
3999 			break;
4000 		case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
4001 		case PERF_COUNT_HW_BRANCH_MISSES:
4002 			factor = 125000000;
4003 			break;
4004 		case PERF_COUNT_HW_CACHE_REFERENCES:
4005 		case PERF_COUNT_HW_CACHE_MISSES:
4006 			factor = 25000000;
4007 			break;
4008 		default:
4009 			goto end;
4010 		}
4011 	}
4012 
4013 	if (type == PERF_TYPE_HW_CACHE)
4014 		factor = 25000000;
4015 end:
4016 	/*
4017 	 * Usually, a prime or a number with few factors (close to prime)
4018 	 * is chosen as the SAV, which makes it less likely that the sampling
4019 	 * period synchronizes with some periodic event in the workload.
4020 	 * Subtract 1 so that, at least for the default freq, values near
4021 	 * powers of two are avoided.
4022 	 */
4023 	start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
4024 
4025 	if (start > x86_pmu.max_period)
4026 		start = x86_pmu.max_period;
4027 
4028 	if (x86_pmu.limit_period)
4029 		x86_pmu.limit_period(event, &start);
4030 
4031 	return start;
4032 }
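
/*
 * Worked example of the estimate above for the default 4000 Hz freq
 * (before any limit_period clamping):
 *
 *	cycles/instructions: DIV_ROUND_UP(500000000, 4000) - 1 = 124999
 *	branches/br-misses : DIV_ROUND_UP(125000000, 4000) - 1 =  31249
 *	cache refs/misses  : DIV_ROUND_UP(25000000,  4000) - 1 =   6249
 */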
4033 
4034 static int intel_pmu_hw_config(struct perf_event *event)
4035 {
4036 	int ret = x86_pmu_hw_config(event);
4037 
4038 	if (ret)
4039 		return ret;
4040 
4041 	ret = intel_pmu_bts_config(event);
4042 	if (ret)
4043 		return ret;
4044 
4045 	if (event->attr.freq && event->attr.sample_freq) {
4046 		event->hw.sample_period = intel_pmu_freq_start_period(event);
4047 		event->hw.last_period = event->hw.sample_period;
4048 		local64_set(&event->hw.period_left, event->hw.sample_period);
4049 	}
4050 
4051 	if (event->attr.precise_ip) {
4052 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
4053 			return -EINVAL;
4054 
4055 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
4056 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
4057 			if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) &&
4058 			    !has_aux_action(event)) {
4059 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
4060 				event->attach_state |= PERF_ATTACH_SCHED_CB;
4061 			}
4062 		}
4063 		if (x86_pmu.pebs_aliases)
4064 			x86_pmu.pebs_aliases(event);
4065 	}
4066 
4067 	if (needs_branch_stack(event)) {
4068 		/* Avoid branch stack setup for counting events in SAMPLE READ */
4069 		if (is_sampling_event(event) ||
4070 		    !(event->attr.sample_type & PERF_SAMPLE_READ))
4071 			event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4072 	}
4073 
4074 	if (branch_sample_counters(event)) {
4075 		struct perf_event *leader, *sibling;
4076 		int num = 0;
4077 
4078 		if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
4079 		    (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
4080 			return -EINVAL;
4081 
4082 		/*
4083 		 * The branch counter logging is not supported in the call stack
4084 		 * mode yet, since we cannot simply flush the LBR during, e.g.,
4085 		 * multiplexing. Also, there is no obvious usage with the call
4086 		 * stack mode. Simply forbid it for now.
4087 		 *
4088 		 * If any events in the group enable the branch counter logging
4089 		 * feature, the group is treated as a branch counter logging
4090 		 * group, which requires the extra space to store the counters.
4091 		 */
4092 		leader = event->group_leader;
4093 		if (branch_sample_call_stack(leader))
4094 			return -EINVAL;
4095 		if (branch_sample_counters(leader))
4096 			num++;
4097 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4098 
4099 		for_each_sibling_event(sibling, leader) {
4100 			if (branch_sample_call_stack(sibling))
4101 				return -EINVAL;
4102 			if (branch_sample_counters(sibling))
4103 				num++;
4104 		}
4105 
4106 		if (num > fls(x86_pmu.lbr_counters))
4107 			return -EINVAL;
4108 		/*
4109 		 * Applying only PERF_SAMPLE_BRANCH_COUNTERS doesn't
4110 		 * require any branch stack setup.
4111 		 * Clear the bit to avoid unnecessary branch stack setup.
4112 		 */
4113 		if (0 == (event->attr.branch_sample_type &
4114 			  ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4115 			    PERF_SAMPLE_BRANCH_COUNTERS)))
4116 			event->hw.flags  &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4117 
4118 		/*
4119 		 * Force the leader to be an LBR event, so LBRs can be reset
4120 		 * with the leader event. See intel_pmu_lbr_del() for details.
4121 		 */
4122 		if (!intel_pmu_needs_branch_stack(leader))
4123 			return -EINVAL;
4124 	}
4125 
4126 	if (intel_pmu_needs_branch_stack(event)) {
4127 		ret = intel_pmu_setup_lbr_filter(event);
4128 		if (ret)
4129 			return ret;
4130 		event->attach_state |= PERF_ATTACH_SCHED_CB;
4131 
4132 		/*
4133 		 * BTS is set up earlier in this path, so don't account twice
4134 		 */
4135 		if (!unlikely(intel_pmu_has_bts(event))) {
4136 			/* disallow lbr if conflicting events are present */
4137 			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4138 				return -EBUSY;
4139 
4140 			event->destroy = hw_perf_lbr_event_destroy;
4141 		}
4142 	}
4143 
4144 	if (event->attr.aux_output) {
4145 		if (!event->attr.precise_ip)
4146 			return -EINVAL;
4147 
4148 		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4149 	}
4150 
4151 	if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4152 	    (event->attr.type == PERF_TYPE_HW_CACHE))
4153 		return 0;
4154 
4155 	/*
4156 	 * Config Topdown slots and metric events
4157 	 *
4158 	 * The slots event on Fixed Counter 3 can support sampling,
4159 	 * which will be handled normally in x86_perf_event_update().
4160 	 *
4161 	 * Metric events don't support sampling and require being paired
4162 	 * with a slots event as group leader. When the slots event
4163 	 * is used in a metrics group, it too cannot support sampling.
4164 	 */
4165 	if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4166 		/* The metrics_clear can only be set for the slots event */
4167 		if (event->attr.config1 &&
4168 		    (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR)))
4169 			return -EINVAL;
4170 
4171 		if (event->attr.config2)
4172 			return -EINVAL;
4173 
4174 		/*
4175 		 * The TopDown metrics events and slots event don't
4176 		 * support any filters.
4177 		 */
4178 		if (event->attr.config & X86_ALL_EVENT_FLAGS)
4179 			return -EINVAL;
4180 
4181 		if (is_available_metric_event(event)) {
4182 			struct perf_event *leader = event->group_leader;
4183 
4184 			/* The metric events don't support sampling. */
4185 			if (is_sampling_event(event))
4186 				return -EINVAL;
4187 
4188 			/* The metric events require a slots group leader. */
4189 			if (!is_slots_event(leader))
4190 				return -EINVAL;
4191 
4192 			/*
4193 			 * The leader/SLOTS must not be a sampling event for
4194 			 * metric use; hardware requires it starts at 0 when used
4195 			 * in conjunction with MSR_PERF_METRICS.
4196 			 */
4197 			if (is_sampling_event(leader))
4198 				return -EINVAL;
4199 
4200 			event->event_caps |= PERF_EV_CAP_SIBLING;
4201 			/*
4202 			 * Only once we have a METRICs sibling do we
4203 			 * need TopDown magic.
4204 			 */
4205 			leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4206 			event->hw.flags  |= PERF_X86_EVENT_TOPDOWN;
4207 		}
4208 	}
4209 
4210 	/*
4211 	 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4212 	 * doesn't function quite right. As a work-around it needs to always be
4213 	 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4214 	 * The actual count of this second event is irrelevant; it just needs
4215 	 * to be active to make the first event function correctly.
4216 	 *
4217 	 * In a group, the auxiliary event must be in front of the load latency
4218 	 * event. This rule simplifies the implementation of the check, because
4219 	 * perf cannot see the complete group at this point.
4220 	 */
4221 	if (require_mem_loads_aux_event(event) &&
4222 	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4223 	    is_mem_loads_event(event)) {
4224 		struct perf_event *leader = event->group_leader;
4225 		struct perf_event *sibling = NULL;
4226 
4227 		/*
4228 		 * When this mem-loads event is also the first event (no group
4229 		 * exists yet), there is no aux event in front of it.
4230 		 */
4231 		if (leader == event)
4232 			return -ENODATA;
4233 
4234 		if (!is_mem_loads_aux_event(leader)) {
4235 			for_each_sibling_event(sibling, leader) {
4236 				if (is_mem_loads_aux_event(sibling))
4237 					break;
4238 			}
4239 			if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4240 				return -ENODATA;
4241 		}
4242 	}
4243 
4244 	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4245 		return 0;
4246 
4247 	if (x86_pmu.version < 3)
4248 		return -EINVAL;
4249 
4250 	ret = perf_allow_cpu(&event->attr);
4251 	if (ret)
4252 		return ret;
4253 
4254 	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4255 
4256 	return 0;
4257 }
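
/*
 * A group that satisfies the SPR load latency work-around above has the
 * auxiliary event leading, e.g. (perf tool syntax, assuming the usual
 * mem-loads-aux/mem-loads event aliases exist on the platform):
 *
 *	perf record -d -e '{cpu/mem-loads-aux/,cpu/mem-loads/}' -- workload
 *
 * where -d adds the PERF_SAMPLE_DATA_SRC sample type that triggers the
 * check.
 */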
4258 
4259 /*
4260  * Currently, the only caller of this function is the atomic_switch_perf_msrs().
4261  * The host perf context helps to prepare the values of the real hardware for
4262  * a set of msrs that need to be switched atomically in a vmx transaction.
4263  *
4264  * For example, the pseudocode needed to add a new msr should look like:
4265  *
4266  * arr[(*nr)++] = (struct perf_guest_switch_msr){
4267  *	.msr = the hardware msr address,
4268  *	.host = the value the hardware has when it doesn't run a guest,
4269  *	.guest = the value the hardware has when it runs a guest,
4270  * };
4271  *
4272  * These values have nothing to do with the emulated values the guest sees
4273  * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4274  * specifically in the intel_pmu_{get,set}_msr().
4275  */
4276 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4277 {
4278 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4279 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4280 	struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4281 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4282 	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4283 	int global_ctrl, pebs_enable;
4284 
4285 	/*
4286 	 * In addition to obeying exclude_guest/exclude_host, remove bits being
4287 	 * used for PEBS when running a guest, because PEBS writes to virtual
4288 	 * addresses (not physical addresses).
4289 	 */
4290 	*nr = 0;
4291 	global_ctrl = (*nr)++;
4292 	arr[global_ctrl] = (struct perf_guest_switch_msr){
4293 		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
4294 		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4295 		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
4296 	};
4297 
4298 	if (!x86_pmu.pebs)
4299 		return arr;
4300 
4301 	/*
4302 	 * If a PMU counter has PEBS enabled, it is not enough to
4303 	 * disable the counter on guest entry, since a PEBS memory
4304 	 * write can overshoot guest entry and corrupt guest
4305 	 * memory. Disabling PEBS solves the problem.
4306 	 *
4307 	 * Don't do this if the CPU already enforces it.
4308 	 */
4309 	if (x86_pmu.pebs_no_isolation) {
4310 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4311 			.msr = MSR_IA32_PEBS_ENABLE,
4312 			.host = cpuc->pebs_enabled,
4313 			.guest = 0,
4314 		};
4315 		return arr;
4316 	}
4317 
4318 	if (!kvm_pmu || !x86_pmu.pebs_ept)
4319 		return arr;
4320 
4321 	arr[(*nr)++] = (struct perf_guest_switch_msr){
4322 		.msr = MSR_IA32_DS_AREA,
4323 		.host = (unsigned long)cpuc->ds,
4324 		.guest = kvm_pmu->ds_area,
4325 	};
4326 
4327 	if (x86_pmu.intel_cap.pebs_baseline) {
4328 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4329 			.msr = MSR_PEBS_DATA_CFG,
4330 			.host = cpuc->active_pebs_data_cfg,
4331 			.guest = kvm_pmu->pebs_data_cfg,
4332 		};
4333 	}
4334 
4335 	pebs_enable = (*nr)++;
4336 	arr[pebs_enable] = (struct perf_guest_switch_msr){
4337 		.msr = MSR_IA32_PEBS_ENABLE,
4338 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4339 		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4340 	};
4341 
4342 	if (arr[pebs_enable].host) {
4343 		/* Disable guest PEBS if host PEBS is enabled. */
4344 		arr[pebs_enable].guest = 0;
4345 	} else {
4346 		/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4347 		arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4348 		arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4349 		/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4350 		arr[global_ctrl].guest |= arr[pebs_enable].guest;
4351 	}
4352 
4353 	return arr;
4354 }
4355 
4356 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4357 {
4358 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4359 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4360 	int idx;
4361 
4362 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4363 		struct perf_event *event = cpuc->events[idx];
4364 
4365 		arr[idx].msr = x86_pmu_config_addr(idx);
4366 		arr[idx].host = arr[idx].guest = 0;
4367 
4368 		if (!test_bit(idx, cpuc->active_mask))
4369 			continue;
4370 
4371 		arr[idx].host = arr[idx].guest =
4372 			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4373 
4374 		if (event->attr.exclude_host)
4375 			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4376 		else if (event->attr.exclude_guest)
4377 			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4378 	}
4379 
4380 	*nr = x86_pmu_max_num_counters(cpuc->pmu);
4381 	return arr;
4382 }
4383 
4384 static void core_pmu_enable_event(struct perf_event *event)
4385 {
4386 	if (!event->attr.exclude_host)
4387 		x86_pmu_enable_event(event);
4388 }
4389 
4390 static void core_pmu_enable_all(int added)
4391 {
4392 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4393 	int idx;
4394 
4395 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4396 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4397 
4398 		if (!test_bit(idx, cpuc->active_mask) ||
4399 				cpuc->events[idx]->attr.exclude_host)
4400 			continue;
4401 
4402 		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4403 	}
4404 }
4405 
4406 static int hsw_hw_config(struct perf_event *event)
4407 {
4408 	int ret = intel_pmu_hw_config(event);
4409 
4410 	if (ret)
4411 		return ret;
4412 	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4413 		return 0;
4414 	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4415 
4416 	/*
4417 	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4418 	 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4419 	 * this combination.
4420 	 */
4421 	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4422 	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4423 	      event->attr.precise_ip > 0))
4424 		return -EOPNOTSUPP;
4425 
4426 	if (event_is_checkpointed(event)) {
4427 		/*
4428 		 * Sampling of checkpointed events can cause situations where
4429 		 * the CPU constantly aborts because of an overflow, which is
4430 		 * then checkpointed back and ignored. Forbid checkpointing
4431 		 * for sampling.
4432 		 *
4433 		 * But still allow a long sampling period, so that perf stat
4434 		 * from KVM works.
4435 		 */
4436 		if (event->attr.sample_period > 0 &&
4437 		    event->attr.sample_period < 0x7fffffff)
4438 			return -EOPNOTSUPP;
4439 	}
4440 	return 0;
4441 }
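
/*
 * The HSW_IN_TX/HSW_IN_TX_CHECKPOINTED bits picked up above live outside
 * the architectural event mask, in config bits 32 and 33 (see the in_tx
 * and in_tx_cp format attributes below), so e.g. (perf tool syntax)
 * 'cpu/event=0x3c,in_tx=1/' counts cycles spent inside transactions.
 */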
4442 
4443 static struct event_constraint counter0_constraint =
4444 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4445 
4446 static struct event_constraint counter1_constraint =
4447 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4448 
4449 static struct event_constraint counter0_1_constraint =
4450 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4451 
4452 static struct event_constraint counter2_constraint =
4453 			EVENT_CONSTRAINT(0, 0x4, 0);
4454 
4455 static struct event_constraint fixed0_constraint =
4456 			FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4457 
4458 static struct event_constraint fixed0_counter0_constraint =
4459 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4460 
4461 static struct event_constraint fixed0_counter0_1_constraint =
4462 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4463 
4464 static struct event_constraint counters_1_7_constraint =
4465 			INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
4466 
4467 static struct event_constraint *
4468 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4469 			  struct perf_event *event)
4470 {
4471 	struct event_constraint *c;
4472 
4473 	c = intel_get_event_constraints(cpuc, idx, event);
4474 
4475 	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
4476 	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4477 		if (c->idxmsk64 & (1U << 2))
4478 			return &counter2_constraint;
4479 		return &emptyconstraint;
4480 	}
4481 
4482 	return c;
4483 }
4484 
4485 static struct event_constraint *
4486 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4487 			  struct perf_event *event)
4488 {
4489 	/*
4490 	 * Fixed counter 0 has less skid.
4491 	 * Force instruction:ppp onto Fixed counter 0.
4492 	 */
4493 	if ((event->attr.precise_ip == 3) &&
4494 	    constraint_match(&fixed0_constraint, event->hw.config))
4495 		return &fixed0_constraint;
4496 
4497 	return hsw_get_event_constraints(cpuc, idx, event);
4498 }
4499 
4500 static struct event_constraint *
4501 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4502 			  struct perf_event *event)
4503 {
4504 	struct event_constraint *c;
4505 
4506 	c = icl_get_event_constraints(cpuc, idx, event);
4507 
4508 	/*
4509 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
4510 	 * is only supported on the GP counter 0. If a :ppp event is not
4511 	 * available on the GP counter 0, error out.
4512 	 * Exception: Instruction PDIR is only available on the fixed counter 0.
4513 	 */
4514 	if ((event->attr.precise_ip == 3) &&
4515 	    !constraint_match(&fixed0_constraint, event->hw.config)) {
4516 		if (c->idxmsk64 & BIT_ULL(0))
4517 			return &counter0_constraint;
4518 
4519 		return &emptyconstraint;
4520 	}
4521 
4522 	return c;
4523 }
4524 
4525 static struct event_constraint *
4526 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4527 			  struct perf_event *event)
4528 {
4529 	struct event_constraint *c;
4530 
4531 	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
4532 	if (event->attr.precise_ip == 3)
4533 		return &counter0_constraint;
4534 
4535 	c = intel_get_event_constraints(cpuc, idx, event);
4536 
4537 	return c;
4538 }
4539 
4540 static struct event_constraint *
4541 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4542 			  struct perf_event *event)
4543 {
4544 	struct event_constraint *c;
4545 
4546 	c = intel_get_event_constraints(cpuc, idx, event);
4547 
4548 	/*
4549 	 * :ppp means to do reduced skid PEBS,
4550 	 * which is available on PMC0 and fixed counter 0.
4551 	 */
4552 	if (event->attr.precise_ip == 3) {
4553 		/* Force instruction:ppp on PMC0 and Fixed counter 0 */
4554 		if (constraint_match(&fixed0_constraint, event->hw.config))
4555 			return &fixed0_counter0_constraint;
4556 
4557 		return &counter0_constraint;
4558 	}
4559 
4560 	return c;
4561 }
4562 
4563 static bool allow_tsx_force_abort = true;
4564 
4565 static struct event_constraint *
4566 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4567 			  struct perf_event *event)
4568 {
4569 	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4570 
4571 	/*
4572 	 * Without TFA we must not use PMC3.
4573 	 */
4574 	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4575 		c = dyn_constraint(cpuc, c, idx);
4576 		c->idxmsk64 &= ~(1ULL << 3);
4577 		c->weight--;
4578 	}
4579 
4580 	return c;
4581 }
4582 
4583 static struct event_constraint *
4584 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4585 			  struct perf_event *event)
4586 {
4587 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4588 
4589 	if (pmu->pmu_type == hybrid_big)
4590 		return glc_get_event_constraints(cpuc, idx, event);
4591 	else if (pmu->pmu_type == hybrid_small)
4592 		return tnt_get_event_constraints(cpuc, idx, event);
4593 
4594 	WARN_ON(1);
4595 	return &emptyconstraint;
4596 }
4597 
4598 static struct event_constraint *
4599 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4600 			  struct perf_event *event)
4601 {
4602 	struct event_constraint *c;
4603 
4604 	c = intel_get_event_constraints(cpuc, idx, event);
4605 
4606 	/*
4607 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
4608 	 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4609 	 * If a :ppp event is not available on one of the above eligible
4610 	 * counters, error out.
4611 	 */
4612 	if (event->attr.precise_ip == 3) {
4613 		/* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4614 		if (constraint_match(&fixed0_constraint, event->hw.config)) {
4615 			/* The fixed counter 0 doesn't support LBR event logging. */
4616 			if (branch_sample_counters(event))
4617 				return &counter0_1_constraint;
4618 			else
4619 				return &fixed0_counter0_1_constraint;
4620 		}
4621 
4622 		switch (c->idxmsk64 & 0x3ull) {
4623 		case 0x1:
4624 			return &counter0_constraint;
4625 		case 0x2:
4626 			return &counter1_constraint;
4627 		case 0x3:
4628 			return &counter0_1_constraint;
4629 		}
4630 		return &emptyconstraint;
4631 	}
4632 
4633 	return c;
4634 }
4635 
4636 static struct event_constraint *
4637 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4638 			  struct perf_event *event)
4639 {
4640 	struct event_constraint *c;
4641 
4642 	c = glc_get_event_constraints(cpuc, idx, event);
4643 
4644 	/* The Retire Latency is not supported by the fixed counter 0. */
4645 	if (event->attr.precise_ip &&
4646 	    (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4647 	    constraint_match(&fixed0_constraint, event->hw.config)) {
4648 		/*
4649 		 * The Instruction PDIR is only available
4650 		 * on the fixed counter 0. Error out for this case.
4651 		 */
4652 		if (event->attr.precise_ip == 3)
4653 			return &emptyconstraint;
4654 		return &counters_1_7_constraint;
4655 	}
4656 
4657 	return c;
4658 }
4659 
4660 static struct event_constraint *
4661 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4662 			  struct perf_event *event)
4663 {
4664 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4665 
4666 	if (pmu->pmu_type == hybrid_big)
4667 		return rwc_get_event_constraints(cpuc, idx, event);
4668 	if (pmu->pmu_type == hybrid_small)
4669 		return cmt_get_event_constraints(cpuc, idx, event);
4670 
4671 	WARN_ON(1);
4672 	return &emptyconstraint;
4673 }
4674 
4675 static int adl_hw_config(struct perf_event *event)
4676 {
4677 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4678 
4679 	if (pmu->pmu_type == hybrid_big)
4680 		return hsw_hw_config(event);
4681 	else if (pmu->pmu_type == hybrid_small)
4682 		return intel_pmu_hw_config(event);
4683 
4684 	WARN_ON(1);
4685 	return -EOPNOTSUPP;
4686 }
4687 
4688 static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
4689 {
4690 	return HYBRID_INTEL_CORE;
4691 }
4692 
4693 static inline bool erratum_hsw11(struct perf_event *event)
4694 {
4695 	return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4696 		X86_CONFIG(.event=0xc0, .umask=0x01);
4697 }
4698 
4699 static struct event_constraint *
4700 arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4701 			  struct perf_event *event)
4702 {
4703 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4704 
4705 	if (pmu->pmu_type == hybrid_tiny)
4706 		return cmt_get_event_constraints(cpuc, idx, event);
4707 
4708 	return mtl_get_event_constraints(cpuc, idx, event);
4709 }
4710 
4711 static int arl_h_hw_config(struct perf_event *event)
4712 {
4713 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4714 
4715 	if (pmu->pmu_type == hybrid_tiny)
4716 		return intel_pmu_hw_config(event);
4717 
4718 	return adl_hw_config(event);
4719 }
4720 
4721 /*
4722  * HSW11 requires a period larger than 100, which is the same as BDM11.
4723  * A minimum period of 128 is therefore enforced for INST_RETIRED.ALL.
4724  *
4725  * The message 'interrupt took too long' can be observed on any counter
4726  * armed with a period < 32 when two events expired in the same NMI.
4727  * A minimum period of 32 is enforced for the rest of the events.
4728  */
4729 static void hsw_limit_period(struct perf_event *event, s64 *left)
4730 {
4731 	*left = max(*left, erratum_hsw11(event) ? 128 : 32);
4732 }
4733 
4734 /*
4735  * Broadwell:
4736  *
4737  * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4738  * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4739  * the two to enforce a minimum period of 128 (the smallest value that has bits
4740  * 0-5 cleared and >= 100).
4741  *
4742  * Because of how the code in x86_perf_event_set_period() works, the truncation
4743  * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4744  * to make up for the 'lost' events due to carrying the 'error' in period_left.
4745  *
4746  * Therefore the effective (average) period matches the requested period,
4747  * despite coarser hardware granularity.
4748  */
4749 static void bdw_limit_period(struct perf_event *event, s64 *left)
4750 {
4751 	if (erratum_hsw11(event)) {
4752 		if (*left < 128)
4753 			*left = 128;
4754 		*left &= ~0x3fULL;
4755 	}
4756 }
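
/*
 * Example of the clamping above: a requested period of 100 is first
 * raised to 128 (which already has bits 0-5 clear), while a requested
 * period of 193 becomes 192 (only the low six bits are cleared).
 */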
4757 
4758 static void nhm_limit_period(struct perf_event *event, s64 *left)
4759 {
4760 	*left = max(*left, 32LL);
4761 }
4762 
4763 static void glc_limit_period(struct perf_event *event, s64 *left)
4764 {
4765 	if (event->attr.precise_ip == 3)
4766 		*left = max(*left, 128LL);
4767 }
4768 
4769 PMU_FORMAT_ATTR(event,	"config:0-7"	);
4770 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
4771 PMU_FORMAT_ATTR(edge,	"config:18"	);
4772 PMU_FORMAT_ATTR(pc,	"config:19"	);
4773 PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
4774 PMU_FORMAT_ATTR(inv,	"config:23"	);
4775 PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
4776 PMU_FORMAT_ATTR(in_tx,  "config:32"	);
4777 PMU_FORMAT_ATTR(in_tx_cp, "config:33"	);
4778 PMU_FORMAT_ATTR(eq,	"config:36"	); /* v6 + */
4779 
4780 PMU_FORMAT_ATTR(metrics_clear,	"config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
4781 
4782 static ssize_t umask2_show(struct device *dev,
4783 			   struct device_attribute *attr,
4784 			   char *page)
4785 {
4786 	u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
4787 
4788 	if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
4789 		return sprintf(page, "config:8-15,40-47\n");
4790 
4791 	/* Roll back to the old format if umask2 is not supported. */
4792 	return sprintf(page, "config:8-15\n");
4793 }
4794 
4795 static struct device_attribute format_attr_umask2  =
4796 		__ATTR(umask, 0444, umask2_show, NULL);
4797 
4798 static struct attribute *format_evtsel_ext_attrs[] = {
4799 	&format_attr_umask2.attr,
4800 	&format_attr_eq.attr,
4801 	&format_attr_metrics_clear.attr,
4802 	NULL
4803 };
4804 
4805 static umode_t
4806 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4807 {
4808 	struct device *dev = kobj_to_dev(kobj);
4809 	u64 mask;
4810 
4811 	/*
4812 	 * The umask and umask2 have different formats but share the
4813 	 * same attr name. In update mode, the previous value of the
4814 	 * umask is unconditionally removed before is_visible. If
4815 	 * umask2 format is not enumerated, it's impossible to roll
4816 	 * back to the old format.
4817 	 * So do the check in umask2_show() rather than in is_visible().
4818 	 */
4819 	if (i == 0)
4820 		return attr->mode;
4821 
4822 	mask = hybrid(dev_get_drvdata(dev), config_mask);
4823 	if (i == 1)
4824 		return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
4825 
4826 	/* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
4827 	if (i == 2) {
4828 		union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
4829 
4830 		return intel_cap.rdpmc_metrics_clear ? attr->mode : 0;
4831 	}
4832 
4833 	return 0;
4834 }
4835 
4836 static struct attribute *intel_arch_formats_attr[] = {
4837 	&format_attr_event.attr,
4838 	&format_attr_umask.attr,
4839 	&format_attr_edge.attr,
4840 	&format_attr_pc.attr,
4841 	&format_attr_inv.attr,
4842 	&format_attr_cmask.attr,
4843 	NULL,
4844 };
4845 
4846 ssize_t intel_event_sysfs_show(char *page, u64 config)
4847 {
4848 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4849 
4850 	return x86_event_sysfs_show(page, config, event);
4851 }
4852 
4853 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4854 {
4855 	struct intel_shared_regs *regs;
4856 	int i;
4857 
4858 	regs = kzalloc_node(sizeof(struct intel_shared_regs),
4859 			    GFP_KERNEL, cpu_to_node(cpu));
4860 	if (regs) {
4861 		/*
4862 		 * initialize the locks to keep lockdep happy
4863 		 */
4864 		for (i = 0; i < EXTRA_REG_MAX; i++)
4865 			raw_spin_lock_init(&regs->regs[i].lock);
4866 
4867 		regs->core_id = -1;
4868 	}
4869 	return regs;
4870 }
4871 
4872 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4873 {
4874 	struct intel_excl_cntrs *c;
4875 
4876 	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4877 			 GFP_KERNEL, cpu_to_node(cpu));
4878 	if (c) {
4879 		raw_spin_lock_init(&c->lock);
4880 		c->core_id = -1;
4881 	}
4882 	return c;
4883 }
4884 
4885 
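/*
 * Allocate the per-CPU state (shared regs, constraint list, exclusive
 * counters) that the enabled PMU features require; unwind on failure.
 */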
4886 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4887 {
4888 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4889 
4890 	if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4891 		cpuc->shared_regs = allocate_shared_regs(cpu);
4892 		if (!cpuc->shared_regs)
4893 			goto err;
4894 	}
4895 
4896 	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
4897 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4898 
4899 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4900 		if (!cpuc->constraint_list)
4901 			goto err_shared_regs;
4902 	}
4903 
4904 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4905 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4906 		if (!cpuc->excl_cntrs)
4907 			goto err_constraint_list;
4908 
4909 		cpuc->excl_thread_id = 0;
4910 	}
4911 
4912 	return 0;
4913 
4914 err_constraint_list:
4915 	kfree(cpuc->constraint_list);
4916 	cpuc->constraint_list = NULL;
4917 
4918 err_shared_regs:
4919 	kfree(cpuc->shared_regs);
4920 	cpuc->shared_regs = NULL;
4921 
4922 err:
4923 	return -ENOMEM;
4924 }
4925 
4926 static int intel_pmu_cpu_prepare(int cpu)
4927 {
4928 	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4929 }
4930 
4931 static void flip_smm_bit(void *data)
4932 {
4933 	unsigned long set = *(unsigned long *)data;
4934 
4935 	if (set > 0) {
4936 		msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4937 			    DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4938 	} else {
4939 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4940 			      DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4941 	}
4942 }
4943 
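/*
 * Clip the generic and fixed counter masks to the architectural maxima
 * and derive the global control mask (intel_ctrl) from them.
 */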
4944 static void intel_pmu_check_counters_mask(u64 *cntr_mask,
4945 					  u64 *fixed_cntr_mask,
4946 					  u64 *intel_ctrl)
4947 {
4948 	unsigned int bit;
4949 
4950 	bit = fls64(*cntr_mask);
4951 	if (bit > INTEL_PMC_MAX_GENERIC) {
4952 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4953 		     bit, INTEL_PMC_MAX_GENERIC);
4954 		*cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
4955 	}
4956 	*intel_ctrl = *cntr_mask;
4957 
4958 	bit = fls64(*fixed_cntr_mask);
4959 	if (bit > INTEL_PMC_MAX_FIXED) {
4960 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4961 		     bit, INTEL_PMC_MAX_FIXED);
4962 		*fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
4963 	}
4964 
4965 	*intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
4966 }
4967 
4968 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
4969 					      u64 cntr_mask,
4970 					      u64 fixed_cntr_mask,
4971 					      u64 intel_ctrl);
4972 
4973 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
4974 
4975 static inline bool intel_pmu_broken_perf_cap(void)
4976 {
4977 	/* The Perf Metric (Bit 15) is always cleared */
4978 	if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
4979 	    boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
4980 		return true;
4981 
4982 	return false;
4983 }
4984 
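/*
 * Refresh the per-PMU capabilities from CPUID.ARCH_PERFMON_EXT_LEAF:
 * eventsel extensions (UMASK2/EQ), counter masks and, where reliable,
 * MSR_IA32_PERF_CAPABILITIES.
 */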
4985 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
4986 {
4987 	unsigned int cntr, fixed_cntr, ecx, edx;
4988 	union cpuid35_eax eax;
4989 	union cpuid35_ebx ebx;
4990 
4991 	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
4992 
4993 	if (ebx.split.umask2)
4994 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
4995 	if (ebx.split.eq)
4996 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
4997 
4998 	if (eax.split.cntr_subleaf) {
4999 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
5000 			    &cntr, &fixed_cntr, &ecx, &edx);
5001 		pmu->cntr_mask64 = cntr;
5002 		pmu->fixed_cntr_mask64 = fixed_cntr;
5003 	}
5004 
5005 	if (!intel_pmu_broken_perf_cap()) {
5006 		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
5007 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
5008 	}
5009 }
5010 
5011 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
5012 {
5013 	intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
5014 				      &pmu->intel_ctrl);
5015 	pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
5016 	pmu->unconstrained = (struct event_constraint)
5017 			     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
5018 						0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
5019 
5020 	if (pmu->intel_cap.perf_metrics)
5021 		pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5022 	else
5023 		pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5024 
5025 	intel_pmu_check_event_constraints(pmu->event_constraints,
5026 					  pmu->cntr_mask64,
5027 					  pmu->fixed_cntr_mask64,
5028 					  pmu->intel_ctrl);
5029 
5030 	intel_pmu_check_extra_regs(pmu->extra_regs);
5031 }
5032 
5033 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
5034 {
5035 	u8 cpu_type = get_this_hybrid_cpu_type();
5036 	int i;
5037 
5038 	/*
5039 	 * This is running on a CPU model that is known to have hybrid
5040 	 * configurations. But the CPU told us it is not hybrid, shame
5041 	 * on it. There should be a fixup function provided for these
5042 	 * troublesome CPUs (->get_hybrid_cpu_type).
5043 	 */
5044 	if (cpu_type == HYBRID_INTEL_NONE) {
5045 		if (x86_pmu.get_hybrid_cpu_type)
5046 			cpu_type = x86_pmu.get_hybrid_cpu_type();
5047 		else
5048 			return NULL;
5049 	}
5050 
5051 	/*
5052 	 * This essentially just maps between the 'hybrid_cpu_type'
5053 	 * and 'hybrid_pmu_type' enums, except for the ARL-H processor,
5054 	 * which needs to compare the atom uarch native id, since ARL-H
5055 	 * contains two different atom uarchs.
5056 	 */
5057 	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5058 		enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
5059 		u32 native_id;
5060 
5061 		if (cpu_type == HYBRID_INTEL_CORE && pmu_type == hybrid_big)
5062 			return &x86_pmu.hybrid_pmu[i];
5063 		if (cpu_type == HYBRID_INTEL_ATOM) {
5064 			if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
5065 				return &x86_pmu.hybrid_pmu[i];
5066 
5067 			native_id = get_this_hybrid_cpu_native_id();
5068 			if (native_id == skt_native_id && pmu_type == hybrid_small)
5069 				return &x86_pmu.hybrid_pmu[i];
5070 			if (native_id == cmt_native_id && pmu_type == hybrid_tiny)
5071 				return &x86_pmu.hybrid_pmu[i];
5072 		}
5073 	}
5074 
5075 	return NULL;
5076 }
5077 
5078 static bool init_hybrid_pmu(int cpu)
5079 {
5080 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5081 	struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
5082 
5083 	if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
5084 		cpuc->pmu = NULL;
5085 		return false;
5086 	}
5087 
5088 	/* Only check and dump the PMU information for the first CPU */
5089 	if (!cpumask_empty(&pmu->supported_cpus))
5090 		goto end;
5091 
5092 	if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
5093 		update_pmu_cap(pmu);
5094 
5095 	intel_pmu_check_hybrid_pmus(pmu);
5096 
5097 	if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
5098 		return false;
5099 
5100 	pr_info("%s PMU driver: ", pmu->name);
5101 
5102 	pr_cont("\n");
5103 
5104 	x86_pmu_show_pmu_cap(&pmu->pmu);
5105 
5106 end:
5107 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
5108 	cpuc->pmu = &pmu->pmu;
5109 
5110 	return true;
5111 }
5112 
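/*
 * Hotplug 'starting' callback: runs on the incoming CPU; resets the
 * LBRs and attaches to (or creates) the per-core shared state.
 */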
5113 static void intel_pmu_cpu_starting(int cpu)
5114 {
5115 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5116 	int core_id = topology_core_id(cpu);
5117 	int i;
5118 
5119 	if (is_hybrid() && !init_hybrid_pmu(cpu))
5120 		return;
5121 
5122 	init_debug_store_on_cpu(cpu);
5123 	/*
5124 	 * Deal with CPUs that don't clear their LBRs on power-up, and that may
5125 	 * even boot with LBRs enabled.
5126 	 */
5127 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
5128 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
5129 	intel_pmu_lbr_reset();
5130 
5131 	cpuc->lbr_sel = NULL;
5132 
5133 	if (x86_pmu.flags & PMU_FL_TFA) {
5134 		WARN_ON_ONCE(cpuc->tfa_shadow);
5135 		cpuc->tfa_shadow = ~0ULL;
5136 		intel_set_tfa(cpuc, false);
5137 	}
5138 
5139 	if (x86_pmu.version > 1)
5140 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
5141 
5142 	/*
5143 	 * Disable perf metrics if any added CPU doesn't support it.
5144 	 *
5145 	 * Turn off the check for a hybrid architecture, because the
5146 	 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
5147 	 * the architectural features. Perf metrics is a model-specific
5148 	 * feature for now. The corresponding bit should always be 0 on
5149 	 * a hybrid platform, e.g., Alder Lake.
5150 	 */
5151 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
5152 		union perf_capabilities perf_cap;
5153 
5154 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
5155 		if (!perf_cap.perf_metrics) {
5156 			x86_pmu.intel_cap.perf_metrics = 0;
5157 			x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5158 		}
5159 	}
5160 
5161 	if (!cpuc->shared_regs)
5162 		return;
5163 
5164 	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
5165 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5166 			struct intel_shared_regs *pc;
5167 
5168 			pc = per_cpu(cpu_hw_events, i).shared_regs;
5169 			if (pc && pc->core_id == core_id) {
5170 				cpuc->kfree_on_online[0] = cpuc->shared_regs;
5171 				cpuc->shared_regs = pc;
5172 				break;
5173 			}
5174 		}
5175 		cpuc->shared_regs->core_id = core_id;
5176 		cpuc->shared_regs->refcnt++;
5177 	}
5178 
5179 	if (x86_pmu.lbr_sel_map)
5180 		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
5181 
5182 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5183 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5184 			struct cpu_hw_events *sibling;
5185 			struct intel_excl_cntrs *c;
5186 
5187 			sibling = &per_cpu(cpu_hw_events, i);
5188 			c = sibling->excl_cntrs;
5189 			if (c && c->core_id == core_id) {
5190 				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
5191 				cpuc->excl_cntrs = c;
5192 				if (!sibling->excl_thread_id)
5193 					cpuc->excl_thread_id = 1;
5194 				break;
5195 			}
5196 		}
5197 		cpuc->excl_cntrs->core_id = core_id;
5198 		cpuc->excl_cntrs->refcnt++;
5199 	}
5200 }
5201 
5202 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
5203 {
5204 	struct intel_excl_cntrs *c;
5205 
5206 	c = cpuc->excl_cntrs;
5207 	if (c) {
5208 		if (c->core_id == -1 || --c->refcnt == 0)
5209 			kfree(c);
5210 		cpuc->excl_cntrs = NULL;
5211 	}
5212 
5213 	kfree(cpuc->constraint_list);
5214 	cpuc->constraint_list = NULL;
5215 }
5216 
5217 static void intel_pmu_cpu_dying(int cpu)
5218 {
5219 	fini_debug_store_on_cpu(cpu);
5220 }
5221 
5222 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
5223 {
5224 	struct intel_shared_regs *pc;
5225 
5226 	pc = cpuc->shared_regs;
5227 	if (pc) {
5228 		if (pc->core_id == -1 || --pc->refcnt == 0)
5229 			kfree(pc);
5230 		cpuc->shared_regs = NULL;
5231 	}
5232 
5233 	free_excl_cntrs(cpuc);
5234 }
5235 
5236 static void intel_pmu_cpu_dead(int cpu)
5237 {
5238 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5239 
5240 	intel_cpuc_finish(cpuc);
5241 
5242 	if (is_hybrid() && cpuc->pmu)
5243 		cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
5244 }
5245 
5246 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5247 				 bool sched_in)
5248 {
5249 	intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5250 	intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
5251 }
5252 
5253 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
5254 				    struct perf_event_pmu_context *next_epc)
5255 {
5256 	intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
5257 }
5258 
5259 static int intel_pmu_check_period(struct perf_event *event, u64 value)
5260 {
5261 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
5262 }
5263 
5264 static void intel_aux_output_init(void)
5265 {
5266 	/* Refer also intel_pmu_aux_output_match() */
5267 	if (x86_pmu.intel_cap.pebs_output_pt_available)
5268 		x86_pmu.assign = intel_pmu_assign_event;
5269 }
5270 
5271 static int intel_pmu_aux_output_match(struct perf_event *event)
5272 {
5273 	/* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
5274 	if (!x86_pmu.intel_cap.pebs_output_pt_available)
5275 		return 0;
5276 
5277 	return is_intel_pt_event(event);
5278 }
5279 
5280 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
5281 {
5282 	struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
5283 
5284 	*ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5285 }
5286 
5287 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5288 
5289 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5290 
5291 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5292 
5293 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5294 
5295 static struct attribute *intel_arch3_formats_attr[] = {
5296 	&format_attr_event.attr,
5297 	&format_attr_umask.attr,
5298 	&format_attr_edge.attr,
5299 	&format_attr_pc.attr,
5300 	&format_attr_any.attr,
5301 	&format_attr_inv.attr,
5302 	&format_attr_cmask.attr,
5303 	NULL,
5304 };
5305 
5306 static struct attribute *hsw_format_attr[] = {
5307 	&format_attr_in_tx.attr,
5308 	&format_attr_in_tx_cp.attr,
5309 	&format_attr_offcore_rsp.attr,
5310 	&format_attr_ldlat.attr,
5311 	NULL
5312 };
5313 
5314 static struct attribute *nhm_format_attr[] = {
5315 	&format_attr_offcore_rsp.attr,
5316 	&format_attr_ldlat.attr,
5317 	NULL
5318 };
5319 
5320 static struct attribute *slm_format_attr[] = {
5321 	&format_attr_offcore_rsp.attr,
5322 	NULL
5323 };
5324 
5325 static struct attribute *cmt_format_attr[] = {
5326 	&format_attr_offcore_rsp.attr,
5327 	&format_attr_ldlat.attr,
5328 	&format_attr_snoop_rsp.attr,
5329 	NULL
5330 };
5331 
5332 static struct attribute *skl_format_attr[] = {
5333 	&format_attr_frontend.attr,
5334 	NULL,
5335 };
5336 
5337 static __initconst const struct x86_pmu core_pmu = {
5338 	.name			= "core",
5339 	.handle_irq		= x86_pmu_handle_irq,
5340 	.disable_all		= x86_pmu_disable_all,
5341 	.enable_all		= core_pmu_enable_all,
5342 	.enable			= core_pmu_enable_event,
5343 	.disable		= x86_pmu_disable_event,
5344 	.hw_config		= core_pmu_hw_config,
5345 	.schedule_events	= x86_schedule_events,
5346 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
5347 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
5348 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
5349 	.event_map		= intel_pmu_event_map,
5350 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
5351 	.apic			= 1,
5352 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
5353 
5354 	/*
5355 	 * Intel PMCs cannot be accessed sanely above 32-bit width,
5356 	 * so we install an artificial 1<<31 period regardless of
5357 	 * the generic event period:
5358 	 */
5359 	.max_period		= (1ULL<<31) - 1,
5360 	.get_event_constraints	= intel_get_event_constraints,
5361 	.put_event_constraints	= intel_put_event_constraints,
5362 	.event_constraints	= intel_core_event_constraints,
5363 	.guest_get_msrs		= core_guest_get_msrs,
5364 	.format_attrs		= intel_arch_formats_attr,
5365 	.events_sysfs_show	= intel_event_sysfs_show,
5366 
5367 	/*
5368 	 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
5369 	 * together with PMU version 1 and thus be using core_pmu with
5370 	 * shared_regs. We need the following callbacks here to allocate
5371 	 * it properly.
5372 	 */
5373 	.cpu_prepare		= intel_pmu_cpu_prepare,
5374 	.cpu_starting		= intel_pmu_cpu_starting,
5375 	.cpu_dying		= intel_pmu_cpu_dying,
5376 	.cpu_dead		= intel_pmu_cpu_dead,
5377 
5378 	.check_period		= intel_pmu_check_period,
5379 
5380 	.lbr_reset		= intel_pmu_lbr_reset_64,
5381 	.lbr_read		= intel_pmu_lbr_read_64,
5382 	.lbr_save		= intel_pmu_lbr_save,
5383 	.lbr_restore		= intel_pmu_lbr_restore,
5384 };
5385 
5386 static __initconst const struct x86_pmu intel_pmu = {
5387 	.name			= "Intel",
5388 	.handle_irq		= intel_pmu_handle_irq,
5389 	.disable_all		= intel_pmu_disable_all,
5390 	.enable_all		= intel_pmu_enable_all,
5391 	.enable			= intel_pmu_enable_event,
5392 	.disable		= intel_pmu_disable_event,
5393 	.add			= intel_pmu_add_event,
5394 	.del			= intel_pmu_del_event,
5395 	.read			= intel_pmu_read_event,
5396 	.set_period		= intel_pmu_set_period,
5397 	.update			= intel_pmu_update,
5398 	.hw_config		= intel_pmu_hw_config,
5399 	.schedule_events	= x86_schedule_events,
5400 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
5401 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
5402 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
5403 	.event_map		= intel_pmu_event_map,
5404 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
5405 	.apic			= 1,
5406 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
5407 	/*
5408 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
5409 	 * so we install an artificial 1<<31 period regardless of
5410 	 * the generic event period:
5411 	 */
5412 	.max_period		= (1ULL << 31) - 1,
5413 	.get_event_constraints	= intel_get_event_constraints,
5414 	.put_event_constraints	= intel_put_event_constraints,
5415 	.pebs_aliases		= intel_pebs_aliases_core2,
5416 
5417 	.format_attrs		= intel_arch3_formats_attr,
5418 	.events_sysfs_show	= intel_event_sysfs_show,
5419 
5420 	.cpu_prepare		= intel_pmu_cpu_prepare,
5421 	.cpu_starting		= intel_pmu_cpu_starting,
5422 	.cpu_dying		= intel_pmu_cpu_dying,
5423 	.cpu_dead		= intel_pmu_cpu_dead,
5424 
5425 	.guest_get_msrs		= intel_guest_get_msrs,
5426 	.sched_task		= intel_pmu_sched_task,
5427 	.swap_task_ctx		= intel_pmu_swap_task_ctx,
5428 
5429 	.check_period		= intel_pmu_check_period,
5430 
5431 	.aux_output_match	= intel_pmu_aux_output_match,
5432 
5433 	.lbr_reset		= intel_pmu_lbr_reset_64,
5434 	.lbr_read		= intel_pmu_lbr_read_64,
5435 	.lbr_save		= intel_pmu_lbr_save,
5436 	.lbr_restore		= intel_pmu_lbr_restore,
5437 
5438 	/*
5439 	 * SMM has access to all 4 rings and while traditionally SMM code only
5440 	 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5441 	 *
5442 	 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5443 	 * between SMM and non-SMM, this results in what should be pure
5444 	 * userspace counters including SMM data.
5445 	 *
5446 	 * This is a clear privilege issue, therefore globally disable
5447 	 * counting SMM by default.
5448 	 */
5449 	.attr_freeze_on_smi	= 1,
5450 };
5451 
5452 static __init void intel_clovertown_quirk(void)
5453 {
5454 	/*
5455 	 * PEBS is unreliable due to:
5456 	 *
5457 	 *   AJ67  - PEBS may experience CPL leaks
5458 	 *   AJ68  - PEBS PMI may be delayed by one event
5459 	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5460 	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5461 	 *
5462 	 * AJ67 could be worked around by restricting the OS/USR flags.
5463 	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5464 	 *
5465 	 * AJ106 could possibly be worked around by not allowing LBR
5466 	 *       usage from PEBS, including the fixup.
5467 	 * AJ68  could possibly be worked around by always programming
5468 	 *	 a pebs_event_reset[0] value and coping with the lost events.
5469 	 *
5470 	 * But taken together it might just make sense to not enable PEBS on
5471 	 * these chips.
5472 	 */
5473 	pr_warn("PEBS disabled due to CPU errata\n");
5474 	x86_pmu.pebs = 0;
5475 	x86_pmu.pebs_constraints = NULL;
5476 }
5477 
5478 static const struct x86_cpu_id isolation_ucodes[] = {
5479 	X86_MATCH_VFM_STEPS(INTEL_HASWELL,	 3,  3, 0x0000001f),
5480 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_L,	 1,  1, 0x0000001e),
5481 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_G,	 1,  1, 0x00000015),
5482 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 2,  2, 0x00000037),
5483 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 4,  4, 0x0000000a),
5484 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL,	 4,  4, 0x00000023),
5485 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G,	 1,  1, 0x00000014),
5486 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 2,  2, 0x00000010),
5487 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 3,  3, 0x07000009),
5488 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 4,  4, 0x0f000009),
5489 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 5,  5, 0x0e000002),
5490 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X,	 1,  1, 0x0b000014),
5491 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 3,  3, 0x00000021),
5492 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 4,  7, 0x00000000),
5493 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	11, 11, 0x00000000),
5494 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L,	 3,  3, 0x0000007c),
5495 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE,	 3,  3, 0x0000007c),
5496 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE,	 9, 13, 0x0000004e),
5497 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L,	 9, 12, 0x0000004e),
5498 	{}
5499 };
5500 
5501 static void intel_check_pebs_isolation(void)
5502 {
5503 	x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
5504 }
5505 
5506 static __init void intel_pebs_isolation_quirk(void)
5507 {
5508 	WARN_ON_ONCE(x86_pmu.check_microcode);
5509 	x86_pmu.check_microcode = intel_check_pebs_isolation;
5510 	intel_check_pebs_isolation();
5511 }
5512 
5513 static const struct x86_cpu_id pebs_ucodes[] = {
5514 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE,	7, 7, 0x00000028),
5515 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	6, 6, 0x00000618),
5516 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	7, 7, 0x0000070c),
5517 	{}
5518 };
5519 
5520 static bool intel_snb_pebs_broken(void)
5521 {
5522 	return !x86_match_min_microcode_rev(pebs_ucodes);
5523 }
5524 
5525 static void intel_snb_check_microcode(void)
5526 {
5527 	if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5528 		return;
5529 
5530 	/*
5531 	 * Serialized by the microcode lock.
5532 	 */
5533 	if (x86_pmu.pebs_broken) {
5534 		pr_info("PEBS enabled due to microcode update\n");
5535 		x86_pmu.pebs_broken = 0;
5536 	} else {
5537 		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5538 		x86_pmu.pebs_broken = 1;
5539 	}
5540 }
5541 
5542 static bool is_lbr_from(unsigned long msr)
5543 {
5544 	unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5545 
5546 	return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5547 }
5548 
5549 /*
5550  * Under certain circumstances, accessing certain MSRs may cause #GP.
5551  * This function tests whether the input MSR can be safely accessed.
5552  */
5553 static bool check_msr(unsigned long msr, u64 mask)
5554 {
5555 	u64 val_old, val_new, val_tmp;
5556 
5557 	/*
5558 	 * Disable the check for real HW, so we don't
5559 	 * mess with potentially enabled registers:
5560 	 */
5561 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5562 		return true;
5563 
5564 	/*
5565 	 * Read the current value, change it and read it back to see if it
5566 	 * matches, this is needed to detect certain hardware emulators
5567 	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5568 	 */
5569 	if (rdmsrl_safe(msr, &val_old))
5570 		return false;
5571 
5572 	/*
5573 	 * Only change the bits which can be updated by wrmsrl.
5574 	 */
5575 	val_tmp = val_old ^ mask;
5576 
5577 	if (is_lbr_from(msr))
5578 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5579 
5580 	if (wrmsrl_safe(msr, val_tmp) ||
5581 	    rdmsrl_safe(msr, &val_new))
5582 		return false;
5583 
5584 	/*
5585 	 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5586 	 * should equal rdmsrl()'s even with the quirk.
5587 	 */
5588 	if (val_new != val_tmp)
5589 		return false;
5590 
5591 	if (is_lbr_from(msr))
5592 		val_old = lbr_from_signext_quirk_wr(val_old);
5593 
5594 	/* At this point it is certain that the MSR can be safely accessed.
5595 	 * Restore the old value and return.
5596 	 */
5597 	wrmsrl(msr, val_old);
5598 
5599 	return true;
5600 }
5601 
5602 static __init void intel_sandybridge_quirk(void)
5603 {
5604 	x86_pmu.check_microcode = intel_snb_check_microcode;
5605 	cpus_read_lock();
5606 	intel_snb_check_microcode();
5607 	cpus_read_unlock();
5608 }
5609 
5610 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5611 	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5612 	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5613 	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5614 	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5615 	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5616 	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5617 	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5618 };
5619 
5620 static __init void intel_arch_events_quirk(void)
5621 {
5622 	int bit;
5623 
5624 	/* Disable events that are reported as not present by CPUID. */
5625 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5626 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5627 		pr_warn("CPUID marked event: \'%s\' unavailable\n",
5628 			intel_arch_events_map[bit].name);
5629 	}
5630 }
5631 
5632 static __init void intel_nehalem_quirk(void)
5633 {
5634 	union cpuid10_ebx ebx;
5635 
5636 	ebx.full = x86_pmu.events_maskl;
5637 	if (ebx.split.no_branch_misses_retired) {
5638 		/*
5639 		 * Erratum AAJ80 detected, we work it around by using
5640 		 * the BR_MISP_EXEC.ANY event. This will over-count
5641 		 * branch-misses, but it's still much better than the
5642 		 * architectural event which is often completely bogus:
5643 		 */
5644 		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5645 		ebx.split.no_branch_misses_retired = 0;
5646 		x86_pmu.events_maskl = ebx.full;
5647 		pr_info("CPU erratum AAJ80 worked around\n");
5648 	}
5649 }
5650 
5651 /*
5652  * enable software workaround for errata:
5653  * SNB: BJ122
5654  * IVB: BV98
5655  * HSW: HSD29
5656  *
5657  * Only needed when HT is enabled. However, detecting
5658  * whether HT is enabled is difficult (model specific). So instead,
5659  * we enable the workaround during early boot and verify whether
5660  * it is needed in a later initcall phase, once we have valid
5661  * topology information to check if HT is actually enabled.
5662  */
5663 static __init void intel_ht_bug(void)
5664 {
5665 	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5666 
5667 	x86_pmu.start_scheduling = intel_start_scheduling;
5668 	x86_pmu.commit_scheduling = intel_commit_scheduling;
5669 	x86_pmu.stop_scheduling = intel_stop_scheduling;
5670 }
5671 
5672 EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
5673 EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");
5674 
5675 /* Haswell special events */
5676 EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
5677 EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
5678 EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
5679 EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
5680 EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
5681 EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
5682 EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
5683 EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
5684 EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
5685 EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
5686 EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
5687 EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");
5688 
5689 static struct attribute *hsw_events_attrs[] = {
5690 	EVENT_PTR(td_slots_issued),
5691 	EVENT_PTR(td_slots_retired),
5692 	EVENT_PTR(td_fetch_bubbles),
5693 	EVENT_PTR(td_total_slots),
5694 	EVENT_PTR(td_total_slots_scale),
5695 	EVENT_PTR(td_recovery_bubbles),
5696 	EVENT_PTR(td_recovery_bubbles_scale),
5697 	NULL
5698 };
5699 
5700 static struct attribute *hsw_mem_events_attrs[] = {
5701 	EVENT_PTR(mem_ld_hsw),
5702 	EVENT_PTR(mem_st_hsw),
5703 	NULL,
5704 };
5705 
5706 static struct attribute *hsw_tsx_events_attrs[] = {
5707 	EVENT_PTR(tx_start),
5708 	EVENT_PTR(tx_commit),
5709 	EVENT_PTR(tx_abort),
5710 	EVENT_PTR(tx_capacity),
5711 	EVENT_PTR(tx_conflict),
5712 	EVENT_PTR(el_start),
5713 	EVENT_PTR(el_commit),
5714 	EVENT_PTR(el_abort),
5715 	EVENT_PTR(el_capacity),
5716 	EVENT_PTR(el_conflict),
5717 	EVENT_PTR(cycles_t),
5718 	EVENT_PTR(cycles_ct),
5719 	NULL
5720 };
5721 
5722 EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
5723 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5724 EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
5725 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5726 
5727 static struct attribute *icl_events_attrs[] = {
5728 	EVENT_PTR(mem_ld_hsw),
5729 	EVENT_PTR(mem_st_hsw),
5730 	NULL,
5731 };
5732 
5733 static struct attribute *icl_td_events_attrs[] = {
5734 	EVENT_PTR(slots),
5735 	EVENT_PTR(td_retiring),
5736 	EVENT_PTR(td_bad_spec),
5737 	EVENT_PTR(td_fe_bound),
5738 	EVENT_PTR(td_be_bound),
5739 	NULL,
5740 };
5741 
5742 static struct attribute *icl_tsx_events_attrs[] = {
5743 	EVENT_PTR(tx_start),
5744 	EVENT_PTR(tx_abort),
5745 	EVENT_PTR(tx_commit),
5746 	EVENT_PTR(tx_capacity_read),
5747 	EVENT_PTR(tx_capacity_write),
5748 	EVENT_PTR(tx_conflict),
5749 	EVENT_PTR(el_start),
5750 	EVENT_PTR(el_abort),
5751 	EVENT_PTR(el_commit),
5752 	EVENT_PTR(el_capacity_read),
5753 	EVENT_PTR(el_capacity_write),
5754 	EVENT_PTR(el_conflict),
5755 	EVENT_PTR(cycles_t),
5756 	EVENT_PTR(cycles_ct),
5757 	NULL,
5758 };
5759 
5760 
5761 EVENT_ATTR_STR(mem-stores,	mem_st_spr,	"event=0xcd,umask=0x2");
5762 EVENT_ATTR_STR(mem-loads-aux,	mem_ld_aux,	"event=0x03,umask=0x82");
5763 
5764 static struct attribute *glc_events_attrs[] = {
5765 	EVENT_PTR(mem_ld_hsw),
5766 	EVENT_PTR(mem_st_spr),
5767 	EVENT_PTR(mem_ld_aux),
5768 	NULL,
5769 };
5770 
5771 static struct attribute *glc_td_events_attrs[] = {
5772 	EVENT_PTR(slots),
5773 	EVENT_PTR(td_retiring),
5774 	EVENT_PTR(td_bad_spec),
5775 	EVENT_PTR(td_fe_bound),
5776 	EVENT_PTR(td_be_bound),
5777 	EVENT_PTR(td_heavy_ops),
5778 	EVENT_PTR(td_br_mispredict),
5779 	EVENT_PTR(td_fetch_lat),
5780 	EVENT_PTR(td_mem_bound),
5781 	NULL,
5782 };
5783 
5784 static struct attribute *glc_tsx_events_attrs[] = {
5785 	EVENT_PTR(tx_start),
5786 	EVENT_PTR(tx_abort),
5787 	EVENT_PTR(tx_commit),
5788 	EVENT_PTR(tx_capacity_read),
5789 	EVENT_PTR(tx_capacity_write),
5790 	EVENT_PTR(tx_conflict),
5791 	EVENT_PTR(cycles_t),
5792 	EVENT_PTR(cycles_ct),
5793 	NULL,
5794 };
5795 
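/*
 * sysfs knob toggling DEBUGCTLMSR.FREEZE_IN_SMM on all CPUs, e.g.
 * (path shown for the non-hybrid "cpu" PMU):
 *
 *   echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 */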
5796 static ssize_t freeze_on_smi_show(struct device *cdev,
5797 				  struct device_attribute *attr,
5798 				  char *buf)
5799 {
5800 	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5801 }
5802 
5803 static DEFINE_MUTEX(freeze_on_smi_mutex);
5804 
5805 static ssize_t freeze_on_smi_store(struct device *cdev,
5806 				   struct device_attribute *attr,
5807 				   const char *buf, size_t count)
5808 {
5809 	unsigned long val;
5810 	ssize_t ret;
5811 
5812 	ret = kstrtoul(buf, 0, &val);
5813 	if (ret)
5814 		return ret;
5815 
5816 	if (val > 1)
5817 		return -EINVAL;
5818 
5819 	mutex_lock(&freeze_on_smi_mutex);
5820 
5821 	if (x86_pmu.attr_freeze_on_smi == val)
5822 		goto done;
5823 
5824 	x86_pmu.attr_freeze_on_smi = val;
5825 
5826 	cpus_read_lock();
5827 	on_each_cpu(flip_smm_bit, &val, 1);
5828 	cpus_read_unlock();
5829 done:
5830 	mutex_unlock(&freeze_on_smi_mutex);
5831 
5832 	return count;
5833 }
5834 
5835 static void update_tfa_sched(void *ignored)
5836 {
5837 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5838 
5839 	/*
5840 	 * Check if PMC3 is used and, if so, force a schedule-out for
5841 	 * all event types and all contexts.
5842 	 */
5843 	if (test_bit(3, cpuc->active_mask))
5844 		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5845 }
5846 
5847 static ssize_t show_sysctl_tfa(struct device *cdev,
5848 			      struct device_attribute *attr,
5849 			      char *buf)
5850 {
5851 	return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5852 }
5853 
5854 static ssize_t set_sysctl_tfa(struct device *cdev,
5855 			      struct device_attribute *attr,
5856 			      const char *buf, size_t count)
5857 {
5858 	bool val;
5859 	ssize_t ret;
5860 
5861 	ret = kstrtobool(buf, &val);
5862 	if (ret)
5863 		return ret;
5864 
5865 	/* no change */
5866 	if (val == allow_tsx_force_abort)
5867 		return count;
5868 
5869 	allow_tsx_force_abort = val;
5870 
5871 	cpus_read_lock();
5872 	on_each_cpu(update_tfa_sched, NULL, 1);
5873 	cpus_read_unlock();
5874 
5875 	return count;
5876 }
5877 
5878 
5879 static DEVICE_ATTR_RW(freeze_on_smi);
5880 
5881 static ssize_t branches_show(struct device *cdev,
5882 			     struct device_attribute *attr,
5883 			     char *buf)
5884 {
5885 	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5886 }
5887 
5888 static DEVICE_ATTR_RO(branches);
5889 
5890 static ssize_t branch_counter_nr_show(struct device *cdev,
5891 				      struct device_attribute *attr,
5892 				      char *buf)
5893 {
5894 	return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
5895 }
5896 
5897 static DEVICE_ATTR_RO(branch_counter_nr);
5898 
5899 static ssize_t branch_counter_width_show(struct device *cdev,
5900 					 struct device_attribute *attr,
5901 					 char *buf)
5902 {
5903 	return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
5904 }
5905 
5906 static DEVICE_ATTR_RO(branch_counter_width);
5907 
5908 static struct attribute *lbr_attrs[] = {
5909 	&dev_attr_branches.attr,
5910 	&dev_attr_branch_counter_nr.attr,
5911 	&dev_attr_branch_counter_width.attr,
5912 	NULL
5913 };
5914 
5915 static umode_t
5916 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5917 {
5918 	/* branches */
5919 	if (i == 0)
5920 		return x86_pmu.lbr_nr ? attr->mode : 0;
5921 
5922 	return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
5923 }
5924 
5925 static char pmu_name_str[30];
5926 
5927 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
5928 
5929 static struct attribute *intel_pmu_caps_attrs[] = {
5930 	&dev_attr_pmu_name.attr.attr,
5931 	NULL
5932 };
5933 
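/*
 * sysfs knob trading PMC3 availability against TSX force-abort
 * behaviour; writes trigger update_tfa_sched() on every CPU.
 */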
5934 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5935 		   show_sysctl_tfa,
5936 		   set_sysctl_tfa);
5937 
5938 static struct attribute *intel_pmu_attrs[] = {
5939 	&dev_attr_freeze_on_smi.attr,
5940 	&dev_attr_allow_tsx_force_abort.attr,
5941 	NULL,
5942 };
5943 
5944 static umode_t
5945 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5946 {
5947 	if (attr == &dev_attr_allow_tsx_force_abort.attr)
5948 		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5949 
5950 	return attr->mode;
5951 }
5952 
5953 static umode_t
5954 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5955 {
5956 	return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5957 }
5958 
5959 static umode_t
5960 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5961 {
5962 	return x86_pmu.pebs ? attr->mode : 0;
5963 }
5964 
5965 static umode_t
5966 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5967 {
5968 	if (attr == &event_attr_mem_ld_aux.attr.attr)
5969 		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
5970 
5971 	return pebs_is_visible(kobj, attr, i);
5972 }
5973 
5974 static umode_t
5975 extra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5976 {
5977 	return x86_pmu.version >= 2 ? attr->mode : 0;
5978 }
5979 
5980 static umode_t
5981 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5982 {
5983 	/*
5984 	 * Hide the perf metrics topdown events
5985 	 * if the feature is not enumerated.
5986 	 */
5987 	if (x86_pmu.num_topdown_events)
5988 		return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
5989 
5990 	return attr->mode;
5991 }
5992 
5993 static struct attribute_group group_events_td  = {
5994 	.name = "events",
5995 	.is_visible = td_is_visible,
5996 };
5997 
5998 static struct attribute_group group_events_mem = {
5999 	.name       = "events",
6000 	.is_visible = mem_is_visible,
6001 };
6002 
6003 static struct attribute_group group_events_tsx = {
6004 	.name       = "events",
6005 	.is_visible = tsx_is_visible,
6006 };
6007 
6008 static struct attribute_group group_caps_gen = {
6009 	.name  = "caps",
6010 	.attrs = intel_pmu_caps_attrs,
6011 };
6012 
6013 static struct attribute_group group_caps_lbr = {
6014 	.name       = "caps",
6015 	.attrs	    = lbr_attrs,
6016 	.is_visible = lbr_is_visible,
6017 };
6018 
6019 static struct attribute_group group_format_extra = {
6020 	.name       = "format",
6021 	.is_visible = extra_is_visible,
6022 };
6023 
6024 static struct attribute_group group_format_extra_skl = {
6025 	.name       = "format",
6026 	.is_visible = extra_is_visible,
6027 };
6028 
6029 static struct attribute_group group_format_evtsel_ext = {
6030 	.name       = "format",
6031 	.attrs      = format_evtsel_ext_attrs,
6032 	.is_visible = evtsel_ext_is_visible,
6033 };
6034 
6035 static struct attribute_group group_default = {
6036 	.attrs      = intel_pmu_attrs,
6037 	.is_visible = default_is_visible,
6038 };
6039 
6040 static const struct attribute_group *attr_update[] = {
6041 	&group_events_td,
6042 	&group_events_mem,
6043 	&group_events_tsx,
6044 	&group_caps_gen,
6045 	&group_caps_lbr,
6046 	&group_format_extra,
6047 	&group_format_extra_skl,
6048 	&group_format_evtsel_ext,
6049 	&group_default,
6050 	NULL,
6051 };
6052 
6053 EVENT_ATTR_STR_HYBRID(slots,                 slots_adl,        "event=0x00,umask=0x4",                       hybrid_big);
6054 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_adl,  "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
6055 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,      td_bad_spec_adl,  "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
6056 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_adl,  "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
6057 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_adl,  "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
6058 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops,     td_heavy_ops_adl, "event=0x00,umask=0x84",                      hybrid_big);
6059 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl,    "event=0x00,umask=0x85",                      hybrid_big);
6060 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat,     td_fetch_lat_adl, "event=0x00,umask=0x86",                      hybrid_big);
6061 EVENT_ATTR_STR_HYBRID(topdown-mem-bound,     td_mem_bound_adl, "event=0x00,umask=0x87",                      hybrid_big);
6062 
6063 static struct attribute *adl_hybrid_events_attrs[] = {
6064 	EVENT_PTR(slots_adl),
6065 	EVENT_PTR(td_retiring_adl),
6066 	EVENT_PTR(td_bad_spec_adl),
6067 	EVENT_PTR(td_fe_bound_adl),
6068 	EVENT_PTR(td_be_bound_adl),
6069 	EVENT_PTR(td_heavy_ops_adl),
6070 	EVENT_PTR(td_br_mis_adl),
6071 	EVENT_PTR(td_fetch_lat_adl),
6072 	EVENT_PTR(td_mem_bound_adl),
6073 	NULL,
6074 };
6075 
6076 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_lnl,  "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
6077 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_lnl,  "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
6078 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_lnl,  "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
6079 
6080 static struct attribute *lnl_hybrid_events_attrs[] = {
6081 	EVENT_PTR(slots_adl),
6082 	EVENT_PTR(td_retiring_lnl),
6083 	EVENT_PTR(td_bad_spec_adl),
6084 	EVENT_PTR(td_fe_bound_lnl),
6085 	EVENT_PTR(td_be_bound_lnl),
6086 	EVENT_PTR(td_heavy_ops_adl),
6087 	EVENT_PTR(td_br_mis_adl),
6088 	EVENT_PTR(td_fetch_lat_adl),
6089 	EVENT_PTR(td_mem_bound_adl),
6090 	NULL
6091 };
6092 
6093 /* The event string must be in PMU IDX order. */
6094 EVENT_ATTR_STR_HYBRID(topdown-retiring,
6095 		      td_retiring_arl_h,
6096 		      "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
6097 		      hybrid_big_small_tiny);
6098 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
6099 		      td_bad_spec_arl_h,
6100 		      "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
6101 		      hybrid_big_small_tiny);
6102 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
6103 		      td_fe_bound_arl_h,
6104 		      "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
6105 		      hybrid_big_small_tiny);
6106 EVENT_ATTR_STR_HYBRID(topdown-be-bound,
6107 		      td_be_bound_arl_h,
6108 		      "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
6109 		      hybrid_big_small_tiny);
6110 
6111 static struct attribute *arl_h_hybrid_events_attrs[] = {
6112 	EVENT_PTR(slots_adl),
6113 	EVENT_PTR(td_retiring_arl_h),
6114 	EVENT_PTR(td_bad_spec_arl_h),
6115 	EVENT_PTR(td_fe_bound_arl_h),
6116 	EVENT_PTR(td_be_bound_arl_h),
6117 	EVENT_PTR(td_heavy_ops_adl),
6118 	EVENT_PTR(td_br_mis_adl),
6119 	EVENT_PTR(td_fetch_lat_adl),
6120 	EVENT_PTR(td_mem_bound_adl),
6121 	NULL,
6122 };
6123 
6124 /* Must be in IDX order */
6125 EVENT_ATTR_STR_HYBRID(mem-loads,     mem_ld_adl,     "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
6126 EVENT_ATTR_STR_HYBRID(mem-stores,    mem_st_adl,     "event=0xd0,umask=0x6;event=0xcd,umask=0x2",                 hybrid_big_small);
6127 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82",                                     hybrid_big);
6128 
6129 static struct attribute *adl_hybrid_mem_attrs[] = {
6130 	EVENT_PTR(mem_ld_adl),
6131 	EVENT_PTR(mem_st_adl),
6132 	EVENT_PTR(mem_ld_aux_adl),
6133 	NULL,
6134 };
6135 
6136 static struct attribute *mtl_hybrid_mem_attrs[] = {
6137 	EVENT_PTR(mem_ld_adl),
6138 	EVENT_PTR(mem_st_adl),
6139 	NULL
6140 };
6141 
6142 EVENT_ATTR_STR_HYBRID(mem-loads,
6143 		      mem_ld_arl_h,
6144 		      "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
6145 		      hybrid_big_small_tiny);
6146 EVENT_ATTR_STR_HYBRID(mem-stores,
6147 		      mem_st_arl_h,
6148 		      "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
6149 		      hybrid_big_small_tiny);
6150 
6151 static struct attribute *arl_h_hybrid_mem_attrs[] = {
6152 	EVENT_PTR(mem_ld_arl_h),
6153 	EVENT_PTR(mem_st_arl_h),
6154 	NULL,
6155 };
6156 
6157 EVENT_ATTR_STR_HYBRID(tx-start,          tx_start_adl,          "event=0xc9,umask=0x1",          hybrid_big);
6158 EVENT_ATTR_STR_HYBRID(tx-commit,         tx_commit_adl,         "event=0xc9,umask=0x2",          hybrid_big);
6159 EVENT_ATTR_STR_HYBRID(tx-abort,          tx_abort_adl,          "event=0xc9,umask=0x4",          hybrid_big);
6160 EVENT_ATTR_STR_HYBRID(tx-conflict,       tx_conflict_adl,       "event=0x54,umask=0x1",          hybrid_big);
6161 EVENT_ATTR_STR_HYBRID(cycles-t,          cycles_t_adl,          "event=0x3c,in_tx=1",            hybrid_big);
6162 EVENT_ATTR_STR_HYBRID(cycles-ct,         cycles_ct_adl,         "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
6163 EVENT_ATTR_STR_HYBRID(tx-capacity-read,  tx_capacity_read_adl,  "event=0x54,umask=0x80",         hybrid_big);
6164 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2",          hybrid_big);
6165 
6166 static struct attribute *adl_hybrid_tsx_attrs[] = {
6167 	EVENT_PTR(tx_start_adl),
6168 	EVENT_PTR(tx_abort_adl),
6169 	EVENT_PTR(tx_commit_adl),
6170 	EVENT_PTR(tx_capacity_read_adl),
6171 	EVENT_PTR(tx_capacity_write_adl),
6172 	EVENT_PTR(tx_conflict_adl),
6173 	EVENT_PTR(cycles_t_adl),
6174 	EVENT_PTR(cycles_ct_adl),
6175 	NULL,
6176 };
6177 
6178 FORMAT_ATTR_HYBRID(in_tx,       hybrid_big);
6179 FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big);
6180 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
6181 FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small_tiny);
6182 FORMAT_ATTR_HYBRID(frontend,    hybrid_big);
6183 
6184 #define ADL_HYBRID_RTM_FORMAT_ATTR	\
6185 	FORMAT_HYBRID_PTR(in_tx),	\
6186 	FORMAT_HYBRID_PTR(in_tx_cp)
6187 
6188 #define ADL_HYBRID_FORMAT_ATTR		\
6189 	FORMAT_HYBRID_PTR(offcore_rsp),	\
6190 	FORMAT_HYBRID_PTR(ldlat),	\
6191 	FORMAT_HYBRID_PTR(frontend)
6192 
6193 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
6194 	ADL_HYBRID_RTM_FORMAT_ATTR,
6195 	ADL_HYBRID_FORMAT_ATTR,
6196 	NULL
6197 };
6198 
6199 static struct attribute *adl_hybrid_extra_attr[] = {
6200 	ADL_HYBRID_FORMAT_ATTR,
6201 	NULL
6202 };
6203 
6204 FORMAT_ATTR_HYBRID(snoop_rsp,	hybrid_small_tiny);
6205 
6206 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
6207 	ADL_HYBRID_RTM_FORMAT_ATTR,
6208 	ADL_HYBRID_FORMAT_ATTR,
6209 	FORMAT_HYBRID_PTR(snoop_rsp),
6210 	NULL
6211 };
6212 
6213 static struct attribute *mtl_hybrid_extra_attr[] = {
6214 	ADL_HYBRID_FORMAT_ATTR,
6215 	FORMAT_HYBRID_PTR(snoop_rsp),
6216 	NULL
6217 };
6218 
6219 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
6220 {
6221 	struct device *dev = kobj_to_dev(kobj);
6222 	struct x86_hybrid_pmu *pmu =
6223 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6224 	struct perf_pmu_events_hybrid_attr *pmu_attr =
6225 		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
6226 
6227 	return pmu->pmu_type & pmu_attr->pmu_type;
6228 }
6229 
6230 static umode_t hybrid_events_is_visible(struct kobject *kobj,
6231 					struct attribute *attr, int i)
6232 {
6233 	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
6234 }
6235 
6236 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
6237 {
6238 	int cpu = cpumask_first(&pmu->supported_cpus);
6239 
6240 	return (cpu >= nr_cpu_ids) ? -1 : cpu;
6241 }
6242 
6243 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
6244 				     struct attribute *attr, int i)
6245 {
6246 	struct device *dev = kobj_to_dev(kobj);
6247 	struct x86_hybrid_pmu *pmu =
6248 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6249 	int cpu = hybrid_find_supported_cpu(pmu);
6250 
6251 	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
6252 }
6253 
6254 static umode_t hybrid_format_is_visible(struct kobject *kobj,
6255 					struct attribute *attr, int i)
6256 {
6257 	struct device *dev = kobj_to_dev(kobj);
6258 	struct x86_hybrid_pmu *pmu =
6259 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6260 	struct perf_pmu_format_hybrid_attr *pmu_attr =
6261 		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
6262 	int cpu = hybrid_find_supported_cpu(pmu);
6263 
6264 	return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
6265 }
6266 
6267 static umode_t hybrid_td_is_visible(struct kobject *kobj,
6268 				    struct attribute *attr, int i)
6269 {
6270 	struct device *dev = kobj_to_dev(kobj);
6271 	struct x86_hybrid_pmu *pmu =
6272 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6273 
6274 	if (!is_attr_for_this_pmu(kobj, attr))
6275 		return 0;
6276 
6277 
6278 	/* Only the big core supports perf metrics */
6279 	if (pmu->pmu_type == hybrid_big)
6280 		return pmu->intel_cap.perf_metrics ? attr->mode : 0;
6281 
6282 	return attr->mode;
6283 }
6284 
6285 static struct attribute_group hybrid_group_events_td  = {
6286 	.name		= "events",
6287 	.is_visible	= hybrid_td_is_visible,
6288 };
6289 
6290 static struct attribute_group hybrid_group_events_mem = {
6291 	.name		= "events",
6292 	.is_visible	= hybrid_events_is_visible,
6293 };
6294 
6295 static struct attribute_group hybrid_group_events_tsx = {
6296 	.name		= "events",
6297 	.is_visible	= hybrid_tsx_is_visible,
6298 };
6299 
6300 static struct attribute_group hybrid_group_format_extra = {
6301 	.name		= "format",
6302 	.is_visible	= hybrid_format_is_visible,
6303 };
6304 
6305 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
6306 					  struct device_attribute *attr,
6307 					  char *buf)
6308 {
6309 	struct x86_hybrid_pmu *pmu =
6310 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6311 
6312 	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
6313 }
6314 
6315 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
6316 static struct attribute *intel_hybrid_cpus_attrs[] = {
6317 	&dev_attr_cpus.attr,
6318 	NULL,
6319 };
6320 
6321 static struct attribute_group hybrid_group_cpus = {
6322 	.attrs		= intel_hybrid_cpus_attrs,
6323 };
6324 
6325 static const struct attribute_group *hybrid_attr_update[] = {
6326 	&hybrid_group_events_td,
6327 	&hybrid_group_events_mem,
6328 	&hybrid_group_events_tsx,
6329 	&group_caps_gen,
6330 	&group_caps_lbr,
6331 	&hybrid_group_format_extra,
6332 	&group_format_evtsel_ext,
6333 	&group_default,
6334 	&hybrid_group_cpus,
6335 	NULL,
6336 };
6337 
6338 static struct attribute *empty_attrs;
6339 
6340 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
6341 					      u64 cntr_mask,
6342 					      u64 fixed_cntr_mask,
6343 					      u64 intel_ctrl)
6344 {
6345 	struct event_constraint *c;
6346 
6347 	if (!event_constraints)
6348 		return;
6349 
6350 	/*
6351 	 * The event on fixed counter 2 (REF_CYCLES) only works on that
6352 	 * counter, so do not extend the mask to the generic counters.
6353 	 */
6354 	for_each_event_constraint(c, event_constraints) {
6355 		/*
6356 		 * Don't extend the topdown slots and metrics
6357 		 * events to the generic counters.
6358 		 */
6359 		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
6360 			/*
6361 			 * Disable topdown slots and metrics events,
6362 			 * if slots event is not in CPUID.
6363 			 */
6364 			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
6365 				c->idxmsk64 = 0;
6366 			c->weight = hweight64(c->idxmsk64);
6367 			continue;
6368 		}
6369 
6370 		if (c->cmask == FIXED_EVENT_FLAGS) {
6371 			/* Disable fixed counters which are not enumerated in CPUID. */
6372 			c->idxmsk64 &= intel_ctrl;
6373 
6374 			/*
6375 			 * Don't extend the pseudo-encoding to the
6376 			 * generic counters
6377 			 */
6378 			if (!use_fixed_pseudo_encoding(c->code))
6379 				c->idxmsk64 |= cntr_mask;
6380 		}
6381 		c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
6382 		c->weight = hweight64(c->idxmsk64);
6383 	}
6384 }
6385 
6386 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
6387 {
6388 	struct extra_reg *er;
6389 
6390 	/*
6391 	 * Accessing an extra MSR may cause #GP under certain circumstances,
6392 	 * e.g. KVM doesn't support the offcore events.
6393 	 * Check all extra_regs here.
6394 	 */
6395 	if (!extra_regs)
6396 		return;
6397 
6398 	for (er = extra_regs; er->msr; er++) {
6399 		er->extra_msr_access = check_msr(er->msr, 0x11UL);
6400 		/* Disable LBR select mapping */
6401 		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
6402 			x86_pmu.lbr_sel_map = NULL;
6403 	}
6404 }
6405 
6406 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
6407 {
6408 	return MSR_IA32_PMC_V6_STEP * index;
6409 }
6410 
6411 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
6412 	{ hybrid_small,	"cpu_atom" },
6413 	{ hybrid_big,	"cpu_core" },
6414 	{ hybrid_tiny,	"cpu_lowpower" },
6415 };
6416 
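/*
 * Allocate one x86_hybrid_pmu per requested PMU type and seed each from
 * the boot-time x86_pmu defaults; per-CPU enumeration refines the
 * counter masks later via update_pmu_cap().
 */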
6417 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
6418 {
6419 	unsigned long pmus_mask = pmus;
6420 	struct x86_hybrid_pmu *pmu;
6421 	int idx = 0, bit;
6422 
6423 	x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
6424 	x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
6425 				     sizeof(struct x86_hybrid_pmu),
6426 				     GFP_KERNEL);
6427 	if (!x86_pmu.hybrid_pmu)
6428 		return -ENOMEM;
6429 
6430 	static_branch_enable(&perf_is_hybrid);
6431 	x86_pmu.filter = intel_pmu_filter;
6432 
6433 	for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
6434 		pmu = &x86_pmu.hybrid_pmu[idx++];
6435 		pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
6436 		pmu->name = intel_hybrid_pmu_type_map[bit].name;
6437 
6438 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
6439 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
6440 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
6441 		pmu->config_mask = X86_RAW_EVENT_MASK;
6442 		pmu->unconstrained = (struct event_constraint)
6443 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
6444 							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
6445 
6446 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6447 		if (pmu->pmu_type & hybrid_small_tiny) {
6448 			pmu->intel_cap.perf_metrics = 0;
6449 			pmu->mid_ack = true;
6450 		} else if (pmu->pmu_type & hybrid_big) {
6451 			pmu->intel_cap.perf_metrics = 1;
6452 			pmu->late_ack = true;
6453 		}
6454 	}
6455 
6456 	return 0;
6457 }
6458 
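/*
 * If fixed counter 2 (REF_CYCLES) is not enumerated, remap
 * PERF_COUNT_HW_REF_CPU_CYCLES from its pseudo-encoding to event 0x013c
 * so that it can be scheduled on a generic counter.
 */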
6459 static __always_inline void intel_pmu_ref_cycles_ext(void)
6460 {
6461 	if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
6462 		intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
6463 }
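
/*
 * Bit arithmetic of the test above: INTEL_PMC_MSK_FIXED_REF_CYCLES is bit
 * INTEL_PMC_IDX_FIXED + 2 (bit 34 of GLOBAL_CTRL), so shifting it right by
 * INTEL_PMC_IDX_FIXED leaves bit 2, the position checked in the
 * CPUID.0AH:EBX-derived events mask. When the check fails, the 0x0300
 * pseudo-encoding (schedulable only on the fixed counter) is swapped for
 * 0x013c, an encoding a general-purpose counter can run.
 */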
6464 
6465 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
6466 {
6467 	x86_pmu.late_ack = true;
6468 	x86_pmu.limit_period = glc_limit_period;
6469 	x86_pmu.pebs_aliases = NULL;
6470 	x86_pmu.pebs_prec_dist = true;
6471 	x86_pmu.pebs_block = true;
6472 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6473 	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6474 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6475 	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6476 	x86_pmu.lbr_pt_coexist = true;
6477 	x86_pmu.num_topdown_events = 8;
6478 	static_call_update(intel_pmu_update_topdown_event,
6479 			   &icl_update_topdown_event);
6480 	static_call_update(intel_pmu_set_topdown_event_period,
6481 			   &icl_set_topdown_event_period);
6482 
6483 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6484 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6485 	hybrid(pmu, event_constraints) = intel_glc_event_constraints;
6486 	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
6487 
6488 	intel_pmu_ref_cycles_ext();
6489 }
6490 
6491 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
6492 {
6493 	x86_pmu.mid_ack = true;
6494 	x86_pmu.limit_period = glc_limit_period;
6495 	x86_pmu.pebs_aliases = NULL;
6496 	x86_pmu.pebs_prec_dist = true;
6497 	x86_pmu.pebs_block = true;
6498 	x86_pmu.lbr_pt_coexist = true;
6499 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6500 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6501 
6502 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6503 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6504 	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6505 	hybrid(pmu, event_constraints) = intel_grt_event_constraints;
6506 	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
6507 	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
6508 
6509 	intel_pmu_ref_cycles_ext();
6510 }
6511 
6512 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
6513 {
6514 	intel_pmu_init_glc(pmu);
6515 	hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
6516 	hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
6517 	hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
6518 }
6519 
6520 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
6521 {
6522 	intel_pmu_init_grt(pmu);
6523 	hybrid(pmu, event_constraints) = intel_skt_event_constraints;
6524 	hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
6525 }
6526 
6527 __init int intel_pmu_init(void)
6528 {
6529 	struct attribute **extra_skl_attr = &empty_attrs;
6530 	struct attribute **extra_attr = &empty_attrs;
6531 	struct attribute **td_attr    = &empty_attrs;
6532 	struct attribute **mem_attr   = &empty_attrs;
6533 	struct attribute **tsx_attr   = &empty_attrs;
6534 	union cpuid10_edx edx;
6535 	union cpuid10_eax eax;
6536 	union cpuid10_ebx ebx;
6537 	unsigned int fixed_mask;
6538 	bool pmem = false;
6539 	int version, i;
6540 	char *name;
6541 	struct x86_hybrid_pmu *pmu;
6542 
6543 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
6544 		switch (boot_cpu_data.x86) {
6545 		case 0x6:
6546 			return p6_pmu_init();
6547 		case 0xb:
6548 			return knc_pmu_init();
6549 		case 0xf:
6550 			return p4_pmu_init();
6551 		}
6552 		return -ENODEV;
6553 	}
6554 
6555 	/*
6556 	 * Check whether the Architectural PerfMon supports
6557 	 * the Branch Misses Retired hw_event.
6558 	 */
6559 	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
6560 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
6561 		return -ENODEV;
6562 
6563 	version = eax.split.version_id;
6564 	if (version < 2)
6565 		x86_pmu = core_pmu;
6566 	else
6567 		x86_pmu = intel_pmu;
6568 
6569 	x86_pmu.version			= version;
6570 	x86_pmu.cntr_mask64		= GENMASK_ULL(eax.split.num_counters - 1, 0);
6571 	x86_pmu.cntval_bits		= eax.split.bit_width;
6572 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
6573 
6574 	x86_pmu.events_maskl		= ebx.full;
6575 	x86_pmu.events_mask_len		= eax.split.mask_length;
6576 
6577 	x86_pmu.pebs_events_mask	= intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
6578 	x86_pmu.pebs_capable		= PEBS_COUNTER_MASK;
6579 
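	/*
	 * Worked example of the decode above, assuming CPUID.0AH reports
	 * version_id == 5, num_counters == 8 and bit_width == 48:
	 *
	 *	x86_pmu.cntr_mask64 == GENMASK_ULL(7, 0) == 0xff
	 *	x86_pmu.cntval_mask == (1ULL << 48) - 1  == 0xffffffffffff
	 *
	 * i.e. eight schedulable GP counters, each 48 bits wide.
	 */
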
6580 	/*
6581 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
6582 	 * assume at least 3 events when not running in a hypervisor:
6583 	 */
6584 	if (version > 1 && version < 5) {
6585 		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
6586 
6587 		x86_pmu.fixed_cntr_mask64 =
6588 			GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
6589 	} else if (version >= 5)
6590 		x86_pmu.fixed_cntr_mask64 = fixed_mask;
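
	/*
	 * Example of the quirk: a bare-metal v4 part that leaves
	 * edx.split.num_counters_fixed at 0 still ends up with
	 *
	 *	assume == 3 * !0 == 3
	 *	fixed_cntr_mask64 == GENMASK_ULL(max(0, 3) - 1, 0) == 0x7
	 *
	 * while from v5 on the mask comes straight from ECX of leaf 0xA
	 * (the fixed_mask output of the cpuid() call above).
	 */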
6591 
6592 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
6593 		u64 capabilities;
6594 
6595 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
6596 		x86_pmu.intel_cap.capabilities = capabilities;
6597 	}
6598 
6599 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
6600 		x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
6601 		x86_pmu.lbr_read = intel_pmu_lbr_read_32;
6602 	}
6603 
6604 	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
6605 		intel_pmu_arch_lbr_init();
6606 
6607 	intel_ds_init();
6608 
6609 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
6610 
6611 	if (version >= 5) {
6612 		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
6613 		if (x86_pmu.intel_cap.anythread_deprecated)
6614 			pr_cont(" AnyThread deprecated, ");
6615 	}
6616 
6617 	/*
6618 	 * Install the hw-cache-events table:
6619 	 */
6620 	switch (boot_cpu_data.x86_vfm) {
6621 	case INTEL_CORE_YONAH:
6622 		pr_cont("Core events, ");
6623 		name = "core";
6624 		break;
6625 
6626 	case INTEL_CORE2_MEROM:
6627 		x86_add_quirk(intel_clovertown_quirk);
6628 		fallthrough;
6629 
6630 	case INTEL_CORE2_MEROM_L:
6631 	case INTEL_CORE2_PENRYN:
6632 	case INTEL_CORE2_DUNNINGTON:
6633 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
6634 		       sizeof(hw_cache_event_ids));
6635 
6636 		intel_pmu_lbr_init_core();
6637 
6638 		x86_pmu.event_constraints = intel_core2_event_constraints;
6639 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
6640 		pr_cont("Core2 events, ");
6641 		name = "core2";
6642 		break;
6643 
6644 	case INTEL_NEHALEM:
6645 	case INTEL_NEHALEM_EP:
6646 	case INTEL_NEHALEM_EX:
6647 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
6648 		       sizeof(hw_cache_event_ids));
6649 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6650 		       sizeof(hw_cache_extra_regs));
6651 
6652 		intel_pmu_lbr_init_nhm();
6653 
6654 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
6655 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
6656 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6657 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
6658 		x86_pmu.limit_period = nhm_limit_period;
6659 
6660 		mem_attr = nhm_mem_events_attrs;
6661 
6662 		/* UOPS_ISSUED.STALLED_CYCLES */
6663 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6664 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6665 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6666 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6667 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
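
		/*
		 * How these pseudo-events encode, using the frontend entry
		 * as the example: X86_CONFIG() packs the fields into the raw
		 * eventsel layout (event [7:0], umask [15:8], inv bit 23,
		 * cmask [31:24]), so
		 *
		 *	X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1)
		 *		== 0x0e | 0x01 << 8 | 1 << 23 | 1 << 24
		 *		== 0x180010e
		 *
		 * i.e. count cycles in which fewer than one uop is issued.
		 */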
6668 
6669 		intel_pmu_pebs_data_source_nhm();
6670 		x86_add_quirk(intel_nehalem_quirk);
6671 		x86_pmu.pebs_no_tlb = 1;
6672 		extra_attr = nhm_format_attr;
6673 
6674 		pr_cont("Nehalem events, ");
6675 		name = "nehalem";
6676 		break;
6677 
6678 	case INTEL_ATOM_BONNELL:
6679 	case INTEL_ATOM_BONNELL_MID:
6680 	case INTEL_ATOM_SALTWELL:
6681 	case INTEL_ATOM_SALTWELL_MID:
6682 	case INTEL_ATOM_SALTWELL_TABLET:
6683 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
6684 		       sizeof(hw_cache_event_ids));
6685 
6686 		intel_pmu_lbr_init_atom();
6687 
6688 		x86_pmu.event_constraints = intel_gen_event_constraints;
6689 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
6690 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
6691 		pr_cont("Atom events, ");
6692 		name = "bonnell";
6693 		break;
6694 
6695 	case INTEL_ATOM_SILVERMONT:
6696 	case INTEL_ATOM_SILVERMONT_D:
6697 	case INTEL_ATOM_SILVERMONT_MID:
6698 	case INTEL_ATOM_AIRMONT:
6699 	case INTEL_ATOM_AIRMONT_MID:
6700 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
6701 			sizeof(hw_cache_event_ids));
6702 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
6703 		       sizeof(hw_cache_extra_regs));
6704 
6705 		intel_pmu_lbr_init_slm();
6706 
6707 		x86_pmu.event_constraints = intel_slm_event_constraints;
6708 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6709 		x86_pmu.extra_regs = intel_slm_extra_regs;
6710 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6711 		td_attr = slm_events_attrs;
6712 		extra_attr = slm_format_attr;
6713 		pr_cont("Silvermont events, ");
6714 		name = "silvermont";
6715 		break;
6716 
6717 	case INTEL_ATOM_GOLDMONT:
6718 	case INTEL_ATOM_GOLDMONT_D:
6719 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
6720 		       sizeof(hw_cache_event_ids));
6721 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
6722 		       sizeof(hw_cache_extra_regs));
6723 
6724 		intel_pmu_lbr_init_skl();
6725 
6726 		x86_pmu.event_constraints = intel_slm_event_constraints;
6727 		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
6728 		x86_pmu.extra_regs = intel_glm_extra_regs;
6729 		/*
6730 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6731 		 * for precise cycles.
6732 		 * :pp is identical to :ppp
6733 		 */
6734 		x86_pmu.pebs_aliases = NULL;
6735 		x86_pmu.pebs_prec_dist = true;
6736 		x86_pmu.lbr_pt_coexist = true;
6737 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6738 		td_attr = glm_events_attrs;
6739 		extra_attr = slm_format_attr;
6740 		pr_cont("Goldmont events, ");
6741 		name = "goldmont";
6742 		break;
6743 
6744 	case INTEL_ATOM_GOLDMONT_PLUS:
6745 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6746 		       sizeof(hw_cache_event_ids));
6747 		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
6748 		       sizeof(hw_cache_extra_regs));
6749 
6750 		intel_pmu_lbr_init_skl();
6751 
6752 		x86_pmu.event_constraints = intel_slm_event_constraints;
6753 		x86_pmu.extra_regs = intel_glm_extra_regs;
6754 		/*
6755 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6756 		 * for precise cycles.
6757 		 */
6758 		x86_pmu.pebs_aliases = NULL;
6759 		x86_pmu.pebs_prec_dist = true;
6760 		x86_pmu.lbr_pt_coexist = true;
6761 		x86_pmu.pebs_capable = ~0ULL;
6762 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6763 		x86_pmu.flags |= PMU_FL_PEBS_ALL;
6764 		x86_pmu.get_event_constraints = glp_get_event_constraints;
6765 		td_attr = glm_events_attrs;
6766 		/* Goldmont Plus has 4-wide pipeline */
6767 		event_attr_td_total_slots_scale_glm.event_str = "4";
6768 		extra_attr = slm_format_attr;
6769 		pr_cont("Goldmont plus events, ");
6770 		name = "goldmont_plus";
6771 		break;
6772 
6773 	case INTEL_ATOM_TREMONT_D:
6774 	case INTEL_ATOM_TREMONT:
6775 	case INTEL_ATOM_TREMONT_L:
6776 		x86_pmu.late_ack = true;
6777 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6778 		       sizeof(hw_cache_event_ids));
6779 		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
6780 		       sizeof(hw_cache_extra_regs));
6781 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6782 
6783 		intel_pmu_lbr_init_skl();
6784 
6785 		x86_pmu.event_constraints = intel_slm_event_constraints;
6786 		x86_pmu.extra_regs = intel_tnt_extra_regs;
6787 		/*
6788 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6789 		 * for precise cycles.
6790 		 */
6791 		x86_pmu.pebs_aliases = NULL;
6792 		x86_pmu.pebs_prec_dist = true;
6793 		x86_pmu.lbr_pt_coexist = true;
6794 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6795 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
6796 		td_attr = tnt_events_attrs;
6797 		extra_attr = slm_format_attr;
6798 		pr_cont("Tremont events, ");
6799 		name = "Tremont";
6800 		name = "tremont";
6801 
6802 	case INTEL_ATOM_GRACEMONT:
6803 		intel_pmu_init_grt(NULL);
6804 		intel_pmu_pebs_data_source_grt();
6805 		x86_pmu.pebs_latency_data = grt_latency_data;
6806 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
6807 		td_attr = tnt_events_attrs;
6808 		mem_attr = grt_mem_attrs;
6809 		extra_attr = nhm_format_attr;
6810 		pr_cont("Gracemont events, ");
6811 		name = "gracemont";
6812 		break;
6813 
6814 	case INTEL_ATOM_CRESTMONT:
6815 	case INTEL_ATOM_CRESTMONT_X:
6816 		intel_pmu_init_grt(NULL);
6817 		x86_pmu.extra_regs = intel_cmt_extra_regs;
6818 		intel_pmu_pebs_data_source_cmt();
6819 		x86_pmu.pebs_latency_data = cmt_latency_data;
6820 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
6821 		td_attr = cmt_events_attrs;
6822 		mem_attr = grt_mem_attrs;
6823 		extra_attr = cmt_format_attr;
6824 		pr_cont("Crestmont events, ");
6825 		name = "crestmont";
6826 		break;
6827 
6828 	case INTEL_WESTMERE:
6829 	case INTEL_WESTMERE_EP:
6830 	case INTEL_WESTMERE_EX:
6831 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
6832 		       sizeof(hw_cache_event_ids));
6833 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6834 		       sizeof(hw_cache_extra_regs));
6835 
6836 		intel_pmu_lbr_init_nhm();
6837 
6838 		x86_pmu.event_constraints = intel_westmere_event_constraints;
6839 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6840 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
6841 		x86_pmu.extra_regs = intel_westmere_extra_regs;
6842 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6843 
6844 		mem_attr = nhm_mem_events_attrs;
6845 
6846 		/* UOPS_ISSUED.STALLED_CYCLES */
6847 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6848 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6849 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6850 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6851 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6852 
6853 		intel_pmu_pebs_data_source_nhm();
6854 		extra_attr = nhm_format_attr;
6855 		pr_cont("Westmere events, ");
6856 		name = "westmere";
6857 		break;
6858 
6859 	case INTEL_SANDYBRIDGE:
6860 	case INTEL_SANDYBRIDGE_X:
6861 		x86_add_quirk(intel_sandybridge_quirk);
6862 		x86_add_quirk(intel_ht_bug);
6863 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6864 		       sizeof(hw_cache_event_ids));
6865 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6866 		       sizeof(hw_cache_extra_regs));
6867 
6868 		intel_pmu_lbr_init_snb();
6869 
6870 		x86_pmu.event_constraints = intel_snb_event_constraints;
6871 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
6872 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
6873 		if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
6874 			x86_pmu.extra_regs = intel_snbep_extra_regs;
6875 		else
6876 			x86_pmu.extra_regs = intel_snb_extra_regs;
6877 
6878 
6879 		/* all extra regs are per-cpu when HT is on */
6880 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6881 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6882 
6883 		td_attr  = snb_events_attrs;
6884 		mem_attr = snb_mem_events_attrs;
6885 
6886 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6887 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6888 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6889 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
6890 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6891 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
6892 
6893 		extra_attr = nhm_format_attr;
6894 
6895 		pr_cont("SandyBridge events, ");
6896 		name = "sandybridge";
6897 		break;
6898 
6899 	case INTEL_IVYBRIDGE:
6900 	case INTEL_IVYBRIDGE_X:
6901 		x86_add_quirk(intel_ht_bug);
6902 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6903 		       sizeof(hw_cache_event_ids));
6904 		/* dTLB-load-misses on IVB is different than SNB */
6905 		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
6906 
6907 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6908 		       sizeof(hw_cache_extra_regs));
6909 
6910 		intel_pmu_lbr_init_snb();
6911 
6912 		x86_pmu.event_constraints = intel_ivb_event_constraints;
6913 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
6914 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6915 		x86_pmu.pebs_prec_dist = true;
6916 		if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
6917 			x86_pmu.extra_regs = intel_snbep_extra_regs;
6918 		else
6919 			x86_pmu.extra_regs = intel_snb_extra_regs;
6920 		/* all extra regs are per-cpu when HT is on */
6921 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6922 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6923 
6924 		td_attr  = snb_events_attrs;
6925 		mem_attr = snb_mem_events_attrs;
6926 
6927 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6928 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6929 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6930 
6931 		extra_attr = nhm_format_attr;
6932 
6933 		pr_cont("IvyBridge events, ");
6934 		name = "ivybridge";
6935 		break;
6936 
6937 
6938 	case INTEL_HASWELL:
6939 	case INTEL_HASWELL_X:
6940 	case INTEL_HASWELL_L:
6941 	case INTEL_HASWELL_G:
6942 		x86_add_quirk(intel_ht_bug);
6943 		x86_add_quirk(intel_pebs_isolation_quirk);
6944 		x86_pmu.late_ack = true;
6945 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6946 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6947 
6948 		intel_pmu_lbr_init_hsw();
6949 
6950 		x86_pmu.event_constraints = intel_hsw_event_constraints;
6951 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
6952 		x86_pmu.extra_regs = intel_snbep_extra_regs;
6953 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6954 		x86_pmu.pebs_prec_dist = true;
6955 		/* all extra regs are per-cpu when HT is on */
6956 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6957 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6958 
6959 		x86_pmu.hw_config = hsw_hw_config;
6960 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
6961 		x86_pmu.limit_period = hsw_limit_period;
6962 		x86_pmu.lbr_double_abort = true;
6963 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6964 			hsw_format_attr : nhm_format_attr;
6965 		td_attr  = hsw_events_attrs;
6966 		mem_attr = hsw_mem_events_attrs;
6967 		tsx_attr = hsw_tsx_events_attrs;
6968 		pr_cont("Haswell events, ");
6969 		name = "haswell";
6970 		break;
6971 
6972 	case INTEL_BROADWELL:
6973 	case INTEL_BROADWELL_D:
6974 	case INTEL_BROADWELL_G:
6975 	case INTEL_BROADWELL_X:
6976 		x86_add_quirk(intel_pebs_isolation_quirk);
6977 		x86_pmu.late_ack = true;
6978 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6979 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6980 
6981 		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
6982 		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
6983 									 BDW_L3_MISS|HSW_SNOOP_DRAM;
6984 		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
6985 									  HSW_SNOOP_DRAM;
6986 		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
6987 									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6988 		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
6989 									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6990 
6991 		intel_pmu_lbr_init_hsw();
6992 
6993 		x86_pmu.event_constraints = intel_bdw_event_constraints;
6994 		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
6995 		x86_pmu.extra_regs = intel_snbep_extra_regs;
6996 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6997 		x86_pmu.pebs_prec_dist = true;
6998 		/* all extra regs are per-cpu when HT is on */
6999 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7000 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7001 
7002 		x86_pmu.hw_config = hsw_hw_config;
7003 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7004 		x86_pmu.limit_period = bdw_limit_period;
7005 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7006 			hsw_format_attr : nhm_format_attr;
7007 		td_attr  = hsw_events_attrs;
7008 		mem_attr = hsw_mem_events_attrs;
7009 		tsx_attr = hsw_tsx_events_attrs;
7010 		pr_cont("Broadwell events, ");
7011 		name = "broadwell";
7012 		break;
7013 
7014 	case INTEL_XEON_PHI_KNL:
7015 	case INTEL_XEON_PHI_KNM:
7016 		memcpy(hw_cache_event_ids,
7017 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7018 		memcpy(hw_cache_extra_regs,
7019 		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7020 		intel_pmu_lbr_init_knl();
7021 
7022 		x86_pmu.event_constraints = intel_slm_event_constraints;
7023 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7024 		x86_pmu.extra_regs = intel_knl_extra_regs;
7025 
7026 		/* all extra regs are per-cpu when HT is on */
7027 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7028 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7029 		extra_attr = slm_format_attr;
7030 		pr_cont("Knights Landing/Mill events, ");
7031 		name = "knights-landing";
7032 		break;
7033 
7034 	case INTEL_SKYLAKE_X:
7035 		pmem = true;
7036 		fallthrough;
7037 	case INTEL_SKYLAKE_L:
7038 	case INTEL_SKYLAKE:
7039 	case INTEL_KABYLAKE_L:
7040 	case INTEL_KABYLAKE:
7041 	case INTEL_COMETLAKE_L:
7042 	case INTEL_COMETLAKE:
7043 		x86_add_quirk(intel_pebs_isolation_quirk);
7044 		x86_pmu.late_ack = true;
7045 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7046 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7047 		intel_pmu_lbr_init_skl();
7048 
7049 		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
7050 		event_attr_td_recovery_bubbles.event_str_noht =
7051 			"event=0xd,umask=0x1,cmask=1";
7052 		event_attr_td_recovery_bubbles.event_str_ht =
7053 			"event=0xd,umask=0x1,cmask=1,any=1";
7054 
7055 		x86_pmu.event_constraints = intel_skl_event_constraints;
7056 		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
7057 		x86_pmu.extra_regs = intel_skl_extra_regs;
7058 		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
7059 		x86_pmu.pebs_prec_dist = true;
7060 		/* all extra regs are per-cpu when HT is on */
7061 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7062 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7063 
7064 		x86_pmu.hw_config = hsw_hw_config;
7065 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7066 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7067 			hsw_format_attr : nhm_format_attr;
7068 		extra_skl_attr = skl_format_attr;
7069 		td_attr  = hsw_events_attrs;
7070 		mem_attr = hsw_mem_events_attrs;
7071 		tsx_attr = hsw_tsx_events_attrs;
7072 		intel_pmu_pebs_data_source_skl(pmem);
7073 
7074 		/*
7075 		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
7076 		 * TSX force abort hooks are not required on these systems. Only deploy
7077 		 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
7078 		 */
7079 		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
7080 		   !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
7081 			x86_pmu.flags |= PMU_FL_TFA;
7082 			x86_pmu.get_event_constraints = tfa_get_event_constraints;
7083 			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
7084 			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
7085 		}
7086 
7087 		pr_cont("Skylake events, ");
7088 		name = "skylake";
7089 		break;
7090 
7091 	case INTEL_ICELAKE_X:
7092 	case INTEL_ICELAKE_D:
7093 		x86_pmu.pebs_ept = 1;
7094 		pmem = true;
7095 		fallthrough;
7096 	case INTEL_ICELAKE_L:
7097 	case INTEL_ICELAKE:
7098 	case INTEL_TIGERLAKE_L:
7099 	case INTEL_TIGERLAKE:
7100 	case INTEL_ROCKETLAKE:
7101 		x86_pmu.late_ack = true;
7102 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7103 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7104 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7105 		intel_pmu_lbr_init_skl();
7106 
7107 		x86_pmu.event_constraints = intel_icl_event_constraints;
7108 		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
7109 		x86_pmu.extra_regs = intel_icl_extra_regs;
7110 		x86_pmu.pebs_aliases = NULL;
7111 		x86_pmu.pebs_prec_dist = true;
7112 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7113 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7114 
7115 		x86_pmu.hw_config = hsw_hw_config;
7116 		x86_pmu.get_event_constraints = icl_get_event_constraints;
7117 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7118 			hsw_format_attr : nhm_format_attr;
7119 		extra_skl_attr = skl_format_attr;
7120 		mem_attr = icl_events_attrs;
7121 		td_attr = icl_td_events_attrs;
7122 		tsx_attr = icl_tsx_events_attrs;
7123 		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
7124 		x86_pmu.lbr_pt_coexist = true;
7125 		intel_pmu_pebs_data_source_skl(pmem);
7126 		x86_pmu.num_topdown_events = 4;
7127 		static_call_update(intel_pmu_update_topdown_event,
7128 				   &icl_update_topdown_event);
7129 		static_call_update(intel_pmu_set_topdown_event_period,
7130 				   &icl_set_topdown_event_period);
7131 		pr_cont("Icelake events, ");
7132 		name = "icelake";
7133 		break;
7134 
7135 	case INTEL_SAPPHIRERAPIDS_X:
7136 	case INTEL_EMERALDRAPIDS_X:
7137 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7138 		x86_pmu.extra_regs = intel_glc_extra_regs;
7139 		pr_cont("Sapphire Rapids events, ");
7140 		name = "sapphire_rapids";
7141 		goto glc_common;
7142 
7143 	case INTEL_GRANITERAPIDS_X:
7144 	case INTEL_GRANITERAPIDS_D:
7145 		x86_pmu.extra_regs = intel_rwc_extra_regs;
7146 		pr_cont("Granite Rapids events, ");
7147 		name = "granite_rapids";
7148 
7149 	glc_common:
7150 		intel_pmu_init_glc(NULL);
7151 		x86_pmu.pebs_ept = 1;
7152 		x86_pmu.hw_config = hsw_hw_config;
7153 		x86_pmu.get_event_constraints = glc_get_event_constraints;
7154 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7155 			hsw_format_attr : nhm_format_attr;
7156 		extra_skl_attr = skl_format_attr;
7157 		mem_attr = glc_events_attrs;
7158 		td_attr = glc_td_events_attrs;
7159 		tsx_attr = glc_tsx_events_attrs;
7160 		intel_pmu_pebs_data_source_skl(true);
7161 		break;
7162 
7163 	case INTEL_ALDERLAKE:
7164 	case INTEL_ALDERLAKE_L:
7165 	case INTEL_RAPTORLAKE:
7166 	case INTEL_RAPTORLAKE_P:
7167 	case INTEL_RAPTORLAKE_S:
7168 		/*
7169 		 * Alder Lake has two types of CPUs: Core and Atom.
7170 		 *
7171 		 * Initialize the common PerfMon capabilities here.
7172 		 */
7173 		intel_pmu_init_hybrid(hybrid_big_small);
7174 
7175 		x86_pmu.pebs_latency_data = grt_latency_data;
7176 		x86_pmu.get_event_constraints = adl_get_event_constraints;
7177 		x86_pmu.hw_config = adl_hw_config;
7178 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
7179 
7180 		td_attr = adl_hybrid_events_attrs;
7181 		mem_attr = adl_hybrid_mem_attrs;
7182 		tsx_attr = adl_hybrid_tsx_attrs;
7183 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7184 			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
7185 
7186 		/* Initialize big core specific PerfMon capabilities. */
7187 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7188 		intel_pmu_init_glc(&pmu->pmu);
7189 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
7190 			pmu->cntr_mask64 <<= 2;
7191 			pmu->cntr_mask64 |= 0x3;
7192 			pmu->fixed_cntr_mask64 <<= 1;
7193 			pmu->fixed_cntr_mask64 |= 0x1;
7194 		} else {
7195 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7196 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7197 		}
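
		/*
		 * Example of the adjustment above, assuming leaf 0xA
		 * enumerates the common subset of 6 GP and 3 fixed counters
		 * on a hybrid part:
		 *
		 *	cntr_mask64       == (0x3f << 2) | 0x3 == 0xff  (8 GP)
		 *	fixed_cntr_mask64 == (0x7 << 1)  | 0x1 == 0xf   (4 fixed)
		 *
		 * crediting the P-cores with the extra counters that the
		 * common enumeration hides.
		 */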
7198 
7199 		/*
7200 		 * Quirk: on some Alder Lake machines, when all E-cores are disabled in
7201 		 * the BIOS, CPUID leaf 0xA enumerates all counters of the P-cores.
7202 		 * However, X86_FEATURE_HYBRID_CPU is still set, so the code above would
7203 		 * mistakenly add extra counters for the P-cores. Correct the number of
7204 		 * counters here.
7205 		 */
7206 		if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
7207 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7208 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7209 		}
7210 
7211 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7212 		pmu->unconstrained = (struct event_constraint)
7213 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7214 				     0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7215 
7216 		pmu->extra_regs = intel_glc_extra_regs;
7217 
7218 		/* Initialize Atom core specific PerfMon capabilities. */
7219 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7220 		intel_pmu_init_grt(&pmu->pmu);
7221 
7222 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7223 		intel_pmu_pebs_data_source_adl();
7224 		pr_cont("Alderlake Hybrid events, ");
7225 		name = "alderlake_hybrid";
7226 		break;
7227 
7228 	case INTEL_METEORLAKE:
7229 	case INTEL_METEORLAKE_L:
7230 	case INTEL_ARROWLAKE_U:
7231 		intel_pmu_init_hybrid(hybrid_big_small);
7232 
7233 		x86_pmu.pebs_latency_data = cmt_latency_data;
7234 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7235 		x86_pmu.hw_config = adl_hw_config;
7236 
7237 		td_attr = adl_hybrid_events_attrs;
7238 		mem_attr = mtl_hybrid_mem_attrs;
7239 		tsx_attr = adl_hybrid_tsx_attrs;
7240 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7241 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7242 
7243 		/* Initialize big core specific PerfMon capabilities. */
7244 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7245 		intel_pmu_init_glc(&pmu->pmu);
7246 		pmu->extra_regs = intel_rwc_extra_regs;
7247 
7248 		/* Initialize Atom core specific PerfMon capabilities. */
7249 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7250 		intel_pmu_init_grt(&pmu->pmu);
7251 		pmu->extra_regs = intel_cmt_extra_regs;
7252 
7253 		intel_pmu_pebs_data_source_mtl();
7254 		pr_cont("Meteorlake Hybrid events, ");
7255 		name = "meteorlake_hybrid";
7256 		break;
7257 
7258 	case INTEL_LUNARLAKE_M:
7259 	case INTEL_ARROWLAKE:
7260 		intel_pmu_init_hybrid(hybrid_big_small);
7261 
7262 		x86_pmu.pebs_latency_data = lnl_latency_data;
7263 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7264 		x86_pmu.hw_config = adl_hw_config;
7265 
7266 		td_attr = lnl_hybrid_events_attrs;
7267 		mem_attr = mtl_hybrid_mem_attrs;
7268 		tsx_attr = adl_hybrid_tsx_attrs;
7269 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7270 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7271 
7272 		/* Initialize big core specific PerfMon capabilities. */
7273 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7274 		intel_pmu_init_lnc(&pmu->pmu);
7275 
7276 		/* Initialize Atom core specific PerfMon capabilities. */
7277 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7278 		intel_pmu_init_skt(&pmu->pmu);
7279 
7280 		intel_pmu_pebs_data_source_lnl();
7281 		pr_cont("Lunarlake Hybrid events, ");
7282 		name = "lunarlake_hybrid";
7283 		break;
7284 
7285 	case INTEL_ARROWLAKE_H:
7286 		intel_pmu_init_hybrid(hybrid_big_small_tiny);
7287 
7288 		x86_pmu.pebs_latency_data = arl_h_latency_data;
7289 		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
7290 		x86_pmu.hw_config = arl_h_hw_config;
7291 
7292 		td_attr = arl_h_hybrid_events_attrs;
7293 		mem_attr = arl_h_hybrid_mem_attrs;
7294 		tsx_attr = adl_hybrid_tsx_attrs;
7295 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7296 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7297 
7298 		/* Initialize big core specific PerfMon capabilities. */
7299 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7300 		intel_pmu_init_lnc(&pmu->pmu);
7301 
7302 		/* Initialize Atom core specific PerfMon capabilities. */
7303 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7304 		intel_pmu_init_skt(&pmu->pmu);
7305 
7306 		/* Initialize low-power Atom specific PerfMon capabilities. */
7307 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
7308 		intel_pmu_init_grt(&pmu->pmu);
7309 		pmu->extra_regs = intel_cmt_extra_regs;
7310 
7311 		intel_pmu_pebs_data_source_arl_h();
7312 		pr_cont("ArrowLake-H Hybrid events, ");
7313 		name = "arrowlake_h_hybrid";
7314 		break;
7315 
7316 	default:
7317 		switch (x86_pmu.version) {
7318 		case 1:
7319 			x86_pmu.event_constraints = intel_v1_event_constraints;
7320 			pr_cont("generic architected perfmon v1, ");
7321 			name = "generic_arch_v1";
7322 			break;
7323 		case 2:
7324 		case 3:
7325 		case 4:
7326 			/*
7327 			 * default constraints for v2 and up
7328 			 */
7329 			x86_pmu.event_constraints = intel_gen_event_constraints;
7330 			pr_cont("generic architected perfmon, ");
7331 			name = "generic_arch_v2+";
7332 			break;
7333 		default:
7334 			/*
7335 			 * The default constraints for v5 and up can support up to
7336 			 * 16 fixed counters. For fixed counters 4 and later,
7337 			 * the pseudo-encoding is applied.
7338 			 * The constraint table is truncated to match the CPUID
7339 			 * enumeration by inserting an EVENT_CONSTRAINT_END marker.
7340 			 */
7341 			if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
7342 				x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
7343 			intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
7344 			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
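
			/*
			 * Truncation example: with fixed_cntr_mask64 == 0x7,
			 * fls64(0x7) == 3, so slot 3 of the table gets
			 * weight == -1 and becomes the end-of-table marker
			 * (the same convention EVENT_CONSTRAINT_END uses);
			 * constraints for fixed counters 3..15 are then
			 * never consulted.
			 */
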
7345 			pr_cont("generic architected perfmon, ");
7346 			name = "generic_arch_v5+";
7347 			break;
7348 		}
7349 	}
7350 
7351 	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
7352 
7353 	if (!is_hybrid()) {
7354 		group_events_td.attrs  = td_attr;
7355 		group_events_mem.attrs = mem_attr;
7356 		group_events_tsx.attrs = tsx_attr;
7357 		group_format_extra.attrs = extra_attr;
7358 		group_format_extra_skl.attrs = extra_skl_attr;
7359 
7360 		x86_pmu.attr_update = attr_update;
7361 	} else {
7362 		hybrid_group_events_td.attrs  = td_attr;
7363 		hybrid_group_events_mem.attrs = mem_attr;
7364 		hybrid_group_events_tsx.attrs = tsx_attr;
7365 		hybrid_group_format_extra.attrs = extra_attr;
7366 
7367 		x86_pmu.attr_update = hybrid_attr_update;
7368 	}
7369 
7370 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
7371 				      &x86_pmu.fixed_cntr_mask64,
7372 				      &x86_pmu.intel_ctrl);
7373 
7374 	/* AnyThread may be deprecated on arch perfmon v5 or later */
7375 	if (x86_pmu.intel_cap.anythread_deprecated)
7376 		x86_pmu.format_attrs = intel_arch_formats_attr;
7377 
7378 	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
7379 					  x86_pmu.cntr_mask64,
7380 					  x86_pmu.fixed_cntr_mask64,
7381 					  x86_pmu.intel_ctrl);
7382 	/*
7383 	 * Accessing an LBR MSR may cause #GP under certain circumstances.
7384 	 * Check all LBR MSRs here.
7385 	 * Disable LBR access if any LBR MSR cannot be accessed.
7386 	 */
7387 	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
7388 		x86_pmu.lbr_nr = 0;
7389 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
7390 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
7391 		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
7392 			x86_pmu.lbr_nr = 0;
7393 	}
7394 
7395 	if (x86_pmu.lbr_nr) {
7396 		intel_pmu_lbr_init();
7397 
7398 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
7399 
7400 		/* only support branch_stack snapshot for perfmon >= v2 */
7401 		if (x86_pmu.disable_all == intel_pmu_disable_all) {
7402 			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
7403 				static_call_update(perf_snapshot_branch_stack,
7404 						   intel_pmu_snapshot_arch_branch_stack);
7405 			} else {
7406 				static_call_update(perf_snapshot_branch_stack,
7407 						   intel_pmu_snapshot_branch_stack);
7408 			}
7409 		}
7410 	}
7411 
7412 	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
7413 
7414 	/* Support full width counters using alternative MSR range */
7415 	if (x86_pmu.intel_cap.full_width_write) {
7416 		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
7417 		x86_pmu.perfctr = MSR_IA32_PMC0;
7418 		pr_cont("full-width counters, ");
7419 	}
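
	/*
	 * Example with 48-bit counters: cntval_mask >> 1 leaves
	 * max_period == 0x7fffffffffff. Half the counter width stays in
	 * reserve so a counter primed with -period cannot wrap before the
	 * overflow interrupt is handled, and the MSR_IA32_PMC0 alias
	 * accepts full-width writes where the legacy counter MSRs would
	 * sign-extend bit 31.
	 */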
7420 
7421 	/* Support V6+ MSR Aliasing */
7422 	if (x86_pmu.version >= 6) {
7423 		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
7424 		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
7425 		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
7426 		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
7427 	}
7428 
7429 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
7430 		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
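
	/*
	 * GLOBAL_CTRL_EN_PERF_METRICS is a bit position (48), so this sets
	 * the EN_PERF_METRICS bit in the cached IA32_PERF_GLOBAL_CTRL
	 * value. Hybrid parts skip this here because they track the
	 * perf_metrics capability per PMU instance instead.
	 */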
7431 
7432 	if (x86_pmu.intel_cap.pebs_timing_info)
7433 		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
7434 
7435 	intel_aux_output_init();
7436 
7437 	return 0;
7438 }
7439 
7440 /*
7441  * HT bug: phase 2 init
7442  * Called once we have valid topology information to check
7443  * whether or not HT is enabled.
7444  * If HT is off, then we disable the workaround.
7445  */
7446 static __init int fixup_ht_bug(void)
7447 {
7448 	int c;
7449 	/*
7450 	 * problem not present on this CPU model, nothing to do
7451 	 */
7452 	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
7453 		return 0;
7454 
7455 	if (topology_max_smt_threads() > 1) {
7456 		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
7457 		return 0;
7458 	}
7459 
7460 	cpus_read_lock();
7461 
7462 	hardlockup_detector_perf_stop();
7463 
7464 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
7465 
7466 	x86_pmu.start_scheduling = NULL;
7467 	x86_pmu.commit_scheduling = NULL;
7468 	x86_pmu.stop_scheduling = NULL;
7469 
7470 	hardlockup_detector_perf_restart();
7471 
7472 	for_each_online_cpu(c)
7473 		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
7474 
7475 	cpus_read_unlock();
7476 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
7477 	return 0;
7478 }
7479 subsys_initcall(fixup_ht_bug)
7480