// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/debugreg.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
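
/*
 * In these encodings the low byte is the event select and the next byte
 * is the unit mask, so e.g. 0x4f2e is event 0x2e, umask 0x4f
 * (LONGEST_LAT_CACHE.REFERENCE). A roughly equivalent raw request from
 * userspace (illustrative only) would be:
 *
 *	perf stat -e cpu/event=0x2e,umask=0x4f/ -- <workload>
 */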

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
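
/*
 * In the INTEL_EVENT_CONSTRAINT() entries above, the second argument is
 * a bitmask of the general-purpose counters the event may run on: 0x1
 * means counter 0 only, 0x2 counter 1 only, 0x3 counters 0-1, and so on.
 * FIXED_EVENT_CONSTRAINT() instead names a fixed counter by index.
 */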

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
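
/*
 * OFFCORE_RESPONSE (event 0x01b7 here) needs an auxiliary MSR,
 * MSR_OFFCORE_RSP_0, programmed with the desired request/response bits;
 * the 0xffff mask above is the set of bits valid on this CPU.
 * intel_fixup_er() relies on the OFFCORE_RSP_x entries coming first in
 * these tables when it retargets an event to whichever RSP register the
 * scheduler managed to allocate.
 */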

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
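
/*
 * The "HT bug" mentioned above is an SNB/IVB/HSW erratum whereby a
 * memory event running on one hyperthread can corrupt the value of the
 * same-numbered counter on the sibling thread. INTEL_EXCLEVT_CONSTRAINT()
 * marks such events so that the scheduler gives them exclusive use of
 * the corresponding sibling counter (see intel_get_excl_constraints()
 * later in this file).
 */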

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};
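
/*
 * Perfmon v5 enumerates fixed counters through CPUID, so each fixed
 * counter <n> beyond the architectural three gets a generic
 * pseudo-encoding of event 0x00 with umask <n> + 1, as the 0x0500
 * (fixed counter 4) through 0x1000 (fixed counter 15) entries above
 * spell out.
 */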

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	EVENT_CONSTRAINT_END
};
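
/*
 * On this Atom core the topdown events have dedicated fixed counters
 * (4-6 above); this differs from the big cores, which derive the topdown
 * metrics from the PERF_METRICS MSR instead.
 */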

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note the low 8 bits of the eventsel code do not form a contiguous
	 * field; they contain some bits that would #GP when written. Those
	 * bits are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};
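
/*
 * FRONTEND_RETIRED (event 0xc6) takes its sub-event selector and latency
 * threshold from MSR_PEBS_FRONTEND rather than from the umask, hence the
 * FE extra register above with its own valid-bits mask.
 */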

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0),	/* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_glc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally, event codes < 0x90 are restricted to counters 0-3.
	 * 0x2e and 0x3c are exceptions with no such restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally, event codes >= 0x90 have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};
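
/*
 * Note the wider masks here: 0xff spans general-purpose counters 0-7,
 * and the 0x3ff masks in the Lion Cove table below span counters 0-9,
 * reflecting the larger counter files on these cores.
 */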

static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_lnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x20, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),

	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),

	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),

	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};
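
/*
 * The EVENT_ATTR_STR() definitions above are exported under
 * /sys/bus/event_source/devices/cpu/events/, so tools can request e.g.
 * "perf record -e cpu/mem-loads/P". ldlat=3 is the default minimum load
 * latency, in cycles, for the PEBS load-latency facility.
 */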

/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all counted in slots, where a slot is an issue
 * opportunity in a 4-wide pipeline. Some events are already reported
 * in slots; for cycle-based events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the averaged case the metric is still scaled to the pipeline width,
 * so we use a factor of 2: (count_t0 + count_t1) / 2 * 4.
 */
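
/*
 * For illustration, the standard level-1 topdown breakdown built from
 * these events (computed by tooling, not by this driver):
 *
 *	frontend_bound	= fetch_bubbles / total_slots
 *	bad_speculation	= (slots_issued - slots_retired
 *			   + recovery_bubbles) / total_slots
 *	retiring	= slots_retired / total_slots
 *	backend_bound	= 1 - frontend_bound - bad_speculation - retiring
 *
 * with the cycle-based terms scaled to slots by the pipeline width, as
 * the .scale attributes below encode.
 */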

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots,			slots,			"event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring,	td_retiring,		"event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec,	td_bad_spec,		"event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound,	td_fe_bound,		"event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound,	td_be_bound,		"event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops,	td_heavy_ops,		"event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict,	td_br_mispredict,	"event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat,	td_fetch_lat,		"event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound,	td_mem_bound,		"event=0x00,umask=0x87");
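
/*
 * The event=0x00 encodings above are pseudo events: umask 0x4 selects the
 * fixed SLOTS counter and umasks 0x80-0x87 select the topdown metrics
 * that Ice Lake and later report through the PERF_METRICS MSR as
 * fractions of SLOTS, rather than on general-purpose counters.
 */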

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

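/*
 * Translate a generic PERF_COUNT_HW_* index from userspace into the raw
 * encoding above (e.g. PERF_COUNT_HW_INSTRUCTIONS -> 0x00c0); this is
 * wired up as the PMU's ->event_map() callback.
 */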
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 glc_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS)   ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS)   ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS)   ] = 0x12a,
	},
 },
};
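
/*
 * As in all the hw_cache_event_ids tables here: -1 means the operation
 * is not supported at all, 0 means no suitable event is available, and
 * anything else is the raw event encoding to program.
 */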

static __initconst const u64 glc_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS)   ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS)   ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS)   ] = 0x3fb3000001,
	},
 },
};
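
/*
 * These values pair with the 0x12a/0x12b OCR encodings in the table
 * above: when such a cache event is used, the driver programs the listed
 * bits into the corresponding MSR_OFFCORE_RSP_x.
 */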

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
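
/*
 * These SKL_* bits are composed into MSR_OFFCORE_RSP_x values in
 * skl_hw_cache_extra_regs below; e.g. an LL read miss is counted as
 * SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE.
 */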

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
        			/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
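
/*
 * Unlike the later generations above, the Nehalem/Westmere
 * MSR_OFFCORE_RESPONSE layout is only 16 bits wide: request type in bits
 * 0-7 and response/supplier information in bits 8-15, which is why plain
 * (1 << n) suffices here.
 */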

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI                 */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE               */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1644 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1645 	},
1646 	[ C(OP_WRITE) ] = {
1647 		[ C(RESULT_ACCESS) ] = -1,
1648 		[ C(RESULT_MISS)   ] = -1,
1649 	},
1650 	[ C(OP_PREFETCH) ] = {
1651 		[ C(RESULT_ACCESS) ] = 0,
1652 		[ C(RESULT_MISS)   ] = 0,
1653 	},
1654  },
1655  [ C(LL  ) ] = {
1656 	[ C(OP_READ) ] = {
1657 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1658 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1659 	},
1660 	[ C(OP_WRITE) ] = {
1661 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1662 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1663 	},
1664 	[ C(OP_PREFETCH) ] = {
1665 		[ C(RESULT_ACCESS) ] = 0,
1666 		[ C(RESULT_MISS)   ] = 0,
1667 	},
1668  },
1669  [ C(DTLB) ] = {
1670 	[ C(OP_READ) ] = {
1671 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1672 		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1673 	},
1674 	[ C(OP_WRITE) ] = {
1675 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1676 		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1677 	},
1678 	[ C(OP_PREFETCH) ] = {
1679 		[ C(RESULT_ACCESS) ] = 0,
1680 		[ C(RESULT_MISS)   ] = 0,
1681 	},
1682  },
1683  [ C(ITLB) ] = {
1684 	[ C(OP_READ) ] = {
1685 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1686 		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1687 	},
1688 	[ C(OP_WRITE) ] = {
1689 		[ C(RESULT_ACCESS) ] = -1,
1690 		[ C(RESULT_MISS)   ] = -1,
1691 	},
1692 	[ C(OP_PREFETCH) ] = {
1693 		[ C(RESULT_ACCESS) ] = -1,
1694 		[ C(RESULT_MISS)   ] = -1,
1695 	},
1696  },
1697  [ C(BPU ) ] = {
1698 	[ C(OP_READ) ] = {
1699 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1700 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1701 	},
1702 	[ C(OP_WRITE) ] = {
1703 		[ C(RESULT_ACCESS) ] = -1,
1704 		[ C(RESULT_MISS)   ] = -1,
1705 	},
1706 	[ C(OP_PREFETCH) ] = {
1707 		[ C(RESULT_ACCESS) ] = -1,
1708 		[ C(RESULT_MISS)   ] = -1,
1709 	},
1710  },
1711 };
1712 
1713 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1714 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1715 /* no_alloc_cycles.not_delivered */
1716 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1717 	       "event=0xca,umask=0x50");
1718 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1719 /* uops_retired.all */
1720 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1721 	       "event=0xc2,umask=0x10");
1722 /* uops_retired.all */
1723 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1724 	       "event=0xc2,umask=0x10");
1725 
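/*
 * The topdown-total-slots scale of "2" above presumably reflects
 * Silvermont's two-wide pipeline: total slots are derived as
 * 2 * unhalted core cycles (event 0x3c).
 */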
1726 static struct attribute *slm_events_attrs[] = {
1727 	EVENT_PTR(td_total_slots_slm),
1728 	EVENT_PTR(td_total_slots_scale_slm),
1729 	EVENT_PTR(td_fetch_bubbles_slm),
1730 	EVENT_PTR(td_fetch_bubbles_scale_slm),
1731 	EVENT_PTR(td_slots_issued_slm),
1732 	EVENT_PTR(td_slots_retired_slm),
1733 	NULL
1734 };
1735 
1736 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1737 {
1738 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1739 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1740 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1741 	EVENT_EXTRA_END
1742 };
1743 
1744 #define SLM_DMND_READ		SNB_DMND_DATA_RD
1745 #define SLM_DMND_WRITE		SNB_DMND_RFO
1746 #define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)
1747 
1748 #define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1749 #define SLM_LLC_ACCESS		SNB_RESP_ANY
1750 #define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
1751 
1752 static __initconst const u64 slm_hw_cache_extra_regs
1753 				[PERF_COUNT_HW_CACHE_MAX]
1754 				[PERF_COUNT_HW_CACHE_OP_MAX]
1755 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1756 {
1757  [ C(LL  ) ] = {
1758 	[ C(OP_READ) ] = {
1759 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1760 		[ C(RESULT_MISS)   ] = 0,
1761 	},
1762 	[ C(OP_WRITE) ] = {
1763 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1764 		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1765 	},
1766 	[ C(OP_PREFETCH) ] = {
1767 		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1768 		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1769 	},
1770  },
1771 };
1772 
1773 static __initconst const u64 slm_hw_cache_event_ids
1774 				[PERF_COUNT_HW_CACHE_MAX]
1775 				[PERF_COUNT_HW_CACHE_OP_MAX]
1776 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1777 {
1778  [ C(L1D) ] = {
1779 	[ C(OP_READ) ] = {
1780 		[ C(RESULT_ACCESS) ] = 0,
1781 		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1782 	},
1783 	[ C(OP_WRITE) ] = {
1784 		[ C(RESULT_ACCESS) ] = 0,
1785 		[ C(RESULT_MISS)   ] = 0,
1786 	},
1787 	[ C(OP_PREFETCH) ] = {
1788 		[ C(RESULT_ACCESS) ] = 0,
1789 		[ C(RESULT_MISS)   ] = 0,
1790 	},
1791  },
1792  [ C(L1I ) ] = {
1793 	[ C(OP_READ) ] = {
1794 		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1795 		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1796 	},
1797 	[ C(OP_WRITE) ] = {
1798 		[ C(RESULT_ACCESS) ] = -1,
1799 		[ C(RESULT_MISS)   ] = -1,
1800 	},
1801 	[ C(OP_PREFETCH) ] = {
1802 		[ C(RESULT_ACCESS) ] = 0,
1803 		[ C(RESULT_MISS)   ] = 0,
1804 	},
1805  },
1806  [ C(LL  ) ] = {
1807 	[ C(OP_READ) ] = {
1808 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1809 		[ C(RESULT_ACCESS) ] = 0x01b7,
1810 		[ C(RESULT_MISS)   ] = 0,
1811 	},
1812 	[ C(OP_WRITE) ] = {
1813 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1814 		[ C(RESULT_ACCESS) ] = 0x01b7,
1815 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1816 		[ C(RESULT_MISS)   ] = 0x01b7,
1817 	},
1818 	[ C(OP_PREFETCH) ] = {
1819 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1820 		[ C(RESULT_ACCESS) ] = 0x01b7,
1821 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1822 		[ C(RESULT_MISS)   ] = 0x01b7,
1823 	},
1824  },
1825  [ C(DTLB) ] = {
1826 	[ C(OP_READ) ] = {
1827 		[ C(RESULT_ACCESS) ] = 0,
1828 		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1829 	},
1830 	[ C(OP_WRITE) ] = {
1831 		[ C(RESULT_ACCESS) ] = 0,
1832 		[ C(RESULT_MISS)   ] = 0,
1833 	},
1834 	[ C(OP_PREFETCH) ] = {
1835 		[ C(RESULT_ACCESS) ] = 0,
1836 		[ C(RESULT_MISS)   ] = 0,
1837 	},
1838  },
1839  [ C(ITLB) ] = {
1840 	[ C(OP_READ) ] = {
1841 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1842 		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1843 	},
1844 	[ C(OP_WRITE) ] = {
1845 		[ C(RESULT_ACCESS) ] = -1,
1846 		[ C(RESULT_MISS)   ] = -1,
1847 	},
1848 	[ C(OP_PREFETCH) ] = {
1849 		[ C(RESULT_ACCESS) ] = -1,
1850 		[ C(RESULT_MISS)   ] = -1,
1851 	},
1852  },
1853  [ C(BPU ) ] = {
1854 	[ C(OP_READ) ] = {
1855 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1856 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1857 	},
1858 	[ C(OP_WRITE) ] = {
1859 		[ C(RESULT_ACCESS) ] = -1,
1860 		[ C(RESULT_MISS)   ] = -1,
1861 	},
1862 	[ C(OP_PREFETCH) ] = {
1863 		[ C(RESULT_ACCESS) ] = -1,
1864 		[ C(RESULT_MISS)   ] = -1,
1865 	},
1866  },
1867 };
1868 
1869 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1870 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1871 /* UOPS_NOT_DELIVERED.ANY */
1872 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1873 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1874 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1875 /* UOPS_RETIRED.ANY */
1876 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1877 /* UOPS_ISSUED.ANY */
1878 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1879 
1880 static struct attribute *glm_events_attrs[] = {
1881 	EVENT_PTR(td_total_slots_glm),
1882 	EVENT_PTR(td_total_slots_scale_glm),
1883 	EVENT_PTR(td_fetch_bubbles_glm),
1884 	EVENT_PTR(td_recovery_bubbles_glm),
1885 	EVENT_PTR(td_slots_issued_glm),
1886 	EVENT_PTR(td_slots_retired_glm),
1887 	NULL
1888 };
1889 
1890 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1891 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1892 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1893 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1894 	EVENT_EXTRA_END
1895 };
1896 
1897 #define GLM_DEMAND_DATA_RD		BIT_ULL(0)
1898 #define GLM_DEMAND_RFO			BIT_ULL(1)
1899 #define GLM_ANY_RESPONSE		BIT_ULL(16)
1900 #define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
1901 #define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
1902 #define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
1903 #define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
1904 #define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
1905 #define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1906 #define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
1907 
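/*
 * For illustration: with the bits above, a demand data read receiving
 * any LLC response is encoded as GLM_DEMAND_READ | GLM_LLC_ACCESS =
 * BIT_ULL(0) | BIT_ULL(16) = 0x10001; this is the OFFCORE_RESPONSE
 * extra-reg value programmed for the C(LL)/C(OP_READ)/C(RESULT_ACCESS)
 * slot in glm_hw_cache_extra_regs below.
 */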
1908 static __initconst const u64 glm_hw_cache_event_ids
1909 				[PERF_COUNT_HW_CACHE_MAX]
1910 				[PERF_COUNT_HW_CACHE_OP_MAX]
1911 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1912 	[C(L1D)] = {
1913 		[C(OP_READ)] = {
1914 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1915 			[C(RESULT_MISS)]	= 0x0,
1916 		},
1917 		[C(OP_WRITE)] = {
1918 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1919 			[C(RESULT_MISS)]	= 0x0,
1920 		},
1921 		[C(OP_PREFETCH)] = {
1922 			[C(RESULT_ACCESS)]	= 0x0,
1923 			[C(RESULT_MISS)]	= 0x0,
1924 		},
1925 	},
1926 	[C(L1I)] = {
1927 		[C(OP_READ)] = {
1928 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
1929 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
1930 		},
1931 		[C(OP_WRITE)] = {
1932 			[C(RESULT_ACCESS)]	= -1,
1933 			[C(RESULT_MISS)]	= -1,
1934 		},
1935 		[C(OP_PREFETCH)] = {
1936 			[C(RESULT_ACCESS)]	= 0x0,
1937 			[C(RESULT_MISS)]	= 0x0,
1938 		},
1939 	},
1940 	[C(LL)] = {
1941 		[C(OP_READ)] = {
1942 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1943 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1944 		},
1945 		[C(OP_WRITE)] = {
1946 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1947 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1948 		},
1949 		[C(OP_PREFETCH)] = {
1950 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1951 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1952 		},
1953 	},
1954 	[C(DTLB)] = {
1955 		[C(OP_READ)] = {
1956 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1957 			[C(RESULT_MISS)]	= 0x0,
1958 		},
1959 		[C(OP_WRITE)] = {
1960 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1961 			[C(RESULT_MISS)]	= 0x0,
1962 		},
1963 		[C(OP_PREFETCH)] = {
1964 			[C(RESULT_ACCESS)]	= 0x0,
1965 			[C(RESULT_MISS)]	= 0x0,
1966 		},
1967 	},
1968 	[C(ITLB)] = {
1969 		[C(OP_READ)] = {
1970 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
1971 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
1972 		},
1973 		[C(OP_WRITE)] = {
1974 			[C(RESULT_ACCESS)]	= -1,
1975 			[C(RESULT_MISS)]	= -1,
1976 		},
1977 		[C(OP_PREFETCH)] = {
1978 			[C(RESULT_ACCESS)]	= -1,
1979 			[C(RESULT_MISS)]	= -1,
1980 		},
1981 	},
1982 	[C(BPU)] = {
1983 		[C(OP_READ)] = {
1984 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
1985 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
1986 		},
1987 		[C(OP_WRITE)] = {
1988 			[C(RESULT_ACCESS)]	= -1,
1989 			[C(RESULT_MISS)]	= -1,
1990 		},
1991 		[C(OP_PREFETCH)] = {
1992 			[C(RESULT_ACCESS)]	= -1,
1993 			[C(RESULT_MISS)]	= -1,
1994 		},
1995 	},
1996 };
1997 
1998 static __initconst const u64 glm_hw_cache_extra_regs
1999 				[PERF_COUNT_HW_CACHE_MAX]
2000 				[PERF_COUNT_HW_CACHE_OP_MAX]
2001 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2002 	[C(LL)] = {
2003 		[C(OP_READ)] = {
2004 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2005 						  GLM_LLC_ACCESS,
2006 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2007 						  GLM_LLC_MISS,
2008 		},
2009 		[C(OP_WRITE)] = {
2010 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2011 						  GLM_LLC_ACCESS,
2012 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2013 						  GLM_LLC_MISS,
2014 		},
2015 		[C(OP_PREFETCH)] = {
2016 			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
2017 						  GLM_LLC_ACCESS,
2018 			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
2019 						  GLM_LLC_MISS,
2020 		},
2021 	},
2022 };
2023 
2024 static __initconst const u64 glp_hw_cache_event_ids
2025 				[PERF_COUNT_HW_CACHE_MAX]
2026 				[PERF_COUNT_HW_CACHE_OP_MAX]
2027 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2028 	[C(L1D)] = {
2029 		[C(OP_READ)] = {
2030 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2031 			[C(RESULT_MISS)]	= 0x0,
2032 		},
2033 		[C(OP_WRITE)] = {
2034 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2035 			[C(RESULT_MISS)]	= 0x0,
2036 		},
2037 		[C(OP_PREFETCH)] = {
2038 			[C(RESULT_ACCESS)]	= 0x0,
2039 			[C(RESULT_MISS)]	= 0x0,
2040 		},
2041 	},
2042 	[C(L1I)] = {
2043 		[C(OP_READ)] = {
2044 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
2045 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
2046 		},
2047 		[C(OP_WRITE)] = {
2048 			[C(RESULT_ACCESS)]	= -1,
2049 			[C(RESULT_MISS)]	= -1,
2050 		},
2051 		[C(OP_PREFETCH)] = {
2052 			[C(RESULT_ACCESS)]	= 0x0,
2053 			[C(RESULT_MISS)]	= 0x0,
2054 		},
2055 	},
2056 	[C(LL)] = {
2057 		[C(OP_READ)] = {
2058 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2059 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2060 		},
2061 		[C(OP_WRITE)] = {
2062 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2063 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2064 		},
2065 		[C(OP_PREFETCH)] = {
2066 			[C(RESULT_ACCESS)]	= 0x0,
2067 			[C(RESULT_MISS)]	= 0x0,
2068 		},
2069 	},
2070 	[C(DTLB)] = {
2071 		[C(OP_READ)] = {
2072 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2073 			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
2074 		},
2075 		[C(OP_WRITE)] = {
2076 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2077 			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
2078 		},
2079 		[C(OP_PREFETCH)] = {
2080 			[C(RESULT_ACCESS)]	= 0x0,
2081 			[C(RESULT_MISS)]	= 0x0,
2082 		},
2083 	},
2084 	[C(ITLB)] = {
2085 		[C(OP_READ)] = {
2086 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
2087 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
2088 		},
2089 		[C(OP_WRITE)] = {
2090 			[C(RESULT_ACCESS)]	= -1,
2091 			[C(RESULT_MISS)]	= -1,
2092 		},
2093 		[C(OP_PREFETCH)] = {
2094 			[C(RESULT_ACCESS)]	= -1,
2095 			[C(RESULT_MISS)]	= -1,
2096 		},
2097 	},
2098 	[C(BPU)] = {
2099 		[C(OP_READ)] = {
2100 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
2101 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
2102 		},
2103 		[C(OP_WRITE)] = {
2104 			[C(RESULT_ACCESS)]	= -1,
2105 			[C(RESULT_MISS)]	= -1,
2106 		},
2107 		[C(OP_PREFETCH)] = {
2108 			[C(RESULT_ACCESS)]	= -1,
2109 			[C(RESULT_MISS)]	= -1,
2110 		},
2111 	},
2112 };
2113 
2114 static __initconst const u64 glp_hw_cache_extra_regs
2115 				[PERF_COUNT_HW_CACHE_MAX]
2116 				[PERF_COUNT_HW_CACHE_OP_MAX]
2117 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2118 	[C(LL)] = {
2119 		[C(OP_READ)] = {
2120 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2121 						  GLM_LLC_ACCESS,
2122 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2123 						  GLM_LLC_MISS,
2124 		},
2125 		[C(OP_WRITE)] = {
2126 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2127 						  GLM_LLC_ACCESS,
2128 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2129 						  GLM_LLC_MISS,
2130 		},
2131 		[C(OP_PREFETCH)] = {
2132 			[C(RESULT_ACCESS)]	= 0x0,
2133 			[C(RESULT_MISS)]	= 0x0,
2134 		},
2135 	},
2136 };
2137 
2138 #define TNT_LOCAL_DRAM			BIT_ULL(26)
2139 #define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD
2140 #define TNT_DEMAND_WRITE		GLM_DEMAND_RFO
2141 #define TNT_LLC_ACCESS			GLM_ANY_RESPONSE
2142 #define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2143 					 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2144 #define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2145 
2146 static __initconst const u64 tnt_hw_cache_extra_regs
2147 				[PERF_COUNT_HW_CACHE_MAX]
2148 				[PERF_COUNT_HW_CACHE_OP_MAX]
2149 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2150 	[C(LL)] = {
2151 		[C(OP_READ)] = {
2152 			[C(RESULT_ACCESS)]	= TNT_DEMAND_READ|
2153 						  TNT_LLC_ACCESS,
2154 			[C(RESULT_MISS)]	= TNT_DEMAND_READ|
2155 						  TNT_LLC_MISS,
2156 		},
2157 		[C(OP_WRITE)] = {
2158 			[C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE|
2159 						  TNT_LLC_ACCESS,
2160 			[C(RESULT_MISS)]	= TNT_DEMAND_WRITE|
2161 						  TNT_LLC_MISS,
2162 		},
2163 		[C(OP_PREFETCH)] = {
2164 			[C(RESULT_ACCESS)]	= 0x0,
2165 			[C(RESULT_MISS)]	= 0x0,
2166 		},
2167 	},
2168 };
2169 
2170 EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
2171 EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
2172 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
2173 EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");
2174 
2175 static struct attribute *tnt_events_attrs[] = {
2176 	EVENT_PTR(td_fe_bound_tnt),
2177 	EVENT_PTR(td_retiring_tnt),
2178 	EVENT_PTR(td_bad_spec_tnt),
2179 	EVENT_PTR(td_be_bound_tnt),
2180 	NULL,
2181 };
2182 
2183 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2184 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2185 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2186 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2187 	EVENT_EXTRA_END
2188 };
2189 
2190 EVENT_ATTR_STR(mem-loads,	mem_ld_grt,	"event=0xd0,umask=0x5,ldlat=3");
2191 EVENT_ATTR_STR(mem-stores,	mem_st_grt,	"event=0xd0,umask=0x6");
2192 
2193 static struct attribute *grt_mem_attrs[] = {
2194 	EVENT_PTR(mem_ld_grt),
2195 	EVENT_PTR(mem_st_grt),
2196 	NULL
2197 };
2198 
2199 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2200 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2201 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2202 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2203 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2204 	EVENT_EXTRA_END
2205 };
2206 
2207 EVENT_ATTR_STR(topdown-retiring,       td_retiring_cmt,        "event=0x72,umask=0x0");
2208 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_cmt,        "event=0x73,umask=0x0");
2209 
2210 static struct attribute *cmt_events_attrs[] = {
2211 	EVENT_PTR(td_fe_bound_tnt),
2212 	EVENT_PTR(td_retiring_cmt),
2213 	EVENT_PTR(td_bad_spec_cmt),
2214 	EVENT_PTR(td_be_bound_tnt),
2215 	NULL
2216 };
2217 
2218 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2219 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2220 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2221 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2222 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2223 	INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2224 	INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2225 	EVENT_EXTRA_END
2226 };
2227 
2228 EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_skt,        "event=0x9c,umask=0x01");
2229 EVENT_ATTR_STR(topdown-retiring,       td_retiring_skt,        "event=0xc2,umask=0x02");
2230 EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_skt,        "event=0xa4,umask=0x02");
2231 
2232 static struct attribute *skt_events_attrs[] = {
2233 	EVENT_PTR(td_fe_bound_skt),
2234 	EVENT_PTR(td_retiring_skt),
2235 	EVENT_PTR(td_bad_spec_cmt),
2236 	EVENT_PTR(td_be_bound_skt),
2237 	NULL,
2238 };
2239 
2240 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
2241 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
2242 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
2243 #define KNL_MCDRAM_FAR		BIT_ULL(22)
2244 #define KNL_DDR_LOCAL		BIT_ULL(23)
2245 #define KNL_DDR_FAR		BIT_ULL(24)
2246 #define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2247 				    KNL_DDR_LOCAL | KNL_DDR_FAR)
2248 #define KNL_L2_READ		SLM_DMND_READ
2249 #define KNL_L2_WRITE		SLM_DMND_WRITE
2250 #define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
2251 #define KNL_L2_ACCESS		SLM_LLC_ACCESS
2252 #define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2253 				   KNL_DRAM_ANY | SNB_SNP_ANY | \
2254 						  SNB_NON_DRAM)
2255 
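/*
 * Note the composition above: KNL counts a request as an L2 "miss"
 * when it is served by another tile's L2 (HITE/HITF), by any
 * MCDRAM/DDR source (local or far), by any snoop response, or by
 * non-DRAM, i.e. by anything other than the local tile's L2.
 */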
2256 static __initconst const u64 knl_hw_cache_extra_regs
2257 				[PERF_COUNT_HW_CACHE_MAX]
2258 				[PERF_COUNT_HW_CACHE_OP_MAX]
2259 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2260 	[C(LL)] = {
2261 		[C(OP_READ)] = {
2262 			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2263 			[C(RESULT_MISS)]   = 0,
2264 		},
2265 		[C(OP_WRITE)] = {
2266 			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2267 			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
2268 		},
2269 		[C(OP_PREFETCH)] = {
2270 			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2271 			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
2272 		},
2273 	},
2274 };
2275 
2276 /*
2277  * Used from PMIs where the LBRs are already disabled.
2278  *
2279  * This function may be called consecutively and must leave the PMU in the
2280  * disabled state after each call.
2281  *
2282  * During consecutive calls, the same disable value will be written to related
2283  * registers, so the PMU state remains unchanged.
2284  *
2285  * intel_bts events don't coexist with intel PMU's BTS events because of
2286  * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2287  * disabled around intel PMU's event batching etc, only inside the PMI handler.
2288  *
2289  * Avoid PEBS_ENABLE MSR access in PMIs.
2290  * GLOBAL_CTRL has already been disabled, so none of the counters count
2291  * anymore; it doesn't matter whether PEBS is enabled or not.
2292  * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2293  * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2294  * However, some cases may change the PEBS status, e.g. PMI
2295  * throttle. PEBS_ENABLE should be updated where the status changes.
2296  */
2297 static __always_inline void __intel_pmu_disable_all(bool bts)
2298 {
2299 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2300 
2301 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2302 
2303 	if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2304 		intel_pmu_disable_bts();
2305 }
2306 
2307 static __always_inline void intel_pmu_disable_all(void)
2308 {
2309 	__intel_pmu_disable_all(true);
2310 	static_call_cond(x86_pmu_pebs_disable_all)();
2311 	intel_pmu_lbr_disable_all();
2312 }
2313 
2314 static void __intel_pmu_enable_all(int added, bool pmi)
2315 {
2316 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2317 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2318 
2319 	intel_pmu_lbr_enable_all(pmi);
2320 
2321 	if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2322 		wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2323 		cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2324 	}
2325 
2326 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL,
2327 	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2328 
2329 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2330 		struct perf_event *event =
2331 			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2332 
2333 		if (WARN_ON_ONCE(!event))
2334 			return;
2335 
2336 		intel_pmu_enable_bts(event->hw.config);
2337 	}
2338 }
2339 
2340 static void intel_pmu_enable_all(int added)
2341 {
2342 	static_call_cond(x86_pmu_pebs_enable_all)();
2343 	__intel_pmu_enable_all(added, false);
2344 }
2345 
2346 static noinline int
2347 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2348 				  unsigned int cnt, unsigned long flags)
2349 {
2350 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2351 
2352 	intel_pmu_lbr_read();
2353 	cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2354 
2355 	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2356 	intel_pmu_enable_all(0);
2357 	local_irq_restore(flags);
2358 	return cnt;
2359 }
2360 
2361 static int
2362 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2363 {
2364 	unsigned long flags;
2365 
2366 	/* must not have branches... */
2367 	local_irq_save(flags);
2368 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2369 	__intel_pmu_lbr_disable();
2370 	/*            ... until here */
2371 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2372 }
2373 
2374 static int
2375 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2376 {
2377 	unsigned long flags;
2378 
2379 	/* must not have branches... */
2380 	local_irq_save(flags);
2381 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2382 	__intel_pmu_arch_lbr_disable();
2383 	/*            ... until here */
2384 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2385 }
2386 
2387 /*
2388  * Workaround for:
2389  *   Intel Errata AAK100 (model 26)
2390  *   Intel Errata AAP53  (model 30)
2391  *   Intel Errata BD53   (model 44)
2392  *
2393  * The official story:
2394  *   These chips need to be 'reset' when adding counters by programming the
2395  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2396  *   in sequence on the same PMC or on different PMCs.
2397  *
2398  * In practice it appears some of these events do in fact count, and
2399  * we need to program all 4 events.
2400  */
2401 static void intel_pmu_nhm_workaround(void)
2402 {
2403 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2404 	static const unsigned long nhm_magic[4] = {
2405 		0x4300B5,
2406 		0x4300D2,
2407 		0x4300B1,
2408 		0x4300B1
2409 	};
2410 	struct perf_event *event;
2411 	int i;
2412 
2413 	/*
2414 	 * The erratum requires the following steps:
2415 	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2416 	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2417 	 *    the corresponding PMCx;
2418 	 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2419 	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2420 	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2421 	 */
2422 
2423 	/*
2424 	 * The steps we actually take differ slightly from the above:
2425 	 * A) To reduce MSR operations, we don't run step 1) as the MSRs
2426 	 *    are already cleared before this function is called;
2427 	 * B) Call x86_perf_event_update to save PMCx before configuring
2428 	 *    PERFEVTSELx with the magic number;
2429 	 * C) With step 5), we only clear a PERFEVTSELx when it is not
2430 	 *    currently in use;
2431 	 * D) Call x86_perf_event_set_period to restore PMCx;
2432 	 */
2433 
2434 	/* We always operate on 4 pairs of PERF counters */
2435 	for (i = 0; i < 4; i++) {
2436 		event = cpuc->events[i];
2437 		if (event)
2438 			static_call(x86_pmu_update)(event);
2439 	}
2440 
2441 	for (i = 0; i < 4; i++) {
2442 		wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2443 		wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2444 	}
2445 
2446 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2447 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2448 
2449 	for (i = 0; i < 4; i++) {
2450 		event = cpuc->events[i];
2451 
2452 		if (event) {
2453 			static_call(x86_pmu_set_period)(event);
2454 			__x86_pmu_enable_event(&event->hw,
2455 					ARCH_PERFMON_EVENTSEL_ENABLE);
2456 		} else
2457 			wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2458 	}
2459 }
2460 
2461 static void intel_pmu_nhm_enable_all(int added)
2462 {
2463 	if (added)
2464 		intel_pmu_nhm_workaround();
2465 	intel_pmu_enable_all(added);
2466 }
2467 
2468 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2469 {
2470 	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2471 
2472 	if (cpuc->tfa_shadow != val) {
2473 		cpuc->tfa_shadow = val;
2474 		wrmsrq(MSR_TSX_FORCE_ABORT, val);
2475 	}
2476 }
2477 
2478 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2479 {
2480 	/*
2481 	 * We're going to use PMC3, make sure TFA is set before we touch it.
2482 	 */
2483 	if (cntr == 3)
2484 		intel_set_tfa(cpuc, true);
2485 }
2486 
2487 static void intel_tfa_pmu_enable_all(int added)
2488 {
2489 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2490 
2491 	/*
2492 	 * If we find PMC3 is no longer used when we enable the PMU, we can
2493 	 * clear TFA.
2494 	 */
2495 	if (!test_bit(3, cpuc->active_mask))
2496 		intel_set_tfa(cpuc, false);
2497 
2498 	intel_pmu_enable_all(added);
2499 }
2500 
2501 static inline u64 intel_pmu_get_status(void)
2502 {
2503 	u64 status;
2504 
2505 	rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
2506 
2507 	return status;
2508 }
2509 
2510 static inline void intel_pmu_ack_status(u64 ack)
2511 {
2512 	wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2513 }
2514 
2515 static inline bool event_is_checkpointed(struct perf_event *event)
2516 {
2517 	return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2518 }
2519 
2520 static inline void intel_set_masks(struct perf_event *event, int idx)
2521 {
2522 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2523 
2524 	if (event->attr.exclude_host)
2525 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2526 	if (event->attr.exclude_guest)
2527 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2528 	if (event_is_checkpointed(event))
2529 		__set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2530 }
2531 
2532 static inline void intel_clear_masks(struct perf_event *event, int idx)
2533 {
2534 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2535 
2536 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2537 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2538 	__clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2539 }
2540 
2541 static void intel_pmu_disable_fixed(struct perf_event *event)
2542 {
2543 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2544 	struct hw_perf_event *hwc = &event->hw;
2545 	int idx = hwc->idx;
2546 	u64 mask;
2547 
2548 	if (is_topdown_idx(idx)) {
2549 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2550 
2551 		/*
2552 		 * When there are other active TopDown events,
2553 		 * don't disable the fixed counter 3.
2554 		 */
2555 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2556 			return;
2557 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2558 	}
2559 
2560 	intel_clear_masks(event, idx);
2561 
2562 	mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2563 	cpuc->fixed_ctrl_val &= ~mask;
2564 }
2565 
2566 static inline void __intel_pmu_update_event_ext(int idx, u64 ext)
2567 {
2568 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2569 	u32 msr;
2570 
2571 	if (idx < INTEL_PMC_IDX_FIXED) {
2572 		msr = MSR_IA32_PMC_V6_GP0_CFG_C +
2573 		      x86_pmu.addr_offset(idx, false);
2574 	} else {
2575 		msr = MSR_IA32_PMC_V6_FX0_CFG_C +
2576 		      x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
2577 	}
2578 
2579 	cpuc->cfg_c_val[idx] = ext;
2580 	wrmsrq(msr, ext);
2581 }
2582 
2583 static void intel_pmu_disable_event_ext(struct perf_event *event)
2584 {
2585 	/*
2586 	 * Only clear the CFG_C MSR for PEBS counter group events;
2587 	 * this avoids the HW counter's value being added into
2588 	 * other PEBS records incorrectly after PEBS counter
2589 	 * group events are disabled.
2590 	 *
2591 	 * For other events, it's unnecessary to clear CFG_C MSRs,
2592 	 * since CFG_C has no effect while the counter is in the
2593 	 * disabled state. That helps to reduce the WRMSR overhead
2594 	 * on context switches.
2595 	 */
2596 	if (!is_pebs_counter_event_group(event))
2597 		return;
2598 
2599 	__intel_pmu_update_event_ext(event->hw.idx, 0);
2600 }
2601 
2602 DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext);
2603 
2604 static void intel_pmu_disable_event(struct perf_event *event)
2605 {
2606 	struct hw_perf_event *hwc = &event->hw;
2607 	int idx = hwc->idx;
2608 
2609 	switch (idx) {
2610 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
2611 		intel_clear_masks(event, idx);
2612 		static_call_cond(intel_pmu_disable_event_ext)(event);
2613 		x86_pmu_disable_event(event);
2614 		break;
2615 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2616 		static_call_cond(intel_pmu_disable_event_ext)(event);
2617 		fallthrough;
2618 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2619 		intel_pmu_disable_fixed(event);
2620 		break;
2621 	case INTEL_PMC_IDX_FIXED_BTS:
2622 		intel_pmu_disable_bts();
2623 		intel_pmu_drain_bts_buffer();
2624 		return;
2625 	case INTEL_PMC_IDX_FIXED_VLBR:
2626 		intel_clear_masks(event, idx);
2627 		break;
2628 	default:
2629 		intel_clear_masks(event, idx);
2630 		pr_warn("Failed to disable the event with invalid index %d\n",
2631 			idx);
2632 		return;
2633 	}
2634 
2635 	/*
2636 	 * This needs to be called after x86_pmu_disable_event,
2637 	 * so that we don't trigger the event without the PEBS bit set.
2638 	 */
2639 	if (unlikely(event->attr.precise_ip))
2640 		static_call(x86_pmu_pebs_disable)(event);
2641 }
2642 
2643 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2644 {
2645 	if (is_pebs_pt(event))
2646 		perf_report_aux_output_id(event, idx);
2647 }
2648 
2649 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2650 {
2651 	return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2652 }
2653 
2654 static void intel_pmu_del_event(struct perf_event *event)
2655 {
2656 	if (intel_pmu_needs_branch_stack(event))
2657 		intel_pmu_lbr_del(event);
2658 	if (event->attr.precise_ip)
2659 		intel_pmu_pebs_del(event);
2660 	if (is_pebs_counter_event_group(event) ||
2661 	    is_acr_event_group(event))
2662 		this_cpu_ptr(&cpu_hw_events)->n_late_setup--;
2663 }
2664 
2665 static int icl_set_topdown_event_period(struct perf_event *event)
2666 {
2667 	struct hw_perf_event *hwc = &event->hw;
2668 	s64 left = local64_read(&hwc->period_left);
2669 
2670 	/*
2671 	 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2672 	 * Software should start both registers, PERF_METRICS and fixed
2673 	 * counter 3, from zero.
2674 	 * Clear PERF_METRICS and fixed counter 3 at initialization.
2675 	 * After that, both MSRs are cleared on each read, so there is
2676 	 * no need to clear them again.
2677 	 */
2678 	if (left == x86_pmu.max_period) {
2679 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
2680 		wrmsrq(MSR_PERF_METRICS, 0);
2681 		hwc->saved_slots = 0;
2682 		hwc->saved_metric = 0;
2683 	}
2684 
2685 	if ((hwc->saved_slots) && is_slots_event(event)) {
2686 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2687 		wrmsrq(MSR_PERF_METRICS, hwc->saved_metric);
2688 	}
2689 
2690 	perf_event_update_userpage(event);
2691 
2692 	return 0;
2693 }
2694 
2695 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2696 
2697 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2698 {
2699 	u32 val;
2700 
2701 	/*
2702 	 * The metric is reported as an 8-bit integer fraction
2703 	 * summing up to 0xff.
2704 	 * slots-in-metric = (Metric / 0xff) * slots
2705 	 */
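	/*
	 * Worked example (illustrative numbers only): with a metric byte
	 * val = 0x80 and slots = 1000, slots-in-metric =
	 * 1000 * 0x80 / 0xff ~= 501, i.e. about half of the slots are
	 * attributed to this metric.
	 */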
2706 	val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2707 	return  mul_u64_u32_div(slots, val, 0xff);
2708 }
2709 
2710 static u64 icl_get_topdown_value(struct perf_event *event,
2711 				       u64 slots, u64 metrics)
2712 {
2713 	int idx = event->hw.idx;
2714 	u64 delta;
2715 
2716 	if (is_metric_idx(idx))
2717 		delta = icl_get_metrics_event_value(metrics, slots, idx);
2718 	else
2719 		delta = slots;
2720 
2721 	return delta;
2722 }
2723 
2724 static void __icl_update_topdown_event(struct perf_event *event,
2725 				       u64 slots, u64 metrics,
2726 				       u64 last_slots, u64 last_metrics)
2727 {
2728 	u64 delta, last = 0;
2729 
2730 	delta = icl_get_topdown_value(event, slots, metrics);
2731 	if (last_slots)
2732 		last = icl_get_topdown_value(event, last_slots, last_metrics);
2733 
2734 	/*
2735 	 * The 8-bit integer fraction of the metric may not be accurate,
2736 	 * especially when the change is very small.
2737 	 * For example, if only a few bad_spec events happen, the fraction
2738 	 * may be reduced from 1 to 0. If so, the bad_spec event value
2739 	 * will be 0, which is definitely less than the last value.
2740 	 * Avoid updating event->count in this case.
2741 	 */
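	/*
	 * E.g. (illustrative): if the last reading attributed 2 slots to
	 * bad_spec and rounding now yields 0, delta (0) is not greater
	 * than last (2), so the stale decrease is ignored rather than
	 * folded into event->count.
	 */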
2742 	if (delta > last) {
2743 		delta -= last;
2744 		local64_add(delta, &event->count);
2745 	}
2746 }
2747 
2748 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2749 				      u64 metrics, int metric_end)
2750 {
2751 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2752 	struct perf_event *other;
2753 	int idx;
2754 
2755 	event->hw.saved_slots = slots;
2756 	event->hw.saved_metric = metrics;
2757 
2758 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2759 		if (!is_topdown_idx(idx))
2760 			continue;
2761 		other = cpuc->events[idx];
2762 		other->hw.saved_slots = slots;
2763 		other->hw.saved_metric = metrics;
2764 	}
2765 }
2766 
2767 /*
2768  * Update all active Topdown events.
2769  *
2770  * PERF_METRICS and fixed counter 3 are read separately. The values may be
2771  * modified by an NMI. The PMU has to be disabled before calling this function.
2772  */
2773 
2774 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val)
2775 {
2776 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2777 	struct perf_event *other;
2778 	u64 slots, metrics;
2779 	bool reset = true;
2780 	int idx;
2781 
2782 	if (!val) {
2783 		/* read Fixed counter 3 */
2784 		slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
2785 		if (!slots)
2786 			return 0;
2787 
2788 		/* read PERF_METRICS */
2789 		metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
2790 	} else {
2791 		slots = val[0];
2792 		metrics = val[1];
2793 		/*
2794 		 * Don't reset the PERF_METRICS and Fixed counter 3
2795 		 * for each PEBS record read. Utilize the RDPMC metrics
2796 		 * clear mode.
2797 		 */
2798 		reset = false;
2799 	}
2800 
2801 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2802 		if (!is_topdown_idx(idx))
2803 			continue;
2804 		other = cpuc->events[idx];
2805 		__icl_update_topdown_event(other, slots, metrics,
2806 					   event ? event->hw.saved_slots : 0,
2807 					   event ? event->hw.saved_metric : 0);
2808 	}
2809 
2810 	/*
2811 	 * Check and update this event, which may have been cleared
2812 	 * in active_mask, e.g. by x86_pmu_stop().
2813 	 */
2814 	if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2815 		__icl_update_topdown_event(event, slots, metrics,
2816 					   event->hw.saved_slots,
2817 					   event->hw.saved_metric);
2818 
2819 		/*
2820 		 * In x86_pmu_stop(), the event is cleared in active_mask first,
2821 		 * and then the delta is drained, which indicates a context
2822 		 * switch for counting.
2823 		 * Save the metric and slots values for the context switch.
2824 		 * There is no need to reset PERF_METRICS and fixed counter 3,
2825 		 * because the values will be restored on the next schedule-in.
2826 		 */
2827 		update_saved_topdown_regs(event, slots, metrics, metric_end);
2828 		reset = false;
2829 	}
2830 
2831 	if (reset) {
2832 		/* The fixed counter 3 has to be written before the PERF_METRICS. */
2833 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
2834 		wrmsrq(MSR_PERF_METRICS, 0);
2835 		if (event)
2836 			update_saved_topdown_regs(event, 0, 0, metric_end);
2837 	}
2838 
2839 	return slots;
2840 }
2841 
2842 static u64 icl_update_topdown_event(struct perf_event *event, u64 *val)
2843 {
2844 	return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2845 						 x86_pmu.num_topdown_events - 1,
2846 					  val);
2847 }
2848 
2849 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
2850 
2851 static void intel_pmu_read_event(struct perf_event *event)
2852 {
2853 	if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) ||
2854 	    is_pebs_counter_event_group(event)) {
2855 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2856 		bool pmu_enabled = cpuc->enabled;
2857 
2858 		/* Only need to call update_topdown_event() once for group read. */
2859 		if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
2860 			return;
2861 
2862 		cpuc->enabled = 0;
2863 		if (pmu_enabled)
2864 			intel_pmu_disable_all();
2865 
2866 		/*
2867 		 * If PEBS counter snapshotting is enabled,
2868 		 * the topdown event is available in PEBS records.
2869 		 */
2870 		if (is_topdown_count(event) && !is_pebs_counter_event_group(event))
2871 			static_call(intel_pmu_update_topdown_event)(event, NULL);
2872 		else
2873 			intel_pmu_drain_pebs_buffer();
2874 
2875 		cpuc->enabled = pmu_enabled;
2876 		if (pmu_enabled)
2877 			intel_pmu_enable_all(0);
2878 
2879 		return;
2880 	}
2881 
2882 	x86_perf_event_update(event);
2883 }
2884 
2885 static void intel_pmu_enable_fixed(struct perf_event *event)
2886 {
2887 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2888 	struct hw_perf_event *hwc = &event->hw;
2889 	int idx = hwc->idx;
2890 	u64 bits = 0;
2891 
2892 	if (is_topdown_idx(idx)) {
2893 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2894 		/*
2895 		 * When there are other active TopDown events,
2896 		 * don't enable the fixed counter 3 again.
2897 		 */
2898 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2899 			return;
2900 
2901 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2902 
2903 		if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR)
2904 			bits |= INTEL_FIXED_3_METRICS_CLEAR;
2905 	}
2906 
2907 	intel_set_masks(event, idx);
2908 
2909 	/*
2910 	 * Enable IRQ generation (0x8), if not PEBS,
2911 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2912 	 * if requested:
2913 	 */
2914 	if (!event->attr.precise_ip)
2915 		bits |= INTEL_FIXED_0_ENABLE_PMI;
2916 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2917 		bits |= INTEL_FIXED_0_USER;
2918 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2919 		bits |= INTEL_FIXED_0_KERNEL;
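	/*
	 * E.g. a non-PEBS event counting both ring 0 and ring 3 ends up
	 * with bits = 0x8 | 0x2 | 0x1 = 0xb here; intel_fixed_bits_by_idx()
	 * below shifts that into the 4-bit control field of the target
	 * fixed counter (0xb0 for fixed counter 1, and so on).
	 */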
2920 
2921 	/*
2922 	 * ANY bit is supported in v3 and up
2923 	 */
2924 	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2925 		bits |= INTEL_FIXED_0_ANYTHREAD;
2926 
2927 	idx -= INTEL_PMC_IDX_FIXED;
2928 	bits = intel_fixed_bits_by_idx(idx, bits);
2929 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
2930 		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2931 
2932 	cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2933 	cpuc->fixed_ctrl_val |= bits;
2934 }
2935 
2936 static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
2937 {
2938 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2939 	int msr_b, msr_c;
2940 	int msr_offset;
2941 
2942 	if (!mask && !cpuc->acr_cfg_b[idx])
2943 		return;
2944 
2945 	if (idx < INTEL_PMC_IDX_FIXED) {
2946 		msr_b = MSR_IA32_PMC_V6_GP0_CFG_B;
2947 		msr_c = MSR_IA32_PMC_V6_GP0_CFG_C;
2948 		msr_offset = x86_pmu.addr_offset(idx, false);
2949 	} else {
2950 		msr_b = MSR_IA32_PMC_V6_FX0_CFG_B;
2951 		msr_c = MSR_IA32_PMC_V6_FX0_CFG_C;
2952 		msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
2953 	}
2954 
2955 	if (cpuc->acr_cfg_b[idx] != mask) {
2956 		wrmsrl(msr_b + msr_offset, mask);
2957 		cpuc->acr_cfg_b[idx] = mask;
2958 	}
2959 	/* Only need to update the reload value when there is a valid config value. */
2960 	if (mask && cpuc->acr_cfg_c[idx] != reload) {
2961 		wrmsrl(msr_c + msr_offset, reload);
2962 		cpuc->acr_cfg_c[idx] = reload;
2963 	}
2964 }
2965 
2966 static void intel_pmu_enable_acr(struct perf_event *event)
2967 {
2968 	struct hw_perf_event *hwc = &event->hw;
2969 
2970 	if (!is_acr_event_group(event) || !event->attr.config2) {
2971 		/*
2972 		 * Disabling the event doesn't clear the ACR CFG register,
2973 		 * so check and clear it here.
2974 		 */
2975 		intel_pmu_config_acr(hwc->idx, 0, 0);
2976 		return;
2977 	}
2978 
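	/*
	 * The reload value is programmed as -sample_period: the counter
	 * counts up and overflows, so writing the two's complement arms it
	 * to overflow after sample_period more events.
	 */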
2979 	intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
2980 }
2981 
2982 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
2983 
2984 static void intel_pmu_enable_event_ext(struct perf_event *event)
2985 {
2986 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2987 	struct hw_perf_event *hwc = &event->hw;
2988 	union arch_pebs_index old, new;
2989 	struct arch_pebs_cap cap;
2990 	u64 ext = 0;
2991 
2992 	cap = hybrid(cpuc->pmu, arch_pebs_cap);
2993 
2994 	if (event->attr.precise_ip) {
2995 		u64 pebs_data_cfg = intel_get_arch_pebs_data_config(event);
2996 
2997 		ext |= ARCH_PEBS_EN;
2998 		if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD)
2999 			ext |= (-hwc->sample_period) & ARCH_PEBS_RELOAD;
3000 
3001 		if (pebs_data_cfg && cap.caps) {
3002 			if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
3003 				ext |= ARCH_PEBS_AUX & cap.caps;
3004 
3005 			if (pebs_data_cfg & PEBS_DATACFG_GP)
3006 				ext |= ARCH_PEBS_GPR & cap.caps;
3007 
3008 			if (pebs_data_cfg & PEBS_DATACFG_XMMS)
3009 				ext |= ARCH_PEBS_VECR_XMM & cap.caps;
3010 
3011 			if (pebs_data_cfg & PEBS_DATACFG_LBRS)
3012 				ext |= ARCH_PEBS_LBR & cap.caps;
3013 
3014 			if (pebs_data_cfg &
3015 			    (PEBS_DATACFG_CNTR_MASK << PEBS_DATACFG_CNTR_SHIFT))
3016 				ext |= ARCH_PEBS_CNTR_GP & cap.caps;
3017 
3018 			if (pebs_data_cfg &
3019 			    (PEBS_DATACFG_FIX_MASK << PEBS_DATACFG_FIX_SHIFT))
3020 				ext |= ARCH_PEBS_CNTR_FIXED & cap.caps;
3021 
3022 			if (pebs_data_cfg & PEBS_DATACFG_METRICS)
3023 				ext |= ARCH_PEBS_CNTR_METRICS & cap.caps;
3024 		}
3025 
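		/*
		 * Use the multi-record (large PEBS) threshold only when all
		 * PEBS events on this CPU are large-PEBS capable; otherwise
		 * take an interrupt on every single record.
		 */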
3026 		if (cpuc->n_pebs == cpuc->n_large_pebs)
3027 			new.thresh = ARCH_PEBS_THRESH_MULTI;
3028 		else
3029 			new.thresh = ARCH_PEBS_THRESH_SINGLE;
3030 
3031 		rdmsrq(MSR_IA32_PEBS_INDEX, old.whole);
3032 		if (new.thresh != old.thresh || !old.en) {
3033 			if (old.thresh == ARCH_PEBS_THRESH_MULTI && old.wr > 0) {
3034 				/*
3035 				 * Large PEBS was enabled.
3036 				 * Drain PEBS buffer before applying the single PEBS.
3037 				 */
3038 				intel_pmu_drain_pebs_buffer();
3039 			} else {
3040 				new.wr = 0;
3041 				new.full = 0;
3042 				new.en = 1;
3043 				wrmsrq(MSR_IA32_PEBS_INDEX, new.whole);
3044 			}
3045 		}
3046 	}
3047 
3048 	if (is_pebs_counter_event_group(event))
3049 		ext |= ARCH_PEBS_CNTR_ALLOW;
3050 
3051 	if (cpuc->cfg_c_val[hwc->idx] != ext)
3052 		__intel_pmu_update_event_ext(hwc->idx, ext);
3053 }
3054 
3055 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext);
3056 
3057 static void intel_pmu_enable_event(struct perf_event *event)
3058 {
3059 	u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
3060 	struct hw_perf_event *hwc = &event->hw;
3061 	int idx = hwc->idx;
3062 
3063 	if (unlikely(event->attr.precise_ip))
3064 		static_call(x86_pmu_pebs_enable)(event);
3065 
3066 	switch (idx) {
3067 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
3068 		if (branch_sample_counters(event))
3069 			enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
3070 		intel_set_masks(event, idx);
3071 		static_call_cond(intel_pmu_enable_acr_event)(event);
3072 		static_call_cond(intel_pmu_enable_event_ext)(event);
3073 		__x86_pmu_enable_event(hwc, enable_mask);
3074 		break;
3075 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
3076 		static_call_cond(intel_pmu_enable_acr_event)(event);
3077 		static_call_cond(intel_pmu_enable_event_ext)(event);
3078 		fallthrough;
3079 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
3080 		intel_pmu_enable_fixed(event);
3081 		break;
3082 	case INTEL_PMC_IDX_FIXED_BTS:
3083 		if (!__this_cpu_read(cpu_hw_events.enabled))
3084 			return;
3085 		intel_pmu_enable_bts(hwc->config);
3086 		break;
3087 	case INTEL_PMC_IDX_FIXED_VLBR:
3088 		intel_set_masks(event, idx);
3089 		break;
3090 	default:
3091 		pr_warn("Failed to enable the event with invalid index %d\n",
3092 			idx);
3093 	}
3094 }
3095 
3096 static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
3097 {
3098 	struct perf_event *event, *leader;
3099 	int i, j, idx;
3100 
3101 	for (i = 0; i < cpuc->n_events; i++) {
3102 		leader = cpuc->event_list[i];
3103 		if (!is_acr_event_group(leader))
3104 			continue;
3105 
3106 		/* The ACR events must be contiguous. */
3107 		for (j = i; j < cpuc->n_events; j++) {
3108 			event = cpuc->event_list[j];
3109 			if (event->group_leader != leader->group_leader)
3110 				break;
3111 			for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
3112 				if (i + idx >= cpuc->n_events ||
3113 				    !is_acr_event_group(cpuc->event_list[i + idx]))
3114 					return;
3115 				__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
3116 			}
3117 		}
3118 		i = j - 1;
3119 	}
3120 }
3121 
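/*
 * Illustrative reading of the loop above (hypothetical group): if an ACR
 * group member has bit 1 set in attr.config2, the hardware counter
 * assigned to the group's second event is recorded in that member's
 * hw.config1, which intel_pmu_enable_acr() later programs into the
 * CFG_B reload-cause mask.
 */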
3122 void intel_pmu_late_setup(void)
3123 {
3124 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3125 
3126 	if (!cpuc->n_late_setup)
3127 		return;
3128 
3129 	intel_pmu_pebs_late_setup(cpuc);
3130 	intel_pmu_acr_late_setup(cpuc);
3131 }
3132 
3133 static void intel_pmu_add_event(struct perf_event *event)
3134 {
3135 	if (event->attr.precise_ip)
3136 		intel_pmu_pebs_add(event);
3137 	if (intel_pmu_needs_branch_stack(event))
3138 		intel_pmu_lbr_add(event);
3139 	if (is_pebs_counter_event_group(event) ||
3140 	    is_acr_event_group(event))
3141 		this_cpu_ptr(&cpu_hw_events)->n_late_setup++;
3142 }
3143 
3144 /*
3145  * Save and restart an expired event. Called by NMI contexts,
3146  * so it has to be careful about preempting normal event ops:
3147  */
3148 int intel_pmu_save_and_restart(struct perf_event *event)
3149 {
3150 	static_call(x86_pmu_update)(event);
3151 	/*
3152 	 * For a checkpointed counter always reset back to 0.  This
3153 	 * avoids a situation where the counter overflows, aborts the
3154 	 * transaction and is then set back to shortly before the
3155 	 * overflow, and overflows and aborts again.
3156 	 */
3157 	if (unlikely(event_is_checkpointed(event))) {
3158 		/* No race with NMIs because the counter should not be armed */
3159 		wrmsrq(event->hw.event_base, 0);
3160 		local64_set(&event->hw.prev_count, 0);
3161 	}
3162 	return static_call(x86_pmu_set_period)(event);
3163 }
3164 
3165 static int intel_pmu_set_period(struct perf_event *event)
3166 {
3167 	if (unlikely(is_topdown_count(event)))
3168 		return static_call(intel_pmu_set_topdown_event_period)(event);
3169 
3170 	return x86_perf_event_set_period(event);
3171 }
3172 
3173 static u64 intel_pmu_update(struct perf_event *event)
3174 {
3175 	if (unlikely(is_topdown_count(event)))
3176 		return static_call(intel_pmu_update_topdown_event)(event, NULL);
3177 
3178 	return x86_perf_event_update(event);
3179 }
3180 
3181 static void intel_pmu_reset(void)
3182 {
3183 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
3184 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3185 	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
3186 	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
3187 	unsigned long flags;
3188 	int idx;
3189 
3190 	if (!*(u64 *)cntr_mask)
3191 		return;
3192 
3193 	local_irq_save(flags);
3194 
3195 	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
3196 
3197 	for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
3198 		wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
3199 		wrmsrq_safe(x86_pmu_event_addr(idx),  0ull);
3200 	}
3201 	for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
3202 		if (fixed_counter_disabled(idx, cpuc->pmu))
3203 			continue;
3204 		wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
3205 	}
3206 
3207 	if (ds)
3208 		ds->bts_index = ds->bts_buffer_base;
3209 
3210 	/* Ack all overflows and disable fixed counters */
3211 	if (x86_pmu.version >= 2) {
3212 		intel_pmu_ack_status(intel_pmu_get_status());
3213 		wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
3214 	}
3215 
3216 	/* Reset LBRs and LBR freezing */
3217 	if (x86_pmu.lbr_nr) {
3218 		update_debugctlmsr(get_debugctlmsr() &
3219 			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
3220 	}
3221 
3222 	local_irq_restore(flags);
3223 }
3224 
3225 /*
3226  * We may be running with guest PEBS events created by KVM, and the
3227  * PEBS records are logged into the guest's DS and invisible to host.
3228  *
3229  * In the case of guest PEBS overflow, we only trigger a fake event
3230  * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
3231  * The guest will then vm-entry and check the guest DS area to read
3232  * the guest PEBS records.
3233  *
3234  * The contents and other behavior of the guest event do not matter.
3235  */
3236 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
3237 				      struct perf_sample_data *data)
3238 {
3239 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3240 	u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
3241 	struct perf_event *event = NULL;
3242 	int bit;
3243 
3244 	if (!unlikely(perf_guest_state()))
3245 		return;
3246 
3247 	if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3248 	    !guest_pebs_idxs)
3249 		return;
3250 
3251 	for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3252 		event = cpuc->events[bit];
3253 		if (!event->attr.precise_ip)
3254 			continue;
3255 
3256 		perf_sample_data_init(data, 0, event->hw.last_period);
3257 		perf_event_overflow(event, data, regs);
3258 
3259 		/* Injecting one fake event is enough. */
3260 		break;
3261 	}
3262 }
3263 
3264 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3265 {
3266 	struct perf_sample_data data;
3267 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3268 	int bit;
3269 	int handled = 0;
3270 
3271 	inc_irq_stat(apic_perf_irqs);
3272 
3273 	/*
3274 	 * Ignore a range of extra bits in status that do not indicate
3275 	 * overflow by themselves.
3276 	 */
3277 	status &= ~(GLOBAL_STATUS_COND_CHG |
3278 		    GLOBAL_STATUS_ASIF |
3279 		    GLOBAL_STATUS_LBRS_FROZEN);
3280 	if (!status)
3281 		return 0;
3282 	/*
3283 	 * In case multiple PEBS events are sampled at the same time,
3284 	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3285 	 * PEBS buffer overflow and also seeing at most 3 PEBS counters
3286 	 * having their bits set in the status register. This is a sign
3287 	 * that there was at least one PEBS record pending at the time
3288 	 * of the PMU interrupt. PEBS counters must only be processed
3289 	 * via the drain_pebs() calls and not via the regular sample
3290 	 * processing loop later in this function; otherwise
3291 	 * phony regular samples may be generated in the sampling buffer
3292 	 * not marked with the EXACT tag. Another possibility is to have
3293 	 * one PEBS event and at least one non-PEBS event which overflows
3294 	 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
3295 	 * not be set, yet the overflow status bit for the PEBS counter will
3296 	 * be set on Skylake.
3297 	 *
3298 	 * To avoid this problem, we systematically ignore the PEBS-enabled
3299 	 * counters from the GLOBAL_STATUS mask and we always process PEBS
3300 	 * events via drain_pebs().
3301 	 */
3302 	status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3303 
3304 	/*
3305 	 * PEBS overflow sets bit 62 in the global status register
3306 	 */
3307 	if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3308 		u64 pebs_enabled = cpuc->pebs_enabled;
3309 
3310 		handled++;
3311 		x86_pmu_handle_guest_pebs(regs, &data);
3312 		static_call(x86_pmu_drain_pebs)(regs, &data);
3313 
3314 		/*
3315 		 * PMI throttle may be triggered, which stops the PEBS event.
3316 		 * Although cpuc->pebs_enabled is updated accordingly, the
3317 		 * MSR_IA32_PEBS_ENABLE is not updated, because
3318 		 * cpuc->enabled has been forced to 0 in the PMI.
3319 		 * Update the MSR if pebs_enabled is changed.
3320 		 */
3321 		if (pebs_enabled != cpuc->pebs_enabled)
3322 			wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3323 
3324 		/*
3325 		 * The PEBS handler above (PEBS counter snapshotting) has already
3326 		 * updated fixed counter 3 and the perf metrics counts if they are
3327 		 * in a counter group, so there is no need to update them again.
3328 		 */
3329 		if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3330 		    is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3331 			status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3332 	}
3333 
3334 	/*
3335 	 * Arch PEBS sets bit 54 in the global status register
3336 	 */
3337 	if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT,
3338 				 (unsigned long *)&status)) {
3339 		handled++;
3340 		static_call(x86_pmu_drain_pebs)(regs, &data);
3341 
3342 		if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3343 		    is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3344 			status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3345 	}
3346 
3347 	/*
3348 	 * Intel PT
3349 	 */
3350 	if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3351 		handled++;
3352 		if (!perf_guest_handle_intel_pt_intr())
3353 			intel_pt_interrupt();
3354 	}
3355 
3356 	/*
3357 	 * Intel Perf metrics
3358 	 */
3359 	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3360 		handled++;
3361 		static_call(intel_pmu_update_topdown_event)(NULL, NULL);
3362 	}
3363 
3364 	status &= hybrid(cpuc->pmu, intel_ctrl);
3365 
3366 	/*
3367 	 * Checkpointed counters can lead to 'spurious' PMIs because the
3368 	 * rollback caused by the PMI will have cleared the overflow status
3369 	 * bit. Therefore always force probe these counters.
3370 	 */
3371 	status |= cpuc->intel_cp_status;
3372 
3373 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3374 		struct perf_event *event = cpuc->events[bit];
3375 		u64 last_period;
3376 
3377 		handled++;
3378 
3379 		if (!test_bit(bit, cpuc->active_mask))
3380 			continue;
3381 		/* Event may have already been cleared: */
3382 		if (!event)
3383 			continue;
3384 
3385 		/*
3386 		 * There may be unprocessed PEBS records in the PEBS buffer,
3387 		 * which still stores the previous values.
3388 		 * Process those records first before handling the latest value.
3389 		 * For example,
3390 		 * A is a regular counter
3391 		 * B is a PEBS event which reads A
3392 		 * C is a PEBS event
3393 		 *
3394 		 * The following can happen:
3395 		 * B-assist			A=1
3396 		 * C				A=2
3397 		 * B-assist			A=3
3398 		 * A-overflow-PMI		A=4
3399 		 * C-assist-PMI (PEBS buffer)	A=5
3400 		 *
3401 		 * The PEBS buffer has to be drained before handling the A-PMI
3402 		 */
3403 		if (is_pebs_counter_event_group(event))
3404 			static_call(x86_pmu_drain_pebs)(regs, &data);
3405 
3406 		last_period = event->hw.last_period;
3407 
3408 		if (!intel_pmu_save_and_restart(event))
3409 			continue;
3410 
3411 		perf_sample_data_init(&data, 0, last_period);
3412 
3413 		if (has_branch_stack(event))
3414 			intel_pmu_lbr_save_brstack(&data, cpuc, event);
3415 
3416 		perf_event_overflow(event, &data, regs);
3417 	}
3418 
3419 	return handled;
3420 }
3421 
3422 /*
3423  * This handler is triggered by the local APIC, so the APIC IRQ handling
3424  * rules apply:
3425  */
3426 static int intel_pmu_handle_irq(struct pt_regs *regs)
3427 {
3428 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3429 	bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3430 	bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3431 	int loops;
3432 	u64 status;
3433 	int handled;
3434 	int pmu_enabled;
3435 
3436 	/*
3437 	 * Save the PMU state.
3438 	 * It needs to be restored when leaving the handler.
3439 	 */
3440 	pmu_enabled = cpuc->enabled;
3441 	/*
3442 	 * In general, the early ACK is only applied for old platforms.
3443 	 * For big cores starting from Haswell, the late ACK should be
3444 	 * applied.
3445 	 * For small cores after Tremont, we have to do the ACK right
3446 	 * before re-enabling counters, which is in the middle of the
3447 	 * NMI handler.
3448 	 */
3449 	if (!late_ack && !mid_ack)
3450 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3451 	intel_bts_disable_local();
3452 	cpuc->enabled = 0;
3453 	__intel_pmu_disable_all(true);
3454 	handled = intel_pmu_drain_bts_buffer();
3455 	handled += intel_bts_interrupt();
3456 	status = intel_pmu_get_status();
3457 	if (!status)
3458 		goto done;
3459 
3460 	loops = 0;
3461 again:
3462 	intel_pmu_lbr_read();
3463 	intel_pmu_ack_status(status);
3464 	if (++loops > 100) {
3465 		static bool warned;
3466 
3467 		if (!warned) {
3468 			WARN(1, "perfevents: irq loop stuck!\n");
3469 			perf_event_print_debug();
3470 			warned = true;
3471 		}
3472 		intel_pmu_reset();
3473 		goto done;
3474 	}
3475 
3476 	handled += handle_pmi_common(regs, status);
3477 
3478 	/*
3479 	 * Repeat if there is more work to be done:
3480 	 */
3481 	status = intel_pmu_get_status();
3482 	if (status)
3483 		goto again;
3484 
3485 done:
3486 	if (mid_ack)
3487 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3488 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
3489 	cpuc->enabled = pmu_enabled;
3490 	if (pmu_enabled)
3491 		__intel_pmu_enable_all(0, true);
3492 	intel_bts_enable_local();
3493 
3494 	/*
3495 	 * Only unmask the NMI after the overflow counters
3496 	 * have been reset. This avoids spurious NMIs on
3497 	 * Haswell CPUs.
3498 	 */
3499 	if (late_ack)
3500 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3501 	return handled;
3502 }
3503 
3504 static struct event_constraint *
3505 intel_bts_constraints(struct perf_event *event)
3506 {
3507 	if (unlikely(intel_pmu_has_bts(event)))
3508 		return &bts_constraint;
3509 
3510 	return NULL;
3511 }
3512 
3513 /*
3514  * Note: matches a fake event, like Fixed2.
3515  */
3516 static struct event_constraint *
3517 intel_vlbr_constraints(struct perf_event *event)
3518 {
3519 	struct event_constraint *c = &vlbr_constraint;
3520 
3521 	if (unlikely(constraint_match(c, event->hw.config))) {
3522 		event->hw.flags |= c->flags;
3523 		return c;
3524 	}
3525 
3526 	return NULL;
3527 }
3528 
3529 static int intel_alt_er(struct cpu_hw_events *cpuc,
3530 			int idx, u64 config)
3531 {
3532 	struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3533 	int alt_idx = idx;
3534 
3535 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3536 		return idx;
3537 
3538 	if (idx == EXTRA_REG_RSP_0)
3539 		alt_idx = EXTRA_REG_RSP_1;
3540 
3541 	if (idx == EXTRA_REG_RSP_1)
3542 		alt_idx = EXTRA_REG_RSP_0;
3543 
3544 	if (config & ~extra_regs[alt_idx].valid_mask)
3545 		return idx;
3546 
3547 	return alt_idx;
3548 }
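/*
 * Editor's note: on parts with PMU_FL_HAS_RSP_1 the two offcore response
 * MSRs are symmetric, so a second event whose config does not fit the busy
 * extra reg can be steered to the sibling one, e.g.:
 *
 *	idx = intel_alt_er(cpuc, EXTRA_REG_RSP_0, config);
 *	// idx is EXTRA_REG_RSP_1 if config fits that reg's valid_mask,
 *	// otherwise the original EXTRA_REG_RSP_0 is returned
 */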
3549 
3550 static void intel_fixup_er(struct perf_event *event, int idx)
3551 {
3552 	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3553 	event->hw.extra_reg.idx = idx;
3554 
3555 	if (idx == EXTRA_REG_RSP_0) {
3556 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3557 		event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3558 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3559 	} else if (idx == EXTRA_REG_RSP_1) {
3560 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3561 		event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3562 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3563 	}
3564 }
3565 
3566 /*
3567  * manage allocation of shared extra msr for certain events
3568  *
3569  * sharing can be:
3570  * per-cpu: to be shared between the various events on a single PMU
3571  * per-core: per-cpu + shared by HT threads
3572  */
3573 static struct event_constraint *
3574 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3575 				   struct perf_event *event,
3576 				   struct hw_perf_event_extra *reg)
3577 {
3578 	struct event_constraint *c = &emptyconstraint;
3579 	struct er_account *era;
3580 	unsigned long flags;
3581 	int idx = reg->idx;
3582 
3583 	/*
3584 	 * reg->alloc can be set due to existing state, so for fake cpuc we
3585 	 * need to ignore this, otherwise we might fail to allocate proper fake
3586 	 * state for this extra reg constraint. Also see the comment below.
3587 	 */
3588 	if (reg->alloc && !cpuc->is_fake)
3589 		return NULL; /* call x86_get_event_constraint() */
3590 
3591 again:
3592 	era = &cpuc->shared_regs->regs[idx];
3593 	/*
3594 	 * we use spin_lock_irqsave() to avoid lockdep issues when
3595 	 * passing a fake cpuc
3596 	 */
3597 	raw_spin_lock_irqsave(&era->lock, flags);
3598 
3599 	if (!atomic_read(&era->ref) || era->config == reg->config) {
3600 
3601 		/*
3602 		 * If it's a fake cpuc -- as per validate_{group,event}() we
3603 		 * shouldn't touch event state and we can avoid doing so
3604 		 * since both will only call get_event_constraints() once
3605 		 * on each event, this avoids the need for reg->alloc.
3606 		 *
3607 		 * Not doing the ER fixup will only result in era->reg being
3608 		 * wrong, but since we won't actually try and program hardware
3609 		 * this isn't a problem either.
3610 		 */
3611 		if (!cpuc->is_fake) {
3612 			if (idx != reg->idx)
3613 				intel_fixup_er(event, idx);
3614 
3615 			/*
3616 			 * x86_schedule_events() can call get_event_constraints()
3617 			 * multiple times on events in the case of incremental
3618 		 * scheduling. reg->alloc ensures we only do the ER
3619 			 * allocation once.
3620 			 */
3621 			reg->alloc = 1;
3622 		}
3623 
3624 		/* lock in msr value */
3625 		era->config = reg->config;
3626 		era->reg = reg->reg;
3627 
3628 		/* one more user */
3629 		atomic_inc(&era->ref);
3630 
3631 		/*
3632 		 * need to call x86_get_event_constraint()
3633 		 * to check if associated event has constraints
3634 		 */
3635 		c = NULL;
3636 	} else {
3637 		idx = intel_alt_er(cpuc, idx, reg->config);
3638 		if (idx != reg->idx) {
3639 			raw_spin_unlock_irqrestore(&era->lock, flags);
3640 			goto again;
3641 		}
3642 	}
3643 	raw_spin_unlock_irqrestore(&era->lock, flags);
3644 
3645 	return c;
3646 }
3647 
3648 static void
3649 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3650 				   struct hw_perf_event_extra *reg)
3651 {
3652 	struct er_account *era;
3653 
3654 	/*
3655 	 * Only put constraint if extra reg was actually allocated. Also takes
3656 	 * care of events which do not use an extra shared reg.
3657 	 *
3658 	 * Also, if this is a fake cpuc we shouldn't touch any event state
3659 	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3660 	 * either since it'll be thrown out.
3661 	 */
3662 	if (!reg->alloc || cpuc->is_fake)
3663 		return;
3664 
3665 	era = &cpuc->shared_regs->regs[reg->idx];
3666 
3667 	/* one fewer user */
3668 	atomic_dec(&era->ref);
3669 
3670 	/* allocate again next time */
3671 	reg->alloc = 0;
3672 }
3673 
3674 static struct event_constraint *
3675 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3676 			      struct perf_event *event)
3677 {
3678 	struct event_constraint *c = NULL, *d;
3679 	struct hw_perf_event_extra *xreg, *breg;
3680 
3681 	xreg = &event->hw.extra_reg;
3682 	if (xreg->idx != EXTRA_REG_NONE) {
3683 		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3684 		if (c == &emptyconstraint)
3685 			return c;
3686 	}
3687 	breg = &event->hw.branch_reg;
3688 	if (breg->idx != EXTRA_REG_NONE) {
3689 		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3690 		if (d == &emptyconstraint) {
3691 			__intel_shared_reg_put_constraints(cpuc, xreg);
3692 			c = d;
3693 		}
3694 	}
3695 	return c;
3696 }
3697 
3698 struct event_constraint *
3699 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3700 			  struct perf_event *event)
3701 {
3702 	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3703 	struct event_constraint *c;
3704 
3705 	if (event_constraints) {
3706 		for_each_event_constraint(c, event_constraints) {
3707 			if (constraint_match(c, event->hw.config)) {
3708 				event->hw.flags |= c->flags;
3709 				return c;
3710 			}
3711 		}
3712 	}
3713 
3714 	return &hybrid_var(cpuc->pmu, unconstrained);
3715 }
3716 
3717 static struct event_constraint *
3718 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3719 			    struct perf_event *event)
3720 {
3721 	struct event_constraint *c;
3722 
3723 	c = intel_vlbr_constraints(event);
3724 	if (c)
3725 		return c;
3726 
3727 	c = intel_bts_constraints(event);
3728 	if (c)
3729 		return c;
3730 
3731 	c = intel_shared_regs_constraints(cpuc, event);
3732 	if (c)
3733 		return c;
3734 
3735 	c = intel_pebs_constraints(event);
3736 	if (c)
3737 		return c;
3738 
3739 	return x86_get_event_constraints(cpuc, idx, event);
3740 }
3741 
3742 static void
3743 intel_start_scheduling(struct cpu_hw_events *cpuc)
3744 {
3745 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3746 	struct intel_excl_states *xl;
3747 	int tid = cpuc->excl_thread_id;
3748 
3749 	/*
3750 	 * nothing needed if in group validation mode
3751 	 */
3752 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3753 		return;
3754 
3755 	/*
3756 	 * no exclusion needed
3757 	 */
3758 	if (WARN_ON_ONCE(!excl_cntrs))
3759 		return;
3760 
3761 	xl = &excl_cntrs->states[tid];
3762 
3763 	xl->sched_started = true;
3764 	/*
3765 	 * lock shared state until we are done scheduling
3766 	 * in stop_event_scheduling()
3767 	 * makes scheduling appear as a transaction
3768 	 */
3769 	raw_spin_lock(&excl_cntrs->lock);
3770 }
3771 
3772 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3773 {
3774 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3775 	struct event_constraint *c = cpuc->event_constraint[idx];
3776 	struct intel_excl_states *xl;
3777 	int tid = cpuc->excl_thread_id;
3778 
3779 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3780 		return;
3781 
3782 	if (WARN_ON_ONCE(!excl_cntrs))
3783 		return;
3784 
3785 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3786 		return;
3787 
3788 	xl = &excl_cntrs->states[tid];
3789 
3790 	lockdep_assert_held(&excl_cntrs->lock);
3791 
3792 	if (c->flags & PERF_X86_EVENT_EXCL)
3793 		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3794 	else
3795 		xl->state[cntr] = INTEL_EXCL_SHARED;
3796 }
3797 
3798 static void
3799 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3800 {
3801 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3802 	struct intel_excl_states *xl;
3803 	int tid = cpuc->excl_thread_id;
3804 
3805 	/*
3806 	 * nothing needed if in group validation mode
3807 	 */
3808 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3809 		return;
3810 	/*
3811 	 * no exclusion needed
3812 	 */
3813 	if (WARN_ON_ONCE(!excl_cntrs))
3814 		return;
3815 
3816 	xl = &excl_cntrs->states[tid];
3817 
3818 	xl->sched_started = false;
3819 	/*
3820 	 * release shared state lock (acquired in intel_start_scheduling())
3821 	 */
3822 	raw_spin_unlock(&excl_cntrs->lock);
3823 }
3824 
3825 static struct event_constraint *
3826 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3827 {
3828 	WARN_ON_ONCE(!cpuc->constraint_list);
3829 
3830 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3831 		struct event_constraint *cx;
3832 
3833 		/*
3834 		 * grab pre-allocated constraint entry
3835 		 */
3836 		cx = &cpuc->constraint_list[idx];
3837 
3838 		/*
3839 		 * initialize dynamic constraint
3840 		 * with static constraint
3841 		 */
3842 		*cx = *c;
3843 
3844 		/*
3845 		 * mark constraint as dynamic
3846 		 */
3847 		cx->flags |= PERF_X86_EVENT_DYNAMIC;
3848 		c = cx;
3849 	}
3850 
3851 	return c;
3852 }
3853 
3854 static struct event_constraint *
3855 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3856 			   int idx, struct event_constraint *c)
3857 {
3858 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3859 	struct intel_excl_states *xlo;
3860 	int tid = cpuc->excl_thread_id;
3861 	int is_excl, i, w;
3862 
3863 	/*
3864 	 * validating a group does not require
3865 	 * enforcing cross-thread  exclusion
3866 	 */
3867 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3868 		return c;
3869 
3870 	/*
3871 	 * no exclusion needed
3872 	 */
3873 	if (WARN_ON_ONCE(!excl_cntrs))
3874 		return c;
3875 
3876 	/*
3877 	 * because we modify the constraint, we need
3878 	 * to make a copy. Static constraints come
3879 	 * from static const tables.
3880 	 *
3881 	 * only needed when constraint has not yet
3882 	 * been cloned (marked dynamic)
3883 	 */
3884 	c = dyn_constraint(cpuc, c, idx);
3885 
3886 	/*
3887 	 * From here on, the constraint is dynamic.
3888 	 * Either it was just allocated above, or it
3889 	 * was allocated during an earlier invocation
3890 	 * of this function
3891 	 */
3892 
3893 	/*
3894 	 * state of sibling HT
3895 	 */
3896 	xlo = &excl_cntrs->states[tid ^ 1];
3897 
3898 	/*
3899 	 * event requires exclusive counter access
3900 	 * across HT threads
3901 	 */
3902 	is_excl = c->flags & PERF_X86_EVENT_EXCL;
3903 	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3904 		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3905 		if (!cpuc->n_excl++)
3906 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3907 	}
3908 
3909 	/*
3910 	 * Modify static constraint with current dynamic
3911 	 * state of thread
3912 	 *
3913 	 * EXCLUSIVE: sibling counter measuring exclusive event
3914 	 * SHARED   : sibling counter measuring non-exclusive event
3915 	 * UNUSED   : sibling counter unused
3916 	 */
3917 	w = c->weight;
3918 	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3919 		/*
3920 		 * exclusive event in sibling counter
3921 		 * our corresponding counter cannot be used
3922 		 * regardless of our event
3923 		 */
3924 		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3925 			__clear_bit(i, c->idxmsk);
3926 			w--;
3927 			continue;
3928 		}
3929 		/*
3930 		 * if measuring an exclusive event, sibling
3931 		 * measuring non-exclusive, then counter cannot
3932 		 * be used
3933 		 */
3934 		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3935 			__clear_bit(i, c->idxmsk);
3936 			w--;
3937 			continue;
3938 		}
3939 	}
3940 
3941 	/*
3942 	 * if we return an empty mask, then switch
3943 	 * back to static empty constraint to avoid
3944 	 * the cost of freeing later on
3945 	 */
3946 	if (!w)
3947 		c = &emptyconstraint;
3948 
3949 	c->weight = w;
3950 
3951 	return c;
3952 }
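/*
 * Worked example (editor's sketch): with a 4-counter constraint mask 0xf
 * and a sibling-thread state of
 *
 *	xlo->state[] = { EXCLUSIVE, SHARED, UNUSED, UNUSED }
 *
 * a non-exclusive event loses only counter 0 (mask 0xe, weight 3), while
 * an exclusive event also loses counter 1, where the sibling runs a
 * shared event (mask 0xc, weight 2).
 */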
3953 
3954 static struct event_constraint *
3955 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3956 			    struct perf_event *event)
3957 {
3958 	struct event_constraint *c1, *c2;
3959 
3960 	c1 = cpuc->event_constraint[idx];
3961 
3962 	/*
3963 	 * first time only
3964 	 * - static constraint: no change across incremental scheduling calls
3965 	 * - dynamic constraint: handled by intel_get_excl_constraints()
3966 	 */
3967 	c2 = __intel_get_event_constraints(cpuc, idx, event);
3968 	if (c1) {
3969 		WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3970 		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3971 		c1->weight = c2->weight;
3972 		c2 = c1;
3973 	}
3974 
3975 	if (cpuc->excl_cntrs)
3976 		return intel_get_excl_constraints(cpuc, event, idx, c2);
3977 
3978 	if (event->hw.dyn_constraint != ~0ULL) {
3979 		c2 = dyn_constraint(cpuc, c2, idx);
3980 		c2->idxmsk64 &= event->hw.dyn_constraint;
3981 		c2->weight = hweight64(c2->idxmsk64);
3982 	}
3983 
3984 	return c2;
3985 }
3986 
3987 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3988 		struct perf_event *event)
3989 {
3990 	struct hw_perf_event *hwc = &event->hw;
3991 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3992 	int tid = cpuc->excl_thread_id;
3993 	struct intel_excl_states *xl;
3994 
3995 	/*
3996 	 * nothing needed if in group validation mode
3997 	 */
3998 	if (cpuc->is_fake)
3999 		return;
4000 
4001 	if (WARN_ON_ONCE(!excl_cntrs))
4002 		return;
4003 
4004 	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
4005 		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
4006 		if (!--cpuc->n_excl)
4007 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
4008 	}
4009 
4010 	/*
4011 	 * If event was actually assigned, then mark the counter state as
4012 	 * unused now.
4013 	 */
4014 	if (hwc->idx >= 0) {
4015 		xl = &excl_cntrs->states[tid];
4016 
4017 		/*
4018 		 * put_constraint may be called from x86_schedule_events()
4019 		 * which already has the lock held so here make locking
4020 		 * conditional.
4021 		 */
4022 		if (!xl->sched_started)
4023 			raw_spin_lock(&excl_cntrs->lock);
4024 
4025 		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
4026 
4027 		if (!xl->sched_started)
4028 			raw_spin_unlock(&excl_cntrs->lock);
4029 	}
4030 }
4031 
4032 static void
4033 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
4034 					struct perf_event *event)
4035 {
4036 	struct hw_perf_event_extra *reg;
4037 
4038 	reg = &event->hw.extra_reg;
4039 	if (reg->idx != EXTRA_REG_NONE)
4040 		__intel_shared_reg_put_constraints(cpuc, reg);
4041 
4042 	reg = &event->hw.branch_reg;
4043 	if (reg->idx != EXTRA_REG_NONE)
4044 		__intel_shared_reg_put_constraints(cpuc, reg);
4045 }
4046 
4047 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
4048 					struct perf_event *event)
4049 {
4050 	intel_put_shared_regs_event_constraints(cpuc, event);
4051 
4052 	/*
4053 	 * if the PMU has exclusive counter restrictions, then
4054 	 * all events are subject to and must call the
4055 	 * put_excl_constraints() routine
4056 	 */
4057 	if (cpuc->excl_cntrs)
4058 		intel_put_excl_constraints(cpuc, event);
4059 }
4060 
4061 static void intel_pebs_aliases_core2(struct perf_event *event)
4062 {
4063 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4064 		/*
4065 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4066 		 * (0x003c) so that we can use it with PEBS.
4067 		 *
4068 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4069 		 * PEBS capable. However we can use INST_RETIRED.ANY_P
4070 		 * (0x00c0), which is a PEBS capable event, to get the same
4071 		 * count.
4072 		 *
4073 		 * INST_RETIRED.ANY_P counts the number of cycles that retire
4074 		 * CNTMASK instructions. By setting CNTMASK to a value (16)
4075 		 * larger than the maximum number of instructions that can be
4076 		 * retired per cycle (4) and then inverting the condition, we
4077 		 * count all cycles that retire 16 or fewer instructions, which
4078 		 * is every cycle.
4079 		 *
4080 		 * Thereby we gain a PEBS capable cycle counter.
4081 		 */
4082 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
4083 
4084 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4085 		event->hw.config = alt_config;
4086 	}
4087 }
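/*
 * Editor's note: the same trick can be requested from user space as a raw
 * event; assuming the usual perf tool raw-event syntax, something like:
 *
 *	perf record -e cpu/event=0xc0,inv=1,cmask=0x10/pp -- <workload>
 *
 * counts cycles retiring 16 or fewer instructions (i.e. all cycles) on a
 * PEBS-capable counter.
 */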
4088 
4089 static void intel_pebs_aliases_snb(struct perf_event *event)
4090 {
4091 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4092 		/*
4093 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4094 		 * (0x003c) so that we can use it with PEBS.
4095 		 *
4096 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4097 		 * PEBS capable. However we can use UOPS_RETIRED.ALL
4098 		 * (0x01c2), which is a PEBS capable event, to get the same
4099 		 * count.
4100 		 *
4101 		 * UOPS_RETIRED.ALL counts the number of cycles that retire
4102 		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
4103 		 * larger than the maximum number of micro-ops that can be
4104 		 * retired per cycle (4) and then inverting the condition, we
4105 		 * count all cycles that retire 16 or fewer micro-ops, which
4106 		 * is every cycle.
4107 		 *
4108 		 * Thereby we gain a PEBS capable cycle counter.
4109 		 */
4110 		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
4111 
4112 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4113 		event->hw.config = alt_config;
4114 	}
4115 }
4116 
4117 static void intel_pebs_aliases_precdist(struct perf_event *event)
4118 {
4119 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
4120 		/*
4121 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
4122 		 * (0x003c) so that we can use it with PEBS.
4123 		 *
4124 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
4125 		 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
4126 		 * (0x01c0), which is a PEBS capable event, to get the same
4127 		 * count.
4128 		 *
4129 		 * The PREC_DIST event has special support to minimize sample
4130 		 * shadowing effects. One drawback is that it can only be
4131 		 * programmed on counter 1, but that seems like an
4132 		 * acceptable trade off.
4133 		 */
4134 		u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
4135 
4136 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4137 		event->hw.config = alt_config;
4138 	}
4139 }
4140 
4141 static void intel_pebs_aliases_ivb(struct perf_event *event)
4142 {
4143 	if (event->attr.precise_ip < 3)
4144 		return intel_pebs_aliases_snb(event);
4145 	return intel_pebs_aliases_precdist(event);
4146 }
4147 
4148 static void intel_pebs_aliases_skl(struct perf_event *event)
4149 {
4150 	if (event->attr.precise_ip < 3)
4151 		return intel_pebs_aliases_core2(event);
4152 	return intel_pebs_aliases_precdist(event);
4153 }
4154 
4155 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
4156 {
4157 	unsigned long flags = x86_pmu.large_pebs_flags;
4158 
4159 	if (event->attr.use_clockid)
4160 		flags &= ~PERF_SAMPLE_TIME;
4161 	if (!event->attr.exclude_kernel)
4162 		flags &= ~PERF_SAMPLE_REGS_USER;
4163 	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
4164 		flags &= ~PERF_SAMPLE_REGS_USER;
4165 	if (event->attr.sample_regs_intr & ~PEBS_GP_REGS)
4166 		flags &= ~PERF_SAMPLE_REGS_INTR;
4167 	return flags;
4168 }
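/*
 * Editor's example: a sampling event with attr.use_clockid set loses
 * PERF_SAMPLE_TIME from the mask above, so requesting
 * sample_type = IP | TID | TIME forces single-record PEBS instead of the
 * multi-record (large PEBS) mode for that event.
 */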
4169 
4170 static int intel_pmu_bts_config(struct perf_event *event)
4171 {
4172 	struct perf_event_attr *attr = &event->attr;
4173 
4174 	if (unlikely(intel_pmu_has_bts(event))) {
4175 		/* BTS is not supported by this architecture. */
4176 		if (!x86_pmu.bts_active)
4177 			return -EOPNOTSUPP;
4178 
4179 		/* BTS is currently only allowed for user-mode. */
4180 		if (!attr->exclude_kernel)
4181 			return -EOPNOTSUPP;
4182 
4183 		/* BTS is not allowed for precise events. */
4184 		if (attr->precise_ip)
4185 			return -EOPNOTSUPP;
4186 
4187 		/* disallow bts if conflicting events are present */
4188 		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4189 			return -EBUSY;
4190 
4191 		event->destroy = hw_perf_lbr_event_destroy;
4192 	}
4193 
4194 	return 0;
4195 }
4196 
4197 static int core_pmu_hw_config(struct perf_event *event)
4198 {
4199 	int ret = x86_pmu_hw_config(event);
4200 
4201 	if (ret)
4202 		return ret;
4203 
4204 	return intel_pmu_bts_config(event);
4205 }
4206 
4207 #define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \
4208 					 ((x86_pmu.num_topdown_events - 1) << 8))
4209 
4210 static bool is_available_metric_event(struct perf_event *event)
4211 {
4212 	return is_metric_event(event) &&
4213 		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
4214 }
4215 
4216 static inline bool is_mem_loads_event(struct perf_event *event)
4217 {
4218 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
4219 }
4220 
4221 static inline bool is_mem_loads_aux_event(struct perf_event *event)
4222 {
4223 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
4224 }
4225 
4226 static inline bool require_mem_loads_aux_event(struct perf_event *event)
4227 {
4228 	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
4229 		return false;
4230 
4231 	if (is_hybrid())
4232 		return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
4233 
4234 	return true;
4235 }
4236 
4237 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
4238 {
4239 	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
4240 
4241 	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
4242 }
4243 
4244 static u64 intel_pmu_freq_start_period(struct perf_event *event)
4245 {
4246 	int type = event->attr.type;
4247 	u64 config, factor;
4248 	s64 start;
4249 
4250 	/*
4251 	 * The 127 is the lowest possible recommended SAV (sample after value)
4252 	 * for a 4000 freq (default freq), according to the event list JSON file.
4253 	 * Also, assume the workload is idle 50% of the time.
4254 	 */
4255 	factor = 64 * 4000;
4256 	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
4257 		goto end;
4258 
4259 	/*
4260 	 * The estimation of the start period in the freq mode is
4261 	 * based on the below assumption.
4262 	 *
4263 	 * For a cycles or an instructions event, assume 1GHz on the
4264 	 * underlying platform and 1 IPC. The workload is idle 50% of the time.
4265 	 * The start period = 1,000,000,000 * 1 / freq / 2.
4266 	 *		    = 500,000,000 / freq
4267 	 *
4268 	 * Usually, the branch-related events occur less often than the
4269 	 * instructions event. According to the Intel event list JSON
4270 	 * file, the SAV (sample after value) of a branch-related event
4271 	 * is usually 1/4 of an instruction event.
4272 	 * The start period of branch-related events = 125,000,000 / freq.
4273 	 *
4274 	 * The cache-related events occur even less often. The SAV is usually
4275 	 * 1/20 of an instruction event.
4276 	 * The start period of cache-related events = 25,000,000 / freq.
4277 	 */
4278 	config = event->attr.config & PERF_HW_EVENT_MASK;
4279 	if (type == PERF_TYPE_HARDWARE) {
4280 		switch (config) {
4281 		case PERF_COUNT_HW_CPU_CYCLES:
4282 		case PERF_COUNT_HW_INSTRUCTIONS:
4283 		case PERF_COUNT_HW_BUS_CYCLES:
4284 		case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
4285 		case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
4286 		case PERF_COUNT_HW_REF_CPU_CYCLES:
4287 			factor = 500000000;
4288 			break;
4289 		case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
4290 		case PERF_COUNT_HW_BRANCH_MISSES:
4291 			factor = 125000000;
4292 			break;
4293 		case PERF_COUNT_HW_CACHE_REFERENCES:
4294 		case PERF_COUNT_HW_CACHE_MISSES:
4295 			factor = 25000000;
4296 			break;
4297 		default:
4298 			goto end;
4299 		}
4300 	}
4301 
4302 	if (type == PERF_TYPE_HW_CACHE)
4303 		factor = 25000000;
4304 end:
4305 	/*
4306 	 * Usually, a prime or a number with fewer factors (close to prime)
4307 	 * is chosen as an SAV, which makes it less likely that the sampling
4308 	 * period synchronizes with some periodic event in the workload.
4309 	 * Subtract 1 so that it at least avoids values near powers of two
4310 	 * for the default freq.
4311 	 */
4312 	start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
4313 
4314 	if (start > x86_pmu.max_period)
4315 		start = x86_pmu.max_period;
4316 
4317 	if (x86_pmu.limit_period)
4318 		x86_pmu.limit_period(event, &start);
4319 
4320 	return start;
4321 }
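/*
 * Worked example (editor's note): a cycles event at the default 4000 Hz
 * gets factor = 500,000,000, hence
 *
 *	start = DIV_ROUND_UP(500000000, 4000) - 1 = 124999
 *
 * while a cache-misses event at the same frequency starts at
 * DIV_ROUND_UP(25000000, 4000) - 1 = 6249.
 */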
4322 
4323 static inline bool intel_pmu_has_acr(struct pmu *pmu)
4324 {
4325 	return !!hybrid(pmu, acr_cause_mask64);
4326 }
4327 
4328 static bool intel_pmu_is_acr_group(struct perf_event *event)
4329 {
4330 	/* The group leader has the ACR flag set */
4331 	if (is_acr_event_group(event))
4332 		return true;
4333 
4334 	/* The acr_mask is set */
4335 	if (event->attr.config2)
4336 		return true;
4337 
4338 	return false;
4339 }
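/*
 * Editor's sketch of the config2 semantics, as implied by
 * intel_pmu_acr_late_setup(): bit N of attr.config2 selects the N-th event
 * (counting from the start of the ACR group) whose counter overflow
 * reloads this event. For a hypothetical two-event group
 *
 *	{ cycles, branches/config2=0x1/ }
 *
 * the branches event would be reloaded whenever the cycles counter
 * overflows. This is an illustration, not ABI documentation.
 */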
4340 
4341 static inline bool intel_pmu_has_pebs_counter_group(struct pmu *pmu)
4342 {
4343 	u64 caps;
4344 
4345 	if (x86_pmu.intel_cap.pebs_format >= 6 && x86_pmu.intel_cap.pebs_baseline)
4346 		return true;
4347 
4348 	caps = hybrid(pmu, arch_pebs_cap).caps;
4349 	if (x86_pmu.arch_pebs && (caps & ARCH_PEBS_CNTR_MASK))
4350 		return true;
4351 
4352 	return false;
4353 }
4354 
4355 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
4356 						 u64 *cause_mask, int *num)
4357 {
4358 	event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
4359 	*cause_mask |= event->attr.config2;
4360 	*num += 1;
4361 }
4362 
4363 static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
4364 						   int idx, u64 cause_mask)
4365 {
4366 	if (test_bit(idx, (unsigned long *)&cause_mask))
4367 		event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
4368 }
4369 
4370 static int intel_pmu_hw_config(struct perf_event *event)
4371 {
4372 	int ret = x86_pmu_hw_config(event);
4373 
4374 	if (ret)
4375 		return ret;
4376 
4377 	ret = intel_pmu_bts_config(event);
4378 	if (ret)
4379 		return ret;
4380 
4381 	if (event->attr.freq && event->attr.sample_freq) {
4382 		event->hw.sample_period = intel_pmu_freq_start_period(event);
4383 		event->hw.last_period = event->hw.sample_period;
4384 		local64_set(&event->hw.period_left, event->hw.sample_period);
4385 	}
4386 
4387 	if (event->attr.precise_ip) {
4388 		struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
4389 
4390 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
4391 			return -EINVAL;
4392 
4393 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
4394 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
4395 			if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) &&
4396 			    !has_aux_action(event)) {
4397 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
4398 				event->attach_state |= PERF_ATTACH_SCHED_CB;
4399 			}
4400 		}
4401 		if (x86_pmu.pebs_aliases)
4402 			x86_pmu.pebs_aliases(event);
4403 
4404 		if (x86_pmu.arch_pebs) {
4405 			u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
4406 						~GLOBAL_CTRL_EN_PERF_METRICS;
4407 			u64 pebs_mask = event->attr.precise_ip >= 3 ?
4408 						pebs_cap.pdists : pebs_cap.counters;
4409 			if (cntr_mask != pebs_mask)
4410 				event->hw.dyn_constraint &= pebs_mask;
4411 		}
4412 	}
4413 
4414 	if (needs_branch_stack(event)) {
4415 		/* Avoid branch stack setup for counting events in SAMPLE READ */
4416 		if (is_sampling_event(event) ||
4417 		    !(event->attr.sample_type & PERF_SAMPLE_READ))
4418 			event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4419 	}
4420 
4421 	if (branch_sample_counters(event)) {
4422 		struct perf_event *leader, *sibling;
4423 		int num = 0;
4424 
4425 		if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
4426 		    (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
4427 			return -EINVAL;
4428 
4429 		/*
4430 		 * The branch counter logging is not supported in the call stack
4431 		 * mode yet, since we cannot simply flush the LBR during e.g.,
4432 		 * multiplexing. Also, there is no obvious usage with the call
4433 		 * stack mode. Simply forbid it for now.
4434 		 *
4435 		 * If any events in the group enable the branch counter logging
4436 		 * feature, the group is treated as a branch counter logging
4437 		 * group, which requires the extra space to store the counters.
4438 		 */
4439 		leader = event->group_leader;
4440 		if (branch_sample_call_stack(leader))
4441 			return -EINVAL;
4442 		if (branch_sample_counters(leader)) {
4443 			num++;
4444 			leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
4445 		}
4446 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4447 
4448 		for_each_sibling_event(sibling, leader) {
4449 			if (branch_sample_call_stack(sibling))
4450 				return -EINVAL;
4451 			if (branch_sample_counters(sibling)) {
4452 				num++;
4453 				sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
4454 			}
4455 		}
4456 
4457 		if (num > fls(x86_pmu.lbr_counters))
4458 			return -EINVAL;
4459 		/*
4460 		 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't
4461 		 * require any branch stack setup.
4462 		 * Clear the bit to avoid unnecessary branch stack setup.
4463 		 */
4464 		if (0 == (event->attr.branch_sample_type &
4465 			  ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4466 			    PERF_SAMPLE_BRANCH_COUNTERS)))
4467 			event->hw.flags  &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4468 
4469 		/*
4470 		 * Force the leader to be a LBR event. So LBRs can be reset
4471 		 * with the leader event. See intel_pmu_lbr_del() for details.
4472 		 */
4473 		if (!intel_pmu_needs_branch_stack(leader))
4474 			return -EINVAL;
4475 	}
4476 
4477 	if (intel_pmu_needs_branch_stack(event)) {
4478 		ret = intel_pmu_setup_lbr_filter(event);
4479 		if (ret)
4480 			return ret;
4481 		event->attach_state |= PERF_ATTACH_SCHED_CB;
4482 
4483 		/*
4484 		 * BTS is set up earlier in this path, so don't account twice
4485 		 */
4486 		if (!unlikely(intel_pmu_has_bts(event))) {
4487 			/* disallow lbr if conflicting events are present */
4488 			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4489 				return -EBUSY;
4490 
4491 			event->destroy = hw_perf_lbr_event_destroy;
4492 		}
4493 	}
4494 
4495 	if (event->attr.aux_output) {
4496 		if (!event->attr.precise_ip)
4497 			return -EINVAL;
4498 
4499 		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4500 	}
4501 
4502 	if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
4503 	    intel_pmu_has_pebs_counter_group(event->pmu) &&
4504 	    is_sampling_event(event) &&
4505 	    event->attr.precise_ip)
4506 		event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
4507 
4508 	if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) {
4509 		struct perf_event *sibling, *leader = event->group_leader;
4510 		struct pmu *pmu = event->pmu;
4511 		bool has_sw_event = false;
4512 		int num = 0, idx = 0;
4513 		u64 cause_mask = 0;
4514 
4515 		/* Perf metrics are not supported */
4516 		if (is_metric_event(event))
4517 			return -EINVAL;
4518 
4519 		/* Freq mode is not supported */
4520 		if (event->attr.freq)
4521 			return -EINVAL;
4522 
4523 		/* PDist is not supported */
4524 		if (event->attr.config2 && event->attr.precise_ip > 2)
4525 			return -EINVAL;
4526 
4527 		/* The reload value cannot exceed the max period */
4528 		if (event->attr.sample_period > x86_pmu.max_period)
4529 			return -EINVAL;
4530 		/*
4531 		 * The counter-constraints of each event cannot be finalized
4532 		 * unless the whole group is scanned. However, it's hard
4533 		 * to know whether the event is the last one of the group.
4534 		 * Recalculate the counter-constraints for each event when
4535 		 * adding a new event.
4536 		 *
4537 		 * The group is traversed twice, which may be optimized later.
4538 		 * In the first round,
4539 		 * - Find all events which do reload when other events
4540 		 *   overflow and set the corresponding counter-constraints
4541 		 * - Add all events, which can cause other events reload,
4542 		 *   in the cause_mask
4543 		 * - Error out if the number of events exceeds the HW limit
4544 		 * - The ACR events must be contiguous.
4545 		 *   Error out if there are non-X86 events between ACR events.
4546 		 *   This is not a HW limit, but a SW limit.
4547 		 *   With the assumption, the intel_pmu_acr_late_setup() can
4548 		 *   easily convert the event idx to counter idx without
4549 		 *   traversing the whole event list.
4550 		 */
4551 		if (!is_x86_event(leader))
4552 			return -EINVAL;
4553 
4554 		if (leader->attr.config2)
4555 			intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num);
4556 
4557 		if (leader->nr_siblings) {
4558 			for_each_sibling_event(sibling, leader) {
4559 				if (!is_x86_event(sibling)) {
4560 					has_sw_event = true;
4561 					continue;
4562 				}
4563 				if (!sibling->attr.config2)
4564 					continue;
4565 				if (has_sw_event)
4566 					return -EINVAL;
4567 				intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num);
4568 			}
4569 		}
4570 		if (leader != event && event->attr.config2) {
4571 			if (has_sw_event)
4572 				return -EINVAL;
4573 			intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num);
4574 		}
4575 
4576 		if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
4577 		    num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
4578 			return -EINVAL;
4579 		/*
4580 		 * In the second round, apply the counter-constraints for
4581 		 * the events which can cause other events reload.
4582 		 */
4583 		intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask);
4584 
4585 		if (leader->nr_siblings) {
4586 			for_each_sibling_event(sibling, leader)
4587 				intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
4588 		}
4589 
4590 		if (leader != event)
4591 			intel_pmu_set_acr_caused_constr(event, idx, cause_mask);
4592 
4593 		leader->hw.flags |= PERF_X86_EVENT_ACR;
4594 	}
4595 
4596 	if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4597 	    (event->attr.type == PERF_TYPE_HW_CACHE))
4598 		return 0;
4599 
4600 	/*
4601 	 * Config Topdown slots and metric events
4602 	 *
4603 	 * The slots event on Fixed Counter 3 can support sampling,
4604 	 * which will be handled normally in x86_perf_event_update().
4605 	 *
4606 	 * Metric events don't support sampling and require being paired
4607 	 * with a slots event as group leader. When the slots event
4608 	 * is used in a metrics group, it too cannot support sampling.
4609 	 */
4610 	if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4611 		/* The metrics_clear can only be set for the slots event */
4612 		if (event->attr.config1 &&
4613 		    (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR)))
4614 			return -EINVAL;
4615 
4616 		if (event->attr.config2)
4617 			return -EINVAL;
4618 
4619 		/*
4620 		 * The TopDown metrics events and slots event don't
4621 		 * support any filters.
4622 		 */
4623 		if (event->attr.config & X86_ALL_EVENT_FLAGS)
4624 			return -EINVAL;
4625 
4626 		if (is_available_metric_event(event)) {
4627 			struct perf_event *leader = event->group_leader;
4628 
4629 			/* The metric events don't support sampling. */
4630 			if (is_sampling_event(event))
4631 				return -EINVAL;
4632 
4633 			/* The metric events require a slots group leader. */
4634 			if (!is_slots_event(leader))
4635 				return -EINVAL;
4636 
4637 			/*
4638 			 * The leader/SLOTS must not be a sampling event for
4639 			 * metric use; hardware requires it starts at 0 when used
4640 			 * in conjunction with MSR_PERF_METRICS.
4641 			 */
4642 			if (is_sampling_event(leader))
4643 				return -EINVAL;
4644 
4645 			event->event_caps |= PERF_EV_CAP_SIBLING;
4646 			/*
4647 			 * Only once we have a METRICs sibling do we
4648 			 * need TopDown magic.
4649 			 */
4650 			leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4651 			event->hw.flags  |= PERF_X86_EVENT_TOPDOWN;
4652 		}
4653 	}
4654 
4655 	/*
4656 	 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4657 	 * doesn't function quite right. As a work-around it needs to always be
4658 	 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4659 	 * The actual count of this second event is irrelevant; it just needs
4660 	 * to be active to make the first event function correctly.
4661 	 *
4662 	 * In a group, the auxiliary event must be in front of the load latency
4663 	 * event. This rule exists to simplify the implementation of the check,
4664 	 * because perf cannot see the complete group at this point.
4665 	 */
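	/*
	 * Editor's sketch: with the perf tool this pairing is typically
	 * expressed by placing the auxiliary event first in the group,
	 * roughly (exact event names depend on the event list):
	 *
	 *	perf record -e '{cpu/event=0x03,umask=0x82/,cpu/event=0xcd,umask=0x01/pp}' ...
	 */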
4666 	if (require_mem_loads_aux_event(event) &&
4667 	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4668 	    is_mem_loads_event(event)) {
4669 		struct perf_event *leader = event->group_leader;
4670 		struct perf_event *sibling = NULL;
4671 
4672 		/*
4673 		 * When this memload event is also the first event (no group
4674 		 * exists yet), then there is no aux event before it.
4675 		 */
4676 		if (leader == event)
4677 			return -ENODATA;
4678 
4679 		if (!is_mem_loads_aux_event(leader)) {
4680 			for_each_sibling_event(sibling, leader) {
4681 				if (is_mem_loads_aux_event(sibling))
4682 					break;
4683 			}
4684 			if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4685 				return -ENODATA;
4686 		}
4687 	}
4688 
4689 	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4690 		return 0;
4691 
4692 	if (x86_pmu.version < 3)
4693 		return -EINVAL;
4694 
4695 	ret = perf_allow_cpu();
4696 	if (ret)
4697 		return ret;
4698 
4699 	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4700 
4701 	return 0;
4702 }
4703 
4704 /*
4705  * Currently, the only caller of this function is the atomic_switch_perf_msrs().
4706  * The host perf context helps to prepare the values of the real hardware for
4707  * a set of msrs that need to be switched atomically in a vmx transaction.
4708  *
4709  * For example, the pseudocode needed to add a new msr should look like:
4710  *
4711  * arr[(*nr)++] = (struct perf_guest_switch_msr){
4712  *	.msr = the hardware msr address,
4713  *	.host = the value the hardware has when it doesn't run a guest,
4714  *	.guest = the value the hardware has when it runs a guest,
4715  * };
4716  *
4717  * These values have nothing to do with the emulated values the guest sees
4718  * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4719  * specifically in the intel_pmu_{get,set}_msr().
4720  */
4721 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4722 {
4723 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4724 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4725 	struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4726 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4727 	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4728 	int global_ctrl, pebs_enable;
4729 
4730 	/*
4731 	 * In addition to obeying exclude_guest/exclude_host, remove bits being
4732 	 * used for PEBS when running a guest, because PEBS writes to virtual
4733 	 * addresses (not physical addresses).
4734 	 */
4735 	*nr = 0;
4736 	global_ctrl = (*nr)++;
4737 	arr[global_ctrl] = (struct perf_guest_switch_msr){
4738 		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
4739 		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4740 		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
4741 	};
4742 
4743 	if (!x86_pmu.ds_pebs)
4744 		return arr;
4745 
4746 	/*
4747 	 * If PMU counter has PEBS enabled it is not enough to
4748 	 * disable counter on a guest entry since PEBS memory
4749 	 * write can overshoot guest entry and corrupt guest
4750 	 * memory. Disabling PEBS solves the problem.
4751 	 *
4752 	 * Don't do this if the CPU already enforces it.
4753 	 */
4754 	if (x86_pmu.pebs_no_isolation) {
4755 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4756 			.msr = MSR_IA32_PEBS_ENABLE,
4757 			.host = cpuc->pebs_enabled,
4758 			.guest = 0,
4759 		};
4760 		return arr;
4761 	}
4762 
4763 	if (!kvm_pmu || !x86_pmu.pebs_ept)
4764 		return arr;
4765 
4766 	arr[(*nr)++] = (struct perf_guest_switch_msr){
4767 		.msr = MSR_IA32_DS_AREA,
4768 		.host = (unsigned long)cpuc->ds,
4769 		.guest = kvm_pmu->ds_area,
4770 	};
4771 
4772 	if (x86_pmu.intel_cap.pebs_baseline) {
4773 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4774 			.msr = MSR_PEBS_DATA_CFG,
4775 			.host = cpuc->active_pebs_data_cfg,
4776 			.guest = kvm_pmu->pebs_data_cfg,
4777 		};
4778 	}
4779 
4780 	pebs_enable = (*nr)++;
4781 	arr[pebs_enable] = (struct perf_guest_switch_msr){
4782 		.msr = MSR_IA32_PEBS_ENABLE,
4783 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4784 		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
4785 	};
4786 
4787 	if (arr[pebs_enable].host) {
4788 		/* Disable guest PEBS if host PEBS is enabled. */
4789 		arr[pebs_enable].guest = 0;
4790 	} else {
4791 		/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4792 		arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4793 		arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4794 		/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4795 		arr[global_ctrl].guest |= arr[pebs_enable].guest;
4796 	}
4797 
4798 	return arr;
4799 }
4800 
4801 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4802 {
4803 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4804 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4805 	int idx;
4806 
4807 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4808 		struct perf_event *event = cpuc->events[idx];
4809 
4810 		arr[idx].msr = x86_pmu_config_addr(idx);
4811 		arr[idx].host = arr[idx].guest = 0;
4812 
4813 		if (!test_bit(idx, cpuc->active_mask))
4814 			continue;
4815 
4816 		arr[idx].host = arr[idx].guest =
4817 			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4818 
4819 		if (event->attr.exclude_host)
4820 			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4821 		else if (event->attr.exclude_guest)
4822 			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4823 	}
4824 
4825 	*nr = x86_pmu_max_num_counters(cpuc->pmu);
4826 	return arr;
4827 }
4828 
4829 static void core_pmu_enable_event(struct perf_event *event)
4830 {
4831 	if (!event->attr.exclude_host)
4832 		x86_pmu_enable_event(event);
4833 }
4834 
4835 static void core_pmu_enable_all(int added)
4836 {
4837 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4838 	int idx;
4839 
4840 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4841 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4842 
4843 		if (!test_bit(idx, cpuc->active_mask) ||
4844 				cpuc->events[idx]->attr.exclude_host)
4845 			continue;
4846 
4847 		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4848 	}
4849 }
4850 
4851 static int hsw_hw_config(struct perf_event *event)
4852 {
4853 	int ret = intel_pmu_hw_config(event);
4854 
4855 	if (ret)
4856 		return ret;
4857 	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4858 		return 0;
4859 	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4860 
4861 	/*
4862 	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4863 	 * PEBS or in ANY thread mode. Since the results are nonsensical,
4864 	 * forbid this combination.
4865 	 */
4866 	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4867 	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4868 	      event->attr.precise_ip > 0))
4869 		return -EOPNOTSUPP;
4870 
4871 	if (event_is_checkpointed(event)) {
4872 		/*
4873 		 * Sampling of checkpointed events can cause situations where
4874 	 * the CPU constantly aborts because of an overflow, which is
4875 		 * then checkpointed back and ignored. Forbid checkpointing
4876 		 * for sampling.
4877 		 *
4878 		 * But still allow a long sampling period, so that perf stat
4879 		 * from KVM works.
4880 		 */
4881 		if (event->attr.sample_period > 0 &&
4882 		    event->attr.sample_period < 0x7fffffff)
4883 			return -EOPNOTSUPP;
4884 	}
4885 	return 0;
4886 }
4887 
4888 static struct event_constraint counter0_constraint =
4889 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4890 
4891 static struct event_constraint counter1_constraint =
4892 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4893 
4894 static struct event_constraint counter0_1_constraint =
4895 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4896 
4897 static struct event_constraint counter2_constraint =
4898 			EVENT_CONSTRAINT(0, 0x4, 0);
4899 
4900 static struct event_constraint fixed0_constraint =
4901 			FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4902 
4903 static struct event_constraint fixed0_counter0_constraint =
4904 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4905 
4906 static struct event_constraint fixed0_counter0_1_constraint =
4907 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4908 
4909 static struct event_constraint counters_1_7_constraint =
4910 			INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
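
/*
 * Note (illustrative): the mask in the constraints above is a counter
 * index bitmap; GP counters occupy the low bits and fixed counters
 * start at bit INTEL_PMC_IDX_FIXED (32). So:
 *
 *	0x1		 PMC0 only
 *	0x3		 PMC0 or PMC1
 *	0xfe		 PMC1..PMC7
 *	0x100000001ULL	 fixed counter 0 or PMC0
 */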
4911 
4912 static struct event_constraint *
4913 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4914 			  struct perf_event *event)
4915 {
4916 	struct event_constraint *c;
4917 
4918 	c = intel_get_event_constraints(cpuc, idx, event);
4919 
4920 	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
4921 	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4922 		if (c->idxmsk64 & (1U << 2))
4923 			return &counter2_constraint;
4924 		return &emptyconstraint;
4925 	}
4926 
4927 	return c;
4928 }
4929 
4930 static struct event_constraint *
4931 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4932 			  struct perf_event *event)
4933 {
4934 	/*
4935 	 * Fixed counter 0 has less skid.
4936 	 * Force instruction:ppp onto fixed counter 0.
4937 	 */
4938 	if ((event->attr.precise_ip == 3) &&
4939 	    constraint_match(&fixed0_constraint, event->hw.config))
4940 		return &fixed0_constraint;
4941 
4942 	return hsw_get_event_constraints(cpuc, idx, event);
4943 }
4944 
4945 static struct event_constraint *
4946 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4947 			  struct perf_event *event)
4948 {
4949 	struct event_constraint *c;
4950 
4951 	c = icl_get_event_constraints(cpuc, idx, event);
4952 
4953 	/*
4954 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
4955 	 * is only supported on GP counter 0. If a :ppp event cannot be
4956 	 * scheduled on GP counter 0, error out.
4957 	 * Exception: Instruction PDIR is only available on the fixed counter 0.
4958 	 */
4959 	if ((event->attr.precise_ip == 3) &&
4960 	    !constraint_match(&fixed0_constraint, event->hw.config)) {
4961 		if (c->idxmsk64 & BIT_ULL(0))
4962 			return &counter0_constraint;
4963 
4964 		return &emptyconstraint;
4965 	}
4966 
4967 	return c;
4968 }
4969 
4970 static struct event_constraint *
4971 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4972 			  struct perf_event *event)
4973 {
4974 	struct event_constraint *c;
4975 
4976 	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
4977 	if (event->attr.precise_ip == 3)
4978 		return &counter0_constraint;
4979 
4980 	c = intel_get_event_constraints(cpuc, idx, event);
4981 
4982 	return c;
4983 }
4984 
4985 static struct event_constraint *
4986 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4987 			  struct perf_event *event)
4988 {
4989 	struct event_constraint *c;
4990 
4991 	c = intel_get_event_constraints(cpuc, idx, event);
4992 
4993 	/*
4994 	 * :ppp means to do reduced skid PEBS,
4995 	 * which is available on PMC0 and fixed counter 0.
4996 	 */
4997 	if (event->attr.precise_ip == 3) {
4998 		/* Force instruction:ppp on PMC0 and Fixed counter 0 */
4999 		if (constraint_match(&fixed0_constraint, event->hw.config))
5000 			return &fixed0_counter0_constraint;
5001 
5002 		return &counter0_constraint;
5003 	}
5004 
5005 	return c;
5006 }
5007 
5008 static bool allow_tsx_force_abort = true;
5009 
5010 static struct event_constraint *
5011 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5012 			  struct perf_event *event)
5013 {
5014 	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
5015 
5016 	/*
5017 	 * Without TFA we must not use PMC3.
5018 	 */
5019 	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
5020 		c = dyn_constraint(cpuc, c, idx);
5021 		c->idxmsk64 &= ~(1ULL << 3);
5022 		c->weight--;
5023 	}
5024 
5025 	return c;
5026 }
5027 
5028 static struct event_constraint *
5029 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5030 			  struct perf_event *event)
5031 {
5032 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5033 
5034 	if (pmu->pmu_type == hybrid_big)
5035 		return glc_get_event_constraints(cpuc, idx, event);
5036 	else if (pmu->pmu_type == hybrid_small)
5037 		return tnt_get_event_constraints(cpuc, idx, event);
5038 
5039 	WARN_ON(1);
5040 	return &emptyconstraint;
5041 }
5042 
5043 static struct event_constraint *
5044 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5045 			  struct perf_event *event)
5046 {
5047 	struct event_constraint *c;
5048 
5049 	c = intel_get_event_constraints(cpuc, idx, event);
5050 
5051 	/*
5052 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
5053 	 * is only supported on GP counters 0 & 1 and fixed counter 0.
5054 	 * If a :ppp event cannot be scheduled on any of these eligible
5055 	 * counters, error out.
5056 	 */
5057 	if (event->attr.precise_ip == 3) {
5058 		/* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
5059 		if (constraint_match(&fixed0_constraint, event->hw.config)) {
5060 			/* The fixed counter 0 doesn't support LBR event logging. */
5061 			if (branch_sample_counters(event))
5062 				return &counter0_1_constraint;
5063 			else
5064 				return &fixed0_counter0_1_constraint;
5065 		}
5066 
5067 		switch (c->idxmsk64 & 0x3ull) {
5068 		case 0x1:
5069 			return &counter0_constraint;
5070 		case 0x2:
5071 			return &counter1_constraint;
5072 		case 0x3:
5073 			return &counter0_1_constraint;
5074 		}
5075 		return &emptyconstraint;
5076 	}
5077 
5078 	return c;
5079 }
5080 
5081 static struct event_constraint *
5082 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5083 			  struct perf_event *event)
5084 {
5085 	struct event_constraint *c;
5086 
5087 	c = glc_get_event_constraints(cpuc, idx, event);
5088 
5089 	/* The Retire Latency is not supported by the fixed counter 0. */
5090 	if (event->attr.precise_ip &&
5091 	    (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
5092 	    constraint_match(&fixed0_constraint, event->hw.config)) {
5093 		/*
5094 		 * The Instruction PDIR is only available
5095 		 * on the fixed counter 0. Error out for this case.
5096 		 */
5097 		if (event->attr.precise_ip == 3)
5098 			return &emptyconstraint;
5099 		return &counters_1_7_constraint;
5100 	}
5101 
5102 	return c;
5103 }
5104 
5105 static struct event_constraint *
5106 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5107 			  struct perf_event *event)
5108 {
5109 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5110 
5111 	if (pmu->pmu_type == hybrid_big)
5112 		return rwc_get_event_constraints(cpuc, idx, event);
5113 	if (pmu->pmu_type == hybrid_small)
5114 		return cmt_get_event_constraints(cpuc, idx, event);
5115 
5116 	WARN_ON(1);
5117 	return &emptyconstraint;
5118 }
5119 
5120 static int adl_hw_config(struct perf_event *event)
5121 {
5122 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5123 
5124 	if (pmu->pmu_type == hybrid_big)
5125 		return hsw_hw_config(event);
5126 	else if (pmu->pmu_type == hybrid_small)
5127 		return intel_pmu_hw_config(event);
5128 
5129 	WARN_ON(1);
5130 	return -EOPNOTSUPP;
5131 }
5132 
5133 static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
5134 {
5135 	return INTEL_CPU_TYPE_CORE;
5136 }
5137 
5138 static inline bool erratum_hsw11(struct perf_event *event)
5139 {
5140 	return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
5141 		X86_CONFIG(.event=0xc0, .umask=0x01);
5142 }
5143 
5144 static struct event_constraint *
5145 arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
5146 			  struct perf_event *event)
5147 {
5148 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5149 
5150 	if (pmu->pmu_type == hybrid_tiny)
5151 		return cmt_get_event_constraints(cpuc, idx, event);
5152 
5153 	return mtl_get_event_constraints(cpuc, idx, event);
5154 }
5155 
5156 static int arl_h_hw_config(struct perf_event *event)
5157 {
5158 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5159 
5160 	if (pmu->pmu_type == hybrid_tiny)
5161 		return intel_pmu_hw_config(event);
5162 
5163 	return adl_hw_config(event);
5164 }
5165 
5166 /*
5167  * Erratum HSW11 requires a period larger than 100, the same as BDM11.
5168  * A minimum period of 128 is therefore enforced for INST_RETIRED.ALL.
5169  *
5170  * The message 'interrupt took too long' can be observed on any counter which
5171  * was armed with a period < 32 and two events expired in the same NMI.
5172  * A minimum period of 32 is enforced for the rest of the events.
5173  */
5174 static void hsw_limit_period(struct perf_event *event, s64 *left)
5175 {
5176 	*left = max(*left, erratum_hsw11(event) ? 128 : 32);
5177 }
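
/*
 * Worked example (illustrative): a requested period of 50 becomes 128
 * for INST_RETIRED.ALL (erratum_hsw11() matches event 0xc0, umask 0x01)
 * and 32 for any other event; a period of 1000 passes through unchanged
 * in both cases.
 */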
5178 
5179 /*
5180  * Broadwell:
5181  *
5182  * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
5183  * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
5184  * the two to enforce a minimum period of 128 (the smallest value that has bits
5185  * 0-5 cleared and >= 100).
5186  *
5187  * Because of how the code in x86_perf_event_set_period() works, the truncation
5188  * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
5189  * to make up for the 'lost' events due to carrying the 'error' in period_left.
5190  *
5191  * Therefore the effective (average) period matches the requested period,
5192  * despite coarser hardware granularity.
5193  */
5194 static void bdw_limit_period(struct perf_event *event, s64 *left)
5195 {
5196 	if (erratum_hsw11(event)) {
5197 		if (*left < 128)
5198 			*left = 128;
5199 		*left &= ~0x3fULL;
5200 	}
5201 }
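
/*
 * Worked example (illustrative): for INST_RETIRED.ALL a requested
 * period of 100 is first raised to 128, and 128 & ~0x3f stays 128;
 * a period of 200 is truncated to 200 & ~0x3f = 192. Both results
 * have bits 0-5 clear (BDM55) and are >= 100 (BDM11).
 */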
5202 
5203 static void nhm_limit_period(struct perf_event *event, s64 *left)
5204 {
5205 	*left = max(*left, 32LL);
5206 }
5207 
5208 static void glc_limit_period(struct perf_event *event, s64 *left)
5209 {
5210 	if (event->attr.precise_ip == 3)
5211 		*left = max(*left, 128LL);
5212 }
5213 
5214 PMU_FORMAT_ATTR(event,	"config:0-7"	);
5215 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
5216 PMU_FORMAT_ATTR(edge,	"config:18"	);
5217 PMU_FORMAT_ATTR(pc,	"config:19"	);
5218 PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
5219 PMU_FORMAT_ATTR(inv,	"config:23"	);
5220 PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
5221 PMU_FORMAT_ATTR(in_tx,  "config:32"	);
5222 PMU_FORMAT_ATTR(in_tx_cp, "config:33"	);
5223 PMU_FORMAT_ATTR(eq,	"config:36"	); /* v6 + */
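
/*
 * Example (illustrative): these format strings describe how perf packs
 * sysfs event terms into perf_event_attr.config. "event=0xc0,umask=0x1"
 * yields config = 0xc0 | (0x1 << 8) = 0x1c0, and "in_tx=1" sets bit 32
 * of config.
 */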
5224 
5225 PMU_FORMAT_ATTR(metrics_clear,	"config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5226 
5227 static ssize_t umask2_show(struct device *dev,
5228 			   struct device_attribute *attr,
5229 			   char *page)
5230 {
5231 	u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
5232 
5233 	if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
5234 		return sprintf(page, "config:8-15,40-47\n");
5235 
5236 	/* Roll back to the old format if umask2 is not supported. */
5237 	return sprintf(page, "config:8-15\n");
5238 }
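
/*
 * Example (illustrative): on a CPU that enumerates UMASK2, reading
 * /sys/bus/event_source/devices/cpu/format/umask returns
 * "config:8-15,40-47"; otherwise the legacy "config:8-15" is reported.
 */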
5239 
5240 static struct device_attribute format_attr_umask2  =
5241 		__ATTR(umask, 0444, umask2_show, NULL);
5242 
5243 static struct attribute *format_evtsel_ext_attrs[] = {
5244 	&format_attr_umask2.attr,
5245 	&format_attr_eq.attr,
5246 	&format_attr_metrics_clear.attr,
5247 	NULL
5248 };
5249 
5250 static umode_t
5251 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5252 {
5253 	struct device *dev = kobj_to_dev(kobj);
5254 	u64 mask;
5255 
5256 	/*
5257 	 * The umask and umask2 have different formats but share the
5258 	 * same attr name. In update mode, the previous value of the
5259 	 * umask is unconditionally removed before is_visible. If
5260 	 * umask2 format is not enumerated, it's impossible to roll
5261 	 * back to the old format.
5262 	 * So do the check in umask2_show() rather than in is_visible().
5263 	 */
5264 	if (i == 0)
5265 		return attr->mode;
5266 
5267 	mask = hybrid(dev_get_drvdata(dev), config_mask);
5268 	if (i == 1)
5269 		return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
5270 
5271 	/* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5272 	if (i == 2) {
5273 		union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
5274 
5275 		return intel_cap.rdpmc_metrics_clear ? attr->mode : 0;
5276 	}
5277 
5278 	return 0;
5279 }
5280 
5281 static struct attribute *intel_arch_formats_attr[] = {
5282 	&format_attr_event.attr,
5283 	&format_attr_umask.attr,
5284 	&format_attr_edge.attr,
5285 	&format_attr_pc.attr,
5286 	&format_attr_inv.attr,
5287 	&format_attr_cmask.attr,
5288 	NULL,
5289 };
5290 
5291 ssize_t intel_event_sysfs_show(char *page, u64 config)
5292 {
5293 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
5294 
5295 	return x86_event_sysfs_show(page, config, event);
5296 }
5297 
5298 static struct intel_shared_regs *allocate_shared_regs(int cpu)
5299 {
5300 	struct intel_shared_regs *regs;
5301 	int i;
5302 
5303 	regs = kzalloc_node(sizeof(struct intel_shared_regs),
5304 			    GFP_KERNEL, cpu_to_node(cpu));
5305 	if (regs) {
5306 		/*
5307 		 * initialize the locks to keep lockdep happy
5308 		 */
5309 		for (i = 0; i < EXTRA_REG_MAX; i++)
5310 			raw_spin_lock_init(&regs->regs[i].lock);
5311 
5312 		regs->core_id = -1;
5313 	}
5314 	return regs;
5315 }
5316 
5317 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
5318 {
5319 	struct intel_excl_cntrs *c;
5320 
5321 	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
5322 			 GFP_KERNEL, cpu_to_node(cpu));
5323 	if (c) {
5324 		raw_spin_lock_init(&c->lock);
5325 		c->core_id = -1;
5326 	}
5327 	return c;
5328 }
5329 
5330 
5331 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
5332 {
5333 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
5334 
5335 	if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
5336 		cpuc->shared_regs = allocate_shared_regs(cpu);
5337 		if (!cpuc->shared_regs)
5338 			goto err;
5339 	}
5340 
5341 	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
5342 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
5343 
5344 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
5345 		if (!cpuc->constraint_list)
5346 			goto err_shared_regs;
5347 	}
5348 
5349 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5350 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
5351 		if (!cpuc->excl_cntrs)
5352 			goto err_constraint_list;
5353 
5354 		cpuc->excl_thread_id = 0;
5355 	}
5356 
5357 	return 0;
5358 
5359 err_constraint_list:
5360 	kfree(cpuc->constraint_list);
5361 	cpuc->constraint_list = NULL;
5362 
5363 err_shared_regs:
5364 	kfree(cpuc->shared_regs);
5365 	cpuc->shared_regs = NULL;
5366 
5367 err:
5368 	return -ENOMEM;
5369 }
5370 
5371 static int intel_pmu_cpu_prepare(int cpu)
5372 {
5373 	int ret;
5374 
5375 	ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
5376 	if (ret)
5377 		return ret;
5378 
5379 	return alloc_arch_pebs_buf_on_cpu(cpu);
5380 }
5381 
5382 static void flip_smm_bit(void *data)
5383 {
5384 	unsigned long set = *(unsigned long *)data;
5385 
5386 	if (set > 0) {
5387 		msr_set_bit(MSR_IA32_DEBUGCTLMSR,
5388 			    DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5389 	} else {
5390 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
5391 			      DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5392 	}
5393 }
5394 
5395 static void intel_pmu_check_counters_mask(u64 *cntr_mask,
5396 					  u64 *fixed_cntr_mask,
5397 					  u64 *intel_ctrl)
5398 {
5399 	unsigned int bit;
5400 
5401 	bit = fls64(*cntr_mask);
5402 	if (bit > INTEL_PMC_MAX_GENERIC) {
5403 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5404 		     bit, INTEL_PMC_MAX_GENERIC);
5405 		*cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5406 	}
5407 	*intel_ctrl = *cntr_mask;
5408 
5409 	bit = fls64(*fixed_cntr_mask);
5410 	if (bit > INTEL_PMC_MAX_FIXED) {
5411 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5412 		     bit, INTEL_PMC_MAX_FIXED);
5413 		*fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
5414 	}
5415 
5416 	*intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
5417 }
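
/*
 * Worked example (illustrative): with *cntr_mask = 0xff (8 GP counters)
 * and *fixed_cntr_mask = 0x7 (3 fixed counters), the resulting
 * *intel_ctrl is 0xff | (0x7 << INTEL_PMC_IDX_FIXED) = 0x7000000ff,
 * matching the GLOBAL_CTRL layout of GP enable bits in the low half
 * and fixed enable bits from bit 32 upwards.
 */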
5418 
5419 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5420 					      u64 cntr_mask,
5421 					      u64 fixed_cntr_mask,
5422 					      u64 intel_ctrl);
5423 
5424 enum dyn_constr_type {
5425 	DYN_CONSTR_NONE,
5426 	DYN_CONSTR_BR_CNTR,
5427 	DYN_CONSTR_ACR_CNTR,
5428 	DYN_CONSTR_ACR_CAUSE,
5429 	DYN_CONSTR_PEBS,
5430 	DYN_CONSTR_PDIST,
5431 
5432 	DYN_CONSTR_MAX,
5433 };
5434 
5435 static const char * const dyn_constr_type_name[] = {
5436 	[DYN_CONSTR_NONE] = "a normal event",
5437 	[DYN_CONSTR_BR_CNTR] = "a branch counter logging event",
5438 	[DYN_CONSTR_ACR_CNTR] = "an auto-counter reload event",
5439 	[DYN_CONSTR_ACR_CAUSE] = "an auto-counter reload cause event",
5440 	[DYN_CONSTR_PEBS] = "a PEBS event",
5441 	[DYN_CONSTR_PDIST] = "a PEBS PDIST event",
5442 };
5443 
5444 static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
5445 					 enum dyn_constr_type type, u64 mask)
5446 {
5447 	struct event_constraint *c1, *c2;
5448 	int new_weight, check_weight;
5449 	u64 new_mask, check_mask;
5450 
5451 	for_each_event_constraint(c1, constr) {
5452 		new_mask = c1->idxmsk64 & mask;
5453 		new_weight = hweight64(new_mask);
5454 
5455 		/* ignore topdown perf metrics event */
5456 		if (c1->idxmsk64 & INTEL_PMC_MSK_TOPDOWN)
5457 			continue;
5458 
5459 		if (!new_weight && fls64(c1->idxmsk64) < INTEL_PMC_IDX_FIXED) {
5460 			pr_info("The event 0x%llx is not supported as %s.\n",
5461 				c1->code, dyn_constr_type_name[type]);
5462 		}
5463 
5464 		if (new_weight <= 1)
5465 			continue;
5466 
5467 		for_each_event_constraint(c2, c1 + 1) {
5468 			bool check_fail = false;
5469 
5470 			check_mask = c2->idxmsk64 & mask;
5471 			check_weight = hweight64(check_mask);
5472 
5473 			if (c2->idxmsk64 & INTEL_PMC_MSK_TOPDOWN ||
5474 			    !check_weight)
5475 				continue;
5476 
5477 			/* The same constraints or no overlap */
5478 			if (new_mask == check_mask ||
5479 			    (new_mask ^ check_mask) == (new_mask | check_mask))
5480 				continue;
5481 
5482 			/*
5483 			 * A scheduler issue may be triggered in the following cases.
5484 			 * - Two overlap constraints have the same weight.
5485 			 *   E.g., A constraints: 0x3, B constraints: 0x6
5486 			 *   event	counter		failure case
5487 			 *   B		PMC[2:1]	1
5488 			 *   A		PMC[1:0]	0
5489 			 *   A		PMC[1:0]	FAIL
5490 			 * - Two overlap constraints have different weight.
5491 			 *   The constraint has a low weight, but has high last bit.
5492 			 *   E.g., A constraints: 0x7, B constraints: 0xC
5493 			 *   event	counter		failure case
5494 			 *   B		PMC[3:2]	2
5495 			 *   A		PMC[2:0]	0
5496 			 *   A		PMC[2:0]	1
5497 			 *   A		PMC[2:0]	FAIL
5498 			 */
5499 			if (new_weight == check_weight) {
5500 				check_fail = true;
5501 			} else if (new_weight < check_weight) {
5502 				if ((new_mask | check_mask) != check_mask &&
5503 				    fls64(new_mask) > fls64(check_mask))
5504 					check_fail = true;
5505 			} else {
5506 				if ((new_mask | check_mask) != new_mask &&
5507 				    fls64(new_mask) < fls64(check_mask))
5508 					check_fail = true;
5509 			}
5510 
5511 			if (check_fail) {
5512 				pr_info("The two events 0x%llx and 0x%llx may not be "
5513 					"fully scheduled under some circumstances as "
5514 					"%s.\n",
5515 					c1->code, c2->code, dyn_constr_type_name[type]);
5516 			}
5517 		}
5518 	}
5519 }
5520 
5521 static void intel_pmu_check_dyn_constr(struct pmu *pmu,
5522 				       struct event_constraint *constr,
5523 				       u64 cntr_mask)
5524 {
5525 	enum dyn_constr_type i;
5526 	u64 mask;
5527 
5528 	for (i = DYN_CONSTR_NONE; i < DYN_CONSTR_MAX; i++) {
5529 		mask = 0;
5530 		switch (i) {
5531 		case DYN_CONSTR_NONE:
5532 			mask = cntr_mask;
5533 			break;
5534 		case DYN_CONSTR_BR_CNTR:
5535 			if (x86_pmu.flags & PMU_FL_BR_CNTR)
5536 				mask = x86_pmu.lbr_counters;
5537 			break;
5538 		case DYN_CONSTR_ACR_CNTR:
5539 			mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5540 			break;
5541 		case DYN_CONSTR_ACR_CAUSE:
5542 			if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
5543 				continue;
5544 			mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5545 			break;
5546 		case DYN_CONSTR_PEBS:
5547 			if (x86_pmu.arch_pebs)
5548 				mask = hybrid(pmu, arch_pebs_cap).counters;
5549 			break;
5550 		case DYN_CONSTR_PDIST:
5551 			if (x86_pmu.arch_pebs)
5552 				mask = hybrid(pmu, arch_pebs_cap).pdists;
5553 			break;
5554 		default:
5555 			pr_warn("Unsupported dynamic constraint type %d\n", i);
5556 		}
5557 
5558 		if (mask)
5559 			__intel_pmu_check_dyn_constr(constr, i, mask);
5560 	}
5561 }
5562 
5563 static void intel_pmu_check_event_constraints_all(struct pmu *pmu)
5564 {
5565 	struct event_constraint *event_constraints = hybrid(pmu, event_constraints);
5566 	struct event_constraint *pebs_constraints = hybrid(pmu, pebs_constraints);
5567 	u64 cntr_mask = hybrid(pmu, cntr_mask64);
5568 	u64 fixed_cntr_mask = hybrid(pmu, fixed_cntr_mask64);
5569 	u64 intel_ctrl = hybrid(pmu, intel_ctrl);
5570 
5571 	intel_pmu_check_event_constraints(event_constraints, cntr_mask,
5572 					  fixed_cntr_mask, intel_ctrl);
5573 
5574 	if (event_constraints)
5575 		intel_pmu_check_dyn_constr(pmu, event_constraints, cntr_mask);
5576 
5577 	if (pebs_constraints)
5578 		intel_pmu_check_dyn_constr(pmu, pebs_constraints, cntr_mask);
5579 }
5580 
5581 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
5582 
5583 static inline bool intel_pmu_broken_perf_cap(void)
5584 {
5585 	/* The Perf Metric (Bit 15) is always cleared */
5586 	if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
5587 	    boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
5588 		return true;
5589 
5590 	return false;
5591 }
5592 
5593 static inline void __intel_update_pmu_caps(struct pmu *pmu)
5594 {
5595 	struct pmu *dest_pmu = pmu ? pmu : x86_get_pmu(smp_processor_id());
5596 
5597 	if (hybrid(pmu, arch_pebs_cap).caps & ARCH_PEBS_VECR_XMM)
5598 		dest_pmu->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
5599 }
5600 
5601 static inline void __intel_update_large_pebs_flags(struct pmu *pmu)
5602 {
5603 	u64 caps = hybrid(pmu, arch_pebs_cap).caps;
5604 
5605 	x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
5606 	if (caps & ARCH_PEBS_LBR)
5607 		x86_pmu.large_pebs_flags |= PERF_SAMPLE_BRANCH_STACK;
5608 	if (caps & ARCH_PEBS_CNTR_MASK)
5609 		x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ;
5610 
5611 	if (!(caps & ARCH_PEBS_AUX))
5612 		x86_pmu.large_pebs_flags &= ~PERF_SAMPLE_DATA_SRC;
5613 	if (!(caps & ARCH_PEBS_GPR)) {
5614 		x86_pmu.large_pebs_flags &=
5615 			~(PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER);
5616 	}
5617 }
5618 
5619 #define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
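
/*
 * Example (illustrative): counter_mask(0x3f, 0x3) =
 * 0x3f | ((u64)0x3 << 32) = 0x30000003f, i.e. GP counters 0-5 in the
 * low bits and fixed counters 0-1 from bit INTEL_PMC_IDX_FIXED.
 */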
5620 
5621 static void update_pmu_cap(struct pmu *pmu)
5622 {
5623 	unsigned int eax, ebx, ecx, edx;
5624 	union cpuid35_eax eax_0;
5625 	union cpuid35_ebx ebx_0;
5626 	u64 cntrs_mask = 0;
5627 	u64 pebs_mask = 0;
5628 	u64 pdists_mask = 0;
5629 
5630 	cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
5631 
5632 	if (ebx_0.split.umask2)
5633 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
5634 	if (ebx_0.split.eq)
5635 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
5636 
5637 	if (eax_0.split.cntr_subleaf) {
5638 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
5639 			    &eax, &ebx, &ecx, &edx);
5640 		hybrid(pmu, cntr_mask64) = eax;
5641 		hybrid(pmu, fixed_cntr_mask64) = ebx;
5642 		cntrs_mask = counter_mask(eax, ebx);
5643 	}
5644 
5645 	if (eax_0.split.acr_subleaf) {
5646 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
5647 			    &eax, &ebx, &ecx, &edx);
5648 		/* The mask of the counters which can be reloaded */
5649 		hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
5650 		/* The mask of the counters which can cause a reload of reloadable counters */
5651 		hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
5652 	}
5653 
5654 	/* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
5655 	if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
5656 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
5657 			    &eax, &ebx, &ecx, &edx);
5658 		hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
5659 
5660 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
5661 			    &eax, &ebx, &ecx, &edx);
5662 		pebs_mask   = counter_mask(eax, ecx);
5663 		pdists_mask = counter_mask(ebx, edx);
5664 		hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
5665 		hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
5666 
5667 		if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) {
5668 			x86_pmu.arch_pebs = 0;
5669 		} else {
5670 			__intel_update_pmu_caps(pmu);
5671 			__intel_update_large_pebs_flags(pmu);
5672 		}
5673 	} else {
5674 		WARN_ON(x86_pmu.arch_pebs == 1);
5675 		x86_pmu.arch_pebs = 0;
5676 	}
5677 
5678 	if (!intel_pmu_broken_perf_cap()) {
5679 		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
5680 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
5681 	}
5682 }
5683 
5684 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
5685 {
5686 	intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
5687 				      &pmu->intel_ctrl);
5688 	pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
5689 	pmu->unconstrained = (struct event_constraint)
5690 			     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
5691 						0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
5692 
5693 	if (pmu->intel_cap.perf_metrics)
5694 		pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
5695 	else
5696 		pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
5697 
5698 	intel_pmu_check_event_constraints_all(&pmu->pmu);
5699 
5700 	intel_pmu_check_extra_regs(pmu->extra_regs);
5701 }
5702 
5703 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
5704 {
5705 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
5706 	enum intel_cpu_type cpu_type = c->topo.intel_type;
5707 	int i;
5708 
5709 	/*
5710 	 * This is running on a CPU model that is known to have hybrid
5711 	 * configurations. But the CPU told us it is not hybrid, shame
5712 	 * on it. There should be a fixup function provided for these
5713 	 * troublesome CPUs (->get_hybrid_cpu_type).
5714 	 */
5715 	if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
5716 		if (x86_pmu.get_hybrid_cpu_type)
5717 			cpu_type = x86_pmu.get_hybrid_cpu_type();
5718 		else
5719 			return NULL;
5720 	}
5721 
5722 	/*
5723 	 * This essentially just maps between the 'hybrid_cpu_type'
5724 	 * and 'hybrid_pmu_type' enums, except for the ARL-H processor,
5725 	 * which needs to compare the Atom uarch native id, since ARL-H
5726 	 * contains two different Atom uarchs.
5727 	 */
5728 	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5729 		enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
5730 		u32 native_id;
5731 
5732 		if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
5733 			return &x86_pmu.hybrid_pmu[i];
5734 		if (cpu_type == INTEL_CPU_TYPE_ATOM) {
5735 			if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
5736 				return &x86_pmu.hybrid_pmu[i];
5737 
5738 			native_id = c->topo.intel_native_model_id;
5739 			if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
5740 				return &x86_pmu.hybrid_pmu[i];
5741 			if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
5742 				return &x86_pmu.hybrid_pmu[i];
5743 		}
5744 	}
5745 
5746 	return NULL;
5747 }
5748 
5749 static bool init_hybrid_pmu(int cpu)
5750 {
5751 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5752 	struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
5753 
5754 	if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
5755 		cpuc->pmu = NULL;
5756 		return false;
5757 	}
5758 
5759 	/* Only check and dump the PMU information for the first CPU */
5760 	if (!cpumask_empty(&pmu->supported_cpus))
5761 		goto end;
5762 
5763 	if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
5764 		update_pmu_cap(&pmu->pmu);
5765 
5766 	intel_pmu_check_hybrid_pmus(pmu);
5767 
5768 	if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
5769 		return false;
5770 
5771 	pr_info("%s PMU driver: ", pmu->name);
5772 
5773 	pr_cont("\n");
5774 
5775 	x86_pmu_show_pmu_cap(&pmu->pmu);
5776 
5777 end:
5778 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
5779 	cpuc->pmu = &pmu->pmu;
5780 
5781 	return true;
5782 }
5783 
5784 static void intel_pmu_cpu_starting(int cpu)
5785 {
5786 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5787 	int core_id = topology_core_id(cpu);
5788 	int i;
5789 
5790 	if (is_hybrid() && !init_hybrid_pmu(cpu))
5791 		return;
5792 
5793 	init_debug_store_on_cpu(cpu);
5794 	init_arch_pebs_on_cpu(cpu);
5795 	/*
5796 	 * Deal with CPUs that don't clear their LBRs on power-up, and that may
5797 	 * even boot with LBRs enabled.
5798 	 */
5799 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
5800 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
5801 	intel_pmu_lbr_reset();
5802 
5803 	cpuc->lbr_sel = NULL;
5804 
5805 	if (x86_pmu.flags & PMU_FL_TFA) {
5806 		WARN_ON_ONCE(cpuc->tfa_shadow);
5807 		cpuc->tfa_shadow = ~0ULL;
5808 		intel_set_tfa(cpuc, false);
5809 	}
5810 
5811 	if (x86_pmu.version > 1)
5812 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
5813 
5814 	/*
5815 	 * Disable perf metrics if any added CPU doesn't support it.
5816 	 *
5817 	 * Turn off the check for a hybrid architecture, because the
5818 	 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
5819 	 * architectural features. Perf metrics is a model-specific
5820 	 * feature for now. The corresponding bit should always be 0 on
5821 	 * a hybrid platform, e.g., Alder Lake.
5822 	 */
5823 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
5824 		union perf_capabilities perf_cap;
5825 
5826 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
5827 		if (!perf_cap.perf_metrics) {
5828 			x86_pmu.intel_cap.perf_metrics = 0;
5829 			x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS;
5830 		}
5831 	}
5832 
5833 	__intel_update_pmu_caps(cpuc->pmu);
5834 
5835 	if (!cpuc->shared_regs)
5836 		return;
5837 
5838 	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
5839 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5840 			struct intel_shared_regs *pc;
5841 
5842 			pc = per_cpu(cpu_hw_events, i).shared_regs;
5843 			if (pc && pc->core_id == core_id) {
5844 				cpuc->kfree_on_online[0] = cpuc->shared_regs;
5845 				cpuc->shared_regs = pc;
5846 				break;
5847 			}
5848 		}
5849 		cpuc->shared_regs->core_id = core_id;
5850 		cpuc->shared_regs->refcnt++;
5851 	}
5852 
5853 	if (x86_pmu.lbr_sel_map)
5854 		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
5855 
5856 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5857 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5858 			struct cpu_hw_events *sibling;
5859 			struct intel_excl_cntrs *c;
5860 
5861 			sibling = &per_cpu(cpu_hw_events, i);
5862 			c = sibling->excl_cntrs;
5863 			if (c && c->core_id == core_id) {
5864 				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
5865 				cpuc->excl_cntrs = c;
5866 				if (!sibling->excl_thread_id)
5867 					cpuc->excl_thread_id = 1;
5868 				break;
5869 			}
5870 		}
5871 		cpuc->excl_cntrs->core_id = core_id;
5872 		cpuc->excl_cntrs->refcnt++;
5873 	}
5874 }
5875 
5876 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
5877 {
5878 	struct intel_excl_cntrs *c;
5879 
5880 	c = cpuc->excl_cntrs;
5881 	if (c) {
5882 		if (c->core_id == -1 || --c->refcnt == 0)
5883 			kfree(c);
5884 		cpuc->excl_cntrs = NULL;
5885 	}
5886 
5887 	kfree(cpuc->constraint_list);
5888 	cpuc->constraint_list = NULL;
5889 }
5890 
5891 static void intel_pmu_cpu_dying(int cpu)
5892 {
5893 	fini_debug_store_on_cpu(cpu);
5894 	fini_arch_pebs_on_cpu(cpu);
5895 }
5896 
5897 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
5898 {
5899 	struct intel_shared_regs *pc;
5900 
5901 	pc = cpuc->shared_regs;
5902 	if (pc) {
5903 		if (pc->core_id == -1 || --pc->refcnt == 0)
5904 			kfree(pc);
5905 		cpuc->shared_regs = NULL;
5906 	}
5907 
5908 	free_excl_cntrs(cpuc);
5909 }
5910 
5911 static void intel_pmu_cpu_dead(int cpu)
5912 {
5913 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5914 
5915 	release_arch_pebs_buf_on_cpu(cpu);
5916 	intel_cpuc_finish(cpuc);
5917 
5918 	if (is_hybrid() && cpuc->pmu)
5919 		cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
5920 }
5921 
5922 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5923 				 struct task_struct *task, bool sched_in)
5924 {
5925 	intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5926 	intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
5927 }
5928 
5929 static int intel_pmu_check_period(struct perf_event *event, u64 value)
5930 {
5931 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
5932 }
5933 
5934 static void intel_aux_output_init(void)
5935 {
5936 	/* See also intel_pmu_aux_output_match() */
5937 	if (x86_pmu.intel_cap.pebs_output_pt_available)
5938 		x86_pmu.assign = intel_pmu_assign_event;
5939 }
5940 
5941 static int intel_pmu_aux_output_match(struct perf_event *event)
5942 {
5943 	/* intel_pmu_assign_event() is needed, see intel_aux_output_init() */
5944 	if (!x86_pmu.intel_cap.pebs_output_pt_available)
5945 		return 0;
5946 
5947 	return is_intel_pt_event(event);
5948 }
5949 
5950 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
5951 {
5952 	struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
5953 
5954 	*ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5955 }
5956 
5957 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5958 
5959 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5960 
5961 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5962 
5963 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5964 
5965 static struct attribute *intel_arch3_formats_attr[] = {
5966 	&format_attr_event.attr,
5967 	&format_attr_umask.attr,
5968 	&format_attr_edge.attr,
5969 	&format_attr_pc.attr,
5970 	&format_attr_any.attr,
5971 	&format_attr_inv.attr,
5972 	&format_attr_cmask.attr,
5973 	NULL,
5974 };
5975 
5976 static struct attribute *hsw_format_attr[] = {
5977 	&format_attr_in_tx.attr,
5978 	&format_attr_in_tx_cp.attr,
5979 	&format_attr_offcore_rsp.attr,
5980 	&format_attr_ldlat.attr,
5981 	NULL
5982 };
5983 
5984 static struct attribute *nhm_format_attr[] = {
5985 	&format_attr_offcore_rsp.attr,
5986 	&format_attr_ldlat.attr,
5987 	NULL
5988 };
5989 
5990 static struct attribute *slm_format_attr[] = {
5991 	&format_attr_offcore_rsp.attr,
5992 	NULL
5993 };
5994 
5995 static struct attribute *cmt_format_attr[] = {
5996 	&format_attr_offcore_rsp.attr,
5997 	&format_attr_ldlat.attr,
5998 	&format_attr_snoop_rsp.attr,
5999 	NULL
6000 };
6001 
6002 static struct attribute *skl_format_attr[] = {
6003 	&format_attr_frontend.attr,
6004 	NULL,
6005 };
6006 
6007 static __initconst const struct x86_pmu core_pmu = {
6008 	.name			= "core",
6009 	.handle_irq		= x86_pmu_handle_irq,
6010 	.disable_all		= x86_pmu_disable_all,
6011 	.enable_all		= core_pmu_enable_all,
6012 	.enable			= core_pmu_enable_event,
6013 	.disable		= x86_pmu_disable_event,
6014 	.hw_config		= core_pmu_hw_config,
6015 	.schedule_events	= x86_schedule_events,
6016 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
6017 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
6018 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
6019 	.event_map		= intel_pmu_event_map,
6020 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
6021 	.apic			= 1,
6022 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
6023 
6024 	/*
6025 	 * Intel PMCs cannot be accessed sanely above 32-bit width,
6026 	 * so we install an artificial 1<<31 period regardless of
6027 	 * the generic event period:
6028 	 */
6029 	.max_period		= (1ULL<<31) - 1,
6030 	.get_event_constraints	= intel_get_event_constraints,
6031 	.put_event_constraints	= intel_put_event_constraints,
6032 	.event_constraints	= intel_core_event_constraints,
6033 	.guest_get_msrs		= core_guest_get_msrs,
6034 	.format_attrs		= intel_arch_formats_attr,
6035 	.events_sysfs_show	= intel_event_sysfs_show,
6036 
6037 	/*
6038 	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
6039 	 * together with PMU version 1 and thus be using core_pmu with
6040 	 * shared_regs. We need the following callbacks here to allocate
6041 	 * it properly.
6042 	 */
6043 	.cpu_prepare		= intel_pmu_cpu_prepare,
6044 	.cpu_starting		= intel_pmu_cpu_starting,
6045 	.cpu_dying		= intel_pmu_cpu_dying,
6046 	.cpu_dead		= intel_pmu_cpu_dead,
6047 
6048 	.check_period		= intel_pmu_check_period,
6049 
6050 	.lbr_reset		= intel_pmu_lbr_reset_64,
6051 	.lbr_read		= intel_pmu_lbr_read_64,
6052 	.lbr_save		= intel_pmu_lbr_save,
6053 	.lbr_restore		= intel_pmu_lbr_restore,
6054 };
6055 
6056 static __initconst const struct x86_pmu intel_pmu = {
6057 	.name			= "Intel",
6058 	.handle_irq		= intel_pmu_handle_irq,
6059 	.disable_all		= intel_pmu_disable_all,
6060 	.enable_all		= intel_pmu_enable_all,
6061 	.enable			= intel_pmu_enable_event,
6062 	.disable		= intel_pmu_disable_event,
6063 	.add			= intel_pmu_add_event,
6064 	.del			= intel_pmu_del_event,
6065 	.read			= intel_pmu_read_event,
6066 	.set_period		= intel_pmu_set_period,
6067 	.update			= intel_pmu_update,
6068 	.hw_config		= intel_pmu_hw_config,
6069 	.schedule_events	= x86_schedule_events,
6070 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
6071 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
6072 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
6073 	.event_map		= intel_pmu_event_map,
6074 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
6075 	.apic			= 1,
6076 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
6077 	/*
6078 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
6079 	 * so we install an artificial 1<<31 period regardless of
6080 	 * the generic event period:
6081 	 */
6082 	.max_period		= (1ULL << 31) - 1,
6083 	.get_event_constraints	= intel_get_event_constraints,
6084 	.put_event_constraints	= intel_put_event_constraints,
6085 	.pebs_aliases		= intel_pebs_aliases_core2,
6086 
6087 	.format_attrs		= intel_arch3_formats_attr,
6088 	.events_sysfs_show	= intel_event_sysfs_show,
6089 
6090 	.cpu_prepare		= intel_pmu_cpu_prepare,
6091 	.cpu_starting		= intel_pmu_cpu_starting,
6092 	.cpu_dying		= intel_pmu_cpu_dying,
6093 	.cpu_dead		= intel_pmu_cpu_dead,
6094 
6095 	.guest_get_msrs		= intel_guest_get_msrs,
6096 	.sched_task		= intel_pmu_sched_task,
6097 
6098 	.check_period		= intel_pmu_check_period,
6099 
6100 	.aux_output_match	= intel_pmu_aux_output_match,
6101 
6102 	.lbr_reset		= intel_pmu_lbr_reset_64,
6103 	.lbr_read		= intel_pmu_lbr_read_64,
6104 	.lbr_save		= intel_pmu_lbr_save,
6105 	.lbr_restore		= intel_pmu_lbr_restore,
6106 
6107 	/*
6108 	 * SMM has access to all 4 rings and while traditionally SMM code only
6109 	 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
6110 	 *
6111 	 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
6112 	 * between SMM or not, this results in what should be pure userspace
6113 	 * counters including SMM data.
6114 	 *
6115 	 * This is a clear privilege issue, therefore globally disable
6116 	 * counting SMM by default.
6117 	 */
6118 	.attr_freeze_on_smi	= 1,
6119 };
6120 
6121 static __init void intel_clovertown_quirk(void)
6122 {
6123 	/*
6124 	 * PEBS is unreliable due to:
6125 	 *
6126 	 *   AJ67  - PEBS may experience CPL leaks
6127 	 *   AJ68  - PEBS PMI may be delayed by one event
6128 	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
6129 	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
6130 	 *
6131 	 * AJ67 could be worked around by restricting the OS/USR flags.
6132 	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
6133 	 *
6134 	 * AJ106 could possibly be worked around by not allowing LBR
6135 	 *       usage from PEBS, including the fixup.
6136 	 * AJ68  could possibly be worked around by always programming
6137 	 *	 a pebs_event_reset[0] value and coping with the lost events.
6138 	 *
6139 	 * But taken together it might just make sense to not enable PEBS on
6140 	 * these chips.
6141 	 */
6142 	pr_warn("PEBS disabled due to CPU errata\n");
6143 	x86_pmu.ds_pebs = 0;
6144 	x86_pmu.pebs_constraints = NULL;
6145 }
6146 
6147 static const struct x86_cpu_id isolation_ucodes[] = {
6148 	X86_MATCH_VFM_STEPS(INTEL_HASWELL,	 3,  3, 0x0000001f),
6149 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_L,	 1,  1, 0x0000001e),
6150 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_G,	 1,  1, 0x00000015),
6151 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 2,  2, 0x00000037),
6152 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 4,  4, 0x0000000a),
6153 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL,	 4,  4, 0x00000023),
6154 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G,	 1,  1, 0x00000014),
6155 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 2,  2, 0x00000010),
6156 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 3,  3, 0x07000009),
6157 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 4,  4, 0x0f000009),
6158 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 5,  5, 0x0e000002),
6159 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X,	 1,  1, 0x0b000014),
6160 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 3,  3, 0x00000021),
6161 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 4,  7, 0x00000000),
6162 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	11, 11, 0x00000000),
6163 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L,	 3,  3, 0x0000007c),
6164 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE,	 3,  3, 0x0000007c),
6165 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE,	 9, 13, 0x0000004e),
6166 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L,	 9, 12, 0x0000004e),
6167 	{}
6168 };
6169 
6170 static void intel_check_pebs_isolation(void)
6171 {
6172 	x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
6173 }
6174 
6175 static __init void intel_pebs_isolation_quirk(void)
6176 {
6177 	WARN_ON_ONCE(x86_pmu.check_microcode);
6178 	x86_pmu.check_microcode = intel_check_pebs_isolation;
6179 	intel_check_pebs_isolation();
6180 }
6181 
6182 static const struct x86_cpu_id pebs_ucodes[] = {
6183 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE,	7, 7, 0x00000028),
6184 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	6, 6, 0x00000618),
6185 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	7, 7, 0x0000070c),
6186 	{}
6187 };
6188 
6189 static bool intel_snb_pebs_broken(void)
6190 {
6191 	return !x86_match_min_microcode_rev(pebs_ucodes);
6192 }
6193 
6194 static void intel_snb_check_microcode(void)
6195 {
6196 	if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
6197 		return;
6198 
6199 	/*
6200 	 * Serialized by the microcode lock.
6201 	 */
6202 	if (x86_pmu.pebs_broken) {
6203 		pr_info("PEBS enabled due to microcode update\n");
6204 		x86_pmu.pebs_broken = 0;
6205 	} else {
6206 		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
6207 		x86_pmu.pebs_broken = 1;
6208 	}
6209 }
6210 
6211 static bool is_lbr_from(unsigned long msr)
6212 {
6213 	unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
6214 
6215 	return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
6216 }
6217 
6218 /*
6219  * Under certain circumstances, accessing certain MSRs may cause a #GP.
6220  * This function tests whether the input MSR can be safely accessed.
6221  */
6222 static bool check_msr(unsigned long msr, u64 mask)
6223 {
6224 	u64 val_old, val_new, val_tmp;
6225 
6226 	/*
6227 	 * Disable the check for real HW, so we don't
6228 	 * mess with potentially enabled registers:
6229 	 */
6230 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
6231 		return true;
6232 
6233 	/*
6234 	 * Read the current value, change it and read it back to see if it
6235 	 * matches; this is needed to detect certain hardware emulators
6236 	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
6237 	 */
6238 	if (rdmsrq_safe(msr, &val_old))
6239 		return false;
6240 
6241 	/*
6242 	 * Only change the bits which can be updated by wrmsrq.
6243 	 */
6244 	val_tmp = val_old ^ mask;
6245 
6246 	if (is_lbr_from(msr))
6247 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
6248 
6249 	if (wrmsrq_safe(msr, val_tmp) ||
6250 	    rdmsrq_safe(msr, &val_new))
6251 		return false;
6252 
6253 	/*
6254 	 * Quirk only affects validation in wrmsr(), so wrmsrq()'s value
6255 	 * should equal rdmsrq()'s even with the quirk.
6256 	 */
6257 	if (val_new != val_tmp)
6258 		return false;
6259 
6260 	if (is_lbr_from(msr))
6261 		val_old = lbr_from_signext_quirk_wr(val_old);
6262 
6263 	/* At this point the MSR is known to be safely accessible.
6264 	 * Restore the old value and return.
6265 	 */
6266 	wrmsrq(msr, val_old);
6267 
6268 	return true;
6269 }
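
/*
 * Usage sketch (illustrative, hypothetical call site): a caller probing
 * the LBR MSRs could do
 *
 *	if (!check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 *
 * i.e. flip two low bits, verify the write sticks, restore the original
 * value, and disable the feature when the probe fails.
 */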
6270 
6271 static __init void intel_sandybridge_quirk(void)
6272 {
6273 	x86_pmu.check_microcode = intel_snb_check_microcode;
6274 	cpus_read_lock();
6275 	intel_snb_check_microcode();
6276 	cpus_read_unlock();
6277 }
6278 
6279 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
6280 	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
6281 	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
6282 	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
6283 	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
6284 	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
6285 	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
6286 	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
6287 };
6288 
6289 static __init void intel_arch_events_quirk(void)
6290 {
6291 	int bit;
6292 
6293 	/* Disable events reported as not present by CPUID */
6294 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
6295 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
6296 		pr_warn("CPUID marked event: \'%s\' unavailable\n",
6297 			intel_arch_events_map[bit].name);
6298 	}
6299 }
6300 
6301 static __init void intel_nehalem_quirk(void)
6302 {
6303 	union cpuid10_ebx ebx;
6304 
6305 	ebx.full = x86_pmu.events_maskl;
6306 	if (ebx.split.no_branch_misses_retired) {
6307 		/*
6308 		 * Erratum AAJ80 detected, we work it around by using
6309 		 * the BR_MISP_EXEC.ANY event. This will over-count
6310 		 * branch-misses, but it's still much better than the
6311 		 * architectural event which is often completely bogus:
6312 		 */
6313 		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
6314 		ebx.split.no_branch_misses_retired = 0;
6315 		x86_pmu.events_maskl = ebx.full;
6316 		pr_info("CPU erratum AAJ80 worked around\n");
6317 	}
6318 }
6319 
6320 /*
6321  * enable software workaround for errata:
6322  * SNB: BJ122
6323  * IVB: BV98
6324  * HSW: HSD29
6325  *
6326  * Only needed when HT is enabled. However, detecting whether HT is
6327  * enabled is difficult (model specific). So instead, we enable the
6328  * workaround during early boot, and verify whether it is needed in a
6329  * later initcall phase, once we have valid topology information to
6330  * check if HT is actually enabled.
6331  */
6332 static __init void intel_ht_bug(void)
6333 {
6334 	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
6335 
6336 	x86_pmu.start_scheduling = intel_start_scheduling;
6337 	x86_pmu.commit_scheduling = intel_commit_scheduling;
6338 	x86_pmu.stop_scheduling = intel_stop_scheduling;
6339 }
6340 
6341 EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
6342 EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82")
6343 
6344 /* Haswell special events */
6345 EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
6346 EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
6347 EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
6348 EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
6349 EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
6350 EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
6351 EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
6352 EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
6353 EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
6354 EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
6355 EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
6356 EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");
6357 
6358 static struct attribute *hsw_events_attrs[] = {
6359 	EVENT_PTR(td_slots_issued),
6360 	EVENT_PTR(td_slots_retired),
6361 	EVENT_PTR(td_fetch_bubbles),
6362 	EVENT_PTR(td_total_slots),
6363 	EVENT_PTR(td_total_slots_scale),
6364 	EVENT_PTR(td_recovery_bubbles),
6365 	EVENT_PTR(td_recovery_bubbles_scale),
6366 	NULL
6367 };
6368 
6369 static struct attribute *hsw_mem_events_attrs[] = {
6370 	EVENT_PTR(mem_ld_hsw),
6371 	EVENT_PTR(mem_st_hsw),
6372 	NULL,
6373 };
6374 
6375 static struct attribute *hsw_tsx_events_attrs[] = {
6376 	EVENT_PTR(tx_start),
6377 	EVENT_PTR(tx_commit),
6378 	EVENT_PTR(tx_abort),
6379 	EVENT_PTR(tx_capacity),
6380 	EVENT_PTR(tx_conflict),
6381 	EVENT_PTR(el_start),
6382 	EVENT_PTR(el_commit),
6383 	EVENT_PTR(el_abort),
6384 	EVENT_PTR(el_capacity),
6385 	EVENT_PTR(el_conflict),
6386 	EVENT_PTR(cycles_t),
6387 	EVENT_PTR(cycles_ct),
6388 	NULL
6389 };
6390 
6391 EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
6392 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
6393 EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
6394 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
6395 
6396 static struct attribute *icl_events_attrs[] = {
6397 	EVENT_PTR(mem_ld_hsw),
6398 	EVENT_PTR(mem_st_hsw),
6399 	NULL,
6400 };
6401 
6402 static struct attribute *icl_td_events_attrs[] = {
6403 	EVENT_PTR(slots),
6404 	EVENT_PTR(td_retiring),
6405 	EVENT_PTR(td_bad_spec),
6406 	EVENT_PTR(td_fe_bound),
6407 	EVENT_PTR(td_be_bound),
6408 	NULL,
6409 };
6410 
6411 static struct attribute *icl_tsx_events_attrs[] = {
6412 	EVENT_PTR(tx_start),
6413 	EVENT_PTR(tx_abort),
6414 	EVENT_PTR(tx_commit),
6415 	EVENT_PTR(tx_capacity_read),
6416 	EVENT_PTR(tx_capacity_write),
6417 	EVENT_PTR(tx_conflict),
6418 	EVENT_PTR(el_start),
6419 	EVENT_PTR(el_abort),
6420 	EVENT_PTR(el_commit),
6421 	EVENT_PTR(el_capacity_read),
6422 	EVENT_PTR(el_capacity_write),
6423 	EVENT_PTR(el_conflict),
6424 	EVENT_PTR(cycles_t),
6425 	EVENT_PTR(cycles_ct),
6426 	NULL,
6427 };
6428 
6429 
6430 EVENT_ATTR_STR(mem-stores,	mem_st_spr,	"event=0xcd,umask=0x2");
6431 EVENT_ATTR_STR(mem-loads-aux,	mem_ld_aux,	"event=0x03,umask=0x82");
6432 
6433 static struct attribute *glc_events_attrs[] = {
6434 	EVENT_PTR(mem_ld_hsw),
6435 	EVENT_PTR(mem_st_spr),
6436 	EVENT_PTR(mem_ld_aux),
6437 	NULL,
6438 };
6439 
6440 static struct attribute *glc_td_events_attrs[] = {
6441 	EVENT_PTR(slots),
6442 	EVENT_PTR(td_retiring),
6443 	EVENT_PTR(td_bad_spec),
6444 	EVENT_PTR(td_fe_bound),
6445 	EVENT_PTR(td_be_bound),
6446 	EVENT_PTR(td_heavy_ops),
6447 	EVENT_PTR(td_br_mispredict),
6448 	EVENT_PTR(td_fetch_lat),
6449 	EVENT_PTR(td_mem_bound),
6450 	NULL,
6451 };
6452 
6453 static struct attribute *glc_tsx_events_attrs[] = {
6454 	EVENT_PTR(tx_start),
6455 	EVENT_PTR(tx_abort),
6456 	EVENT_PTR(tx_commit),
6457 	EVENT_PTR(tx_capacity_read),
6458 	EVENT_PTR(tx_capacity_write),
6459 	EVENT_PTR(tx_conflict),
6460 	EVENT_PTR(cycles_t),
6461 	EVENT_PTR(cycles_ct),
6462 	NULL,
6463 };
6464 
6465 static ssize_t freeze_on_smi_show(struct device *cdev,
6466 				  struct device_attribute *attr,
6467 				  char *buf)
6468 {
6469 	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
6470 }
6471 
6472 static DEFINE_MUTEX(freeze_on_smi_mutex);
6473 
6474 static ssize_t freeze_on_smi_store(struct device *cdev,
6475 				   struct device_attribute *attr,
6476 				   const char *buf, size_t count)
6477 {
6478 	unsigned long val;
6479 	ssize_t ret;
6480 
6481 	ret = kstrtoul(buf, 0, &val);
6482 	if (ret)
6483 		return ret;
6484 
6485 	if (val > 1)
6486 		return -EINVAL;
6487 
6488 	mutex_lock(&freeze_on_smi_mutex);
6489 
6490 	if (x86_pmu.attr_freeze_on_smi == val)
6491 		goto done;
6492 
6493 	x86_pmu.attr_freeze_on_smi = val;
6494 
6495 	cpus_read_lock();
6496 	on_each_cpu(flip_smm_bit, &val, 1);
6497 	cpus_read_unlock();
6498 done:
6499 	mutex_unlock(&freeze_on_smi_mutex);
6500 
6501 	return count;
6502 }
6503 
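/*
 * TSX Force Abort (TFA) handling: the workaround claims PMC3 whenever
 * TSX may be used, so flipping allow_tsx_force_abort must reschedule
 * any event currently occupying PMC3.
 */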
6504 static void update_tfa_sched(void *ignored)
6505 {
6506 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
6507 
6508 	/*
6509 	 * Check whether PMC3 is in use and, if so, force a schedule out
6510 	 * of all event types in all contexts.
6511 	 */
6512 	if (test_bit(3, cpuc->active_mask))
6513 		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
6514 }
6515 
6516 static ssize_t show_sysctl_tfa(struct device *cdev,
6517 			      struct device_attribute *attr,
6518 			      char *buf)
6519 {
6520 	return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
6521 }
6522 
6523 static ssize_t set_sysctl_tfa(struct device *cdev,
6524 			      struct device_attribute *attr,
6525 			      const char *buf, size_t count)
6526 {
6527 	bool val;
6528 	ssize_t ret;
6529 
6530 	ret = kstrtobool(buf, &val);
6531 	if (ret)
6532 		return ret;
6533 
6534 	/* no change */
6535 	if (val == allow_tsx_force_abort)
6536 		return count;
6537 
6538 	allow_tsx_force_abort = val;
6539 
6540 	cpus_read_lock();
6541 	on_each_cpu(update_tfa_sched, NULL, 1);
6542 	cpus_read_unlock();
6543 
6544 	return count;
6545 }
6546 
6547 
6548 static DEVICE_ATTR_RW(freeze_on_smi);
6549 
6550 static ssize_t branches_show(struct device *cdev,
6551 			     struct device_attribute *attr,
6552 			     char *buf)
6553 {
6554 	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
6555 }
6556 
6557 static DEVICE_ATTR_RO(branches);
6558 
6559 static ssize_t branch_counter_nr_show(struct device *cdev,
6560 				      struct device_attribute *attr,
6561 				      char *buf)
6562 {
6563 	return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
6564 }
6565 
6566 static DEVICE_ATTR_RO(branch_counter_nr);
6567 
6568 static ssize_t branch_counter_width_show(struct device *cdev,
6569 					 struct device_attribute *attr,
6570 					 char *buf)
6571 {
6572 	return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
6573 }
6574 
6575 static DEVICE_ATTR_RO(branch_counter_width);
6576 
6577 static struct attribute *lbr_attrs[] = {
6578 	&dev_attr_branches.attr,
6579 	&dev_attr_branch_counter_nr.attr,
6580 	&dev_attr_branch_counter_width.attr,
6581 	NULL
6582 };
6583 
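/*
 * Index 0 is the plain "branches" attribute, which only requires LBR
 * support; the remaining entries describe the branch-counter
 * capability and additionally require PMU_FL_BR_CNTR.
 */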
6584 static umode_t
6585 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6586 {
6587 	/* branches */
6588 	if (i == 0)
6589 		return x86_pmu.lbr_nr ? attr->mode : 0;
6590 
6591 	return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
6592 }
6593 
6594 static char pmu_name_str[30];
6595 
6596 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
6597 
6598 static struct attribute *intel_pmu_caps_attrs[] = {
6599 	&dev_attr_pmu_name.attr.attr,
6600 	NULL
6601 };
6602 
6603 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
6604 		   show_sysctl_tfa,
6605 		   set_sysctl_tfa);
6606 
6607 static struct attribute *intel_pmu_attrs[] = {
6608 	&dev_attr_freeze_on_smi.attr,
6609 	&dev_attr_allow_tsx_force_abort.attr,
6610 	NULL,
6611 };
6612 
6613 static umode_t
6614 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6615 {
6616 	if (attr == &dev_attr_allow_tsx_force_abort.attr)
6617 		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
6618 
6619 	return attr->mode;
6620 }
6621 
6622 static umode_t
6623 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6624 {
6625 	return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
6626 }
6627 
6628 static umode_t
6629 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6630 {
6631 	return intel_pmu_has_pebs() ? attr->mode : 0;
6632 }
6633 
6634 static umode_t
6635 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6636 {
6637 	if (attr == &event_attr_mem_ld_aux.attr.attr)
6638 		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
6639 
6640 	return pebs_is_visible(kobj, attr, i);
6641 }
6642 
6643 static umode_t
6644 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6645 {
6646 	return x86_pmu.version >= 2 ? attr->mode : 0;
6647 }
6648 
6649 static umode_t
6650 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6651 {
6652 	/*
6653 	 * Hide the perf metrics topdown events
6654 	 * if the feature is not enumerated.
6655 	 */
6656 	if (x86_pmu.num_topdown_events)
6657 		return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
6658 
6659 	return attr->mode;
6660 }
6661 
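/*
 * Auto Counter Reload (ACR): "acr_mask" in config2 selects the related
 * counters; the format attribute is only exposed on PMUs that
 * enumerate ACR support (see acr_is_visible() below).
 */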
6662 PMU_FORMAT_ATTR(acr_mask,	"config2:0-63");
6663 
6664 static struct attribute *format_acr_attrs[] = {
6665 	&format_attr_acr_mask.attr,
6666 	NULL
6667 };
6668 
6669 static umode_t
6670 acr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6671 {
6672 	struct device *dev = kobj_to_dev(kobj);
6673 
6674 	return intel_pmu_has_acr(dev_get_drvdata(dev)) ? attr->mode : 0;
6675 }
6676 
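/*
 * Several of the groups below intentionally share a .name ("events" or
 * "format"): the sysfs attr_update machinery merges same-named groups
 * into one directory, applying each group's ->is_visible() callback
 * only to its own attributes.
 */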
6677 static struct attribute_group group_events_td  = {
6678 	.name = "events",
6679 	.is_visible = td_is_visible,
6680 };
6681 
6682 static struct attribute_group group_events_mem = {
6683 	.name       = "events",
6684 	.is_visible = mem_is_visible,
6685 };
6686 
6687 static struct attribute_group group_events_tsx = {
6688 	.name       = "events",
6689 	.is_visible = tsx_is_visible,
6690 };
6691 
6692 static struct attribute_group group_caps_gen = {
6693 	.name  = "caps",
6694 	.attrs = intel_pmu_caps_attrs,
6695 };
6696 
6697 static struct attribute_group group_caps_lbr = {
6698 	.name       = "caps",
6699 	.attrs	    = lbr_attrs,
6700 	.is_visible = lbr_is_visible,
6701 };
6702 
6703 static struct attribute_group group_format_extra = {
6704 	.name       = "format",
6705 	.is_visible = exra_is_visible,
6706 };
6707 
6708 static struct attribute_group group_format_extra_skl = {
6709 	.name       = "format",
6710 	.is_visible = exra_is_visible,
6711 };
6712 
6713 static struct attribute_group group_format_evtsel_ext = {
6714 	.name       = "format",
6715 	.attrs      = format_evtsel_ext_attrs,
6716 	.is_visible = evtsel_ext_is_visible,
6717 };
6718 
6719 static struct attribute_group group_format_acr = {
6720 	.name       = "format",
6721 	.attrs      = format_acr_attrs,
6722 	.is_visible = acr_is_visible,
6723 };
6724 
6725 static struct attribute_group group_default = {
6726 	.attrs      = intel_pmu_attrs,
6727 	.is_visible = default_is_visible,
6728 };
6729 
6730 static const struct attribute_group *attr_update[] = {
6731 	&group_events_td,
6732 	&group_events_mem,
6733 	&group_events_tsx,
6734 	&group_caps_gen,
6735 	&group_caps_lbr,
6736 	&group_format_extra,
6737 	&group_format_extra_skl,
6738 	&group_format_evtsel_ext,
6739 	&group_format_acr,
6740 	&group_default,
6741 	NULL,
6742 };
6743 
6744 EVENT_ATTR_STR_HYBRID(slots,                 slots_adl,        "event=0x00,umask=0x4",                       hybrid_big);
6745 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_adl,  "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
6746 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,      td_bad_spec_adl,  "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
6747 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_adl,  "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
6748 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_adl,  "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
6749 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops,     td_heavy_ops_adl, "event=0x00,umask=0x84",                      hybrid_big);
6750 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl,    "event=0x00,umask=0x85",                      hybrid_big);
6751 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat,     td_fetch_lat_adl, "event=0x00,umask=0x86",                      hybrid_big);
6752 EVENT_ATTR_STR_HYBRID(topdown-mem-bound,     td_mem_bound_adl, "event=0x00,umask=0x87",                      hybrid_big);
6753 
6754 static struct attribute *adl_hybrid_events_attrs[] = {
6755 	EVENT_PTR(slots_adl),
6756 	EVENT_PTR(td_retiring_adl),
6757 	EVENT_PTR(td_bad_spec_adl),
6758 	EVENT_PTR(td_fe_bound_adl),
6759 	EVENT_PTR(td_be_bound_adl),
6760 	EVENT_PTR(td_heavy_ops_adl),
6761 	EVENT_PTR(td_br_mis_adl),
6762 	EVENT_PTR(td_fetch_lat_adl),
6763 	EVENT_PTR(td_mem_bound_adl),
6764 	NULL,
6765 };
6766 
6767 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_lnl,  "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
6768 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_lnl,  "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
6769 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_lnl,  "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
6770 
6771 static struct attribute *lnl_hybrid_events_attrs[] = {
6772 	EVENT_PTR(slots_adl),
6773 	EVENT_PTR(td_retiring_lnl),
6774 	EVENT_PTR(td_bad_spec_adl),
6775 	EVENT_PTR(td_fe_bound_lnl),
6776 	EVENT_PTR(td_be_bound_lnl),
6777 	EVENT_PTR(td_heavy_ops_adl),
6778 	EVENT_PTR(td_br_mis_adl),
6779 	EVENT_PTR(td_fetch_lat_adl),
6780 	EVENT_PTR(td_mem_bound_adl),
6781 	NULL
6782 };
6783 
6784 /* The event string must be in PMU IDX order. */
6785 EVENT_ATTR_STR_HYBRID(topdown-retiring,
6786 		      td_retiring_arl_h,
6787 		      "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
6788 		      hybrid_big_small_tiny);
6789 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
6790 		      td_bad_spec_arl_h,
6791 		      "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
6792 		      hybrid_big_small_tiny);
6793 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
6794 		      td_fe_bound_arl_h,
6795 		      "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
6796 		      hybrid_big_small_tiny);
6797 EVENT_ATTR_STR_HYBRID(topdown-be-bound,
6798 		      td_be_bound_arl_h,
6799 		      "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
6800 		      hybrid_big_small_tiny);
6801 
6802 static struct attribute *arl_h_hybrid_events_attrs[] = {
6803 	EVENT_PTR(slots_adl),
6804 	EVENT_PTR(td_retiring_arl_h),
6805 	EVENT_PTR(td_bad_spec_arl_h),
6806 	EVENT_PTR(td_fe_bound_arl_h),
6807 	EVENT_PTR(td_be_bound_arl_h),
6808 	EVENT_PTR(td_heavy_ops_adl),
6809 	EVENT_PTR(td_br_mis_adl),
6810 	EVENT_PTR(td_fetch_lat_adl),
6811 	EVENT_PTR(td_mem_bound_adl),
6812 	NULL,
6813 };
6814 
6815 /* Must be in IDX order */
6816 EVENT_ATTR_STR_HYBRID(mem-loads,     mem_ld_adl,     "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
6817 EVENT_ATTR_STR_HYBRID(mem-stores,    mem_st_adl,     "event=0xd0,umask=0x6;event=0xcd,umask=0x2",                 hybrid_big_small);
6818 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82",                                     hybrid_big);
6819 
6820 static struct attribute *adl_hybrid_mem_attrs[] = {
6821 	EVENT_PTR(mem_ld_adl),
6822 	EVENT_PTR(mem_st_adl),
6823 	EVENT_PTR(mem_ld_aux_adl),
6824 	NULL,
6825 };
6826 
6827 static struct attribute *mtl_hybrid_mem_attrs[] = {
6828 	EVENT_PTR(mem_ld_adl),
6829 	EVENT_PTR(mem_st_adl),
6830 	NULL
6831 };
6832 
6833 EVENT_ATTR_STR_HYBRID(mem-loads,
6834 		      mem_ld_arl_h,
6835 		      "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
6836 		      hybrid_big_small_tiny);
6837 EVENT_ATTR_STR_HYBRID(mem-stores,
6838 		      mem_st_arl_h,
6839 		      "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
6840 		      hybrid_big_small_tiny);
6841 
6842 static struct attribute *arl_h_hybrid_mem_attrs[] = {
6843 	EVENT_PTR(mem_ld_arl_h),
6844 	EVENT_PTR(mem_st_arl_h),
6845 	NULL,
6846 };
6847 
6848 EVENT_ATTR_STR_HYBRID(tx-start,          tx_start_adl,          "event=0xc9,umask=0x1",          hybrid_big);
6849 EVENT_ATTR_STR_HYBRID(tx-commit,         tx_commit_adl,         "event=0xc9,umask=0x2",          hybrid_big);
6850 EVENT_ATTR_STR_HYBRID(tx-abort,          tx_abort_adl,          "event=0xc9,umask=0x4",          hybrid_big);
6851 EVENT_ATTR_STR_HYBRID(tx-conflict,       tx_conflict_adl,       "event=0x54,umask=0x1",          hybrid_big);
6852 EVENT_ATTR_STR_HYBRID(cycles-t,          cycles_t_adl,          "event=0x3c,in_tx=1",            hybrid_big);
6853 EVENT_ATTR_STR_HYBRID(cycles-ct,         cycles_ct_adl,         "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
6854 EVENT_ATTR_STR_HYBRID(tx-capacity-read,  tx_capacity_read_adl,  "event=0x54,umask=0x80",         hybrid_big);
6855 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2",          hybrid_big);
6856 
6857 static struct attribute *adl_hybrid_tsx_attrs[] = {
6858 	EVENT_PTR(tx_start_adl),
6859 	EVENT_PTR(tx_abort_adl),
6860 	EVENT_PTR(tx_commit_adl),
6861 	EVENT_PTR(tx_capacity_read_adl),
6862 	EVENT_PTR(tx_capacity_write_adl),
6863 	EVENT_PTR(tx_conflict_adl),
6864 	EVENT_PTR(cycles_t_adl),
6865 	EVENT_PTR(cycles_ct_adl),
6866 	NULL,
6867 };
6868 
6869 FORMAT_ATTR_HYBRID(in_tx,       hybrid_big);
6870 FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big);
6871 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
6872 FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small_tiny);
6873 FORMAT_ATTR_HYBRID(frontend,    hybrid_big);
6874 
6875 #define ADL_HYBRID_RTM_FORMAT_ATTR	\
6876 	FORMAT_HYBRID_PTR(in_tx),	\
6877 	FORMAT_HYBRID_PTR(in_tx_cp)
6878 
6879 #define ADL_HYBRID_FORMAT_ATTR		\
6880 	FORMAT_HYBRID_PTR(offcore_rsp),	\
6881 	FORMAT_HYBRID_PTR(ldlat),	\
6882 	FORMAT_HYBRID_PTR(frontend)
6883 
6884 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
6885 	ADL_HYBRID_RTM_FORMAT_ATTR,
6886 	ADL_HYBRID_FORMAT_ATTR,
6887 	NULL
6888 };
6889 
6890 static struct attribute *adl_hybrid_extra_attr[] = {
6891 	ADL_HYBRID_FORMAT_ATTR,
6892 	NULL
6893 };
6894 
6895 FORMAT_ATTR_HYBRID(snoop_rsp,	hybrid_small_tiny);
6896 
6897 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
6898 	ADL_HYBRID_RTM_FORMAT_ATTR,
6899 	ADL_HYBRID_FORMAT_ATTR,
6900 	FORMAT_HYBRID_PTR(snoop_rsp),
6901 	NULL
6902 };
6903 
6904 static struct attribute *mtl_hybrid_extra_attr[] = {
6905 	ADL_HYBRID_FORMAT_ATTR,
6906 	FORMAT_HYBRID_PTR(snoop_rsp),
6907 	NULL
6908 };
6909 
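/*
 * Hybrid event and format attributes carry a pmu_type bitmask; an
 * attribute is shown only on the PMUs (cpu_core, cpu_atom, ...) whose
 * type is set in that mask.
 */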
6910 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
6911 {
6912 	struct device *dev = kobj_to_dev(kobj);
6913 	struct x86_hybrid_pmu *pmu =
6914 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6915 	struct perf_pmu_events_hybrid_attr *pmu_attr =
6916 		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
6917 
6918 	return pmu->pmu_type & pmu_attr->pmu_type;
6919 }
6920 
6921 static umode_t hybrid_events_is_visible(struct kobject *kobj,
6922 					struct attribute *attr, int i)
6923 {
6924 	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
6925 }
6926 
6927 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
6928 {
6929 	int cpu = cpumask_first(&pmu->supported_cpus);
6930 
6931 	return (cpu >= nr_cpu_ids) ? -1 : cpu;
6932 }
6933 
6934 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
6935 				     struct attribute *attr, int i)
6936 {
6937 	struct device *dev = kobj_to_dev(kobj);
6938 	struct x86_hybrid_pmu *pmu =
6939 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6940 	int cpu = hybrid_find_supported_cpu(pmu);
6941 
6942 	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
6943 }
6944 
6945 static umode_t hybrid_format_is_visible(struct kobject *kobj,
6946 					struct attribute *attr, int i)
6947 {
6948 	struct device *dev = kobj_to_dev(kobj);
6949 	struct x86_hybrid_pmu *pmu =
6950 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6951 	struct perf_pmu_format_hybrid_attr *pmu_attr =
6952 		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
6953 	int cpu = hybrid_find_supported_cpu(pmu);
6954 
6955 	return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
6956 }
6957 
6958 static umode_t hybrid_td_is_visible(struct kobject *kobj,
6959 				    struct attribute *attr, int i)
6960 {
6961 	struct device *dev = kobj_to_dev(kobj);
6962 	struct x86_hybrid_pmu *pmu =
6963 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6964 
6965 	if (!is_attr_for_this_pmu(kobj, attr))
6966 		return 0;
6967 
6968 
6969 	/* Only the big core supports perf metrics */
6970 	if (pmu->pmu_type == hybrid_big)
6971 		return pmu->intel_cap.perf_metrics ? attr->mode : 0;
6972 
6973 	return attr->mode;
6974 }
6975 
6976 static struct attribute_group hybrid_group_events_td  = {
6977 	.name		= "events",
6978 	.is_visible	= hybrid_td_is_visible,
6979 };
6980 
6981 static struct attribute_group hybrid_group_events_mem = {
6982 	.name		= "events",
6983 	.is_visible	= hybrid_events_is_visible,
6984 };
6985 
6986 static struct attribute_group hybrid_group_events_tsx = {
6987 	.name		= "events",
6988 	.is_visible	= hybrid_tsx_is_visible,
6989 };
6990 
6991 static struct attribute_group hybrid_group_format_extra = {
6992 	.name		= "format",
6993 	.is_visible	= hybrid_format_is_visible,
6994 };
6995 
6996 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
6997 					  struct device_attribute *attr,
6998 					  char *buf)
6999 {
7000 	struct x86_hybrid_pmu *pmu =
7001 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
7002 
7003 	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
7004 }
7005 
7006 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
7007 static struct attribute *intel_hybrid_cpus_attrs[] = {
7008 	&dev_attr_cpus.attr,
7009 	NULL,
7010 };
7011 
7012 static struct attribute_group hybrid_group_cpus = {
7013 	.attrs		= intel_hybrid_cpus_attrs,
7014 };
7015 
7016 static const struct attribute_group *hybrid_attr_update[] = {
7017 	&hybrid_group_events_td,
7018 	&hybrid_group_events_mem,
7019 	&hybrid_group_events_tsx,
7020 	&group_caps_gen,
7021 	&group_caps_lbr,
7022 	&hybrid_group_format_extra,
7023 	&group_format_evtsel_ext,
7024 	&group_format_acr,
7025 	&group_default,
7026 	&hybrid_group_cpus,
7027 	NULL,
7028 };
7029 
7030 static struct attribute *empty_attrs;
7031 
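/*
 * Trim every constraint's counter bitmap to the counters that were
 * actually enumerated by CPUID, then recompute its weight (the number
 * of counters the constrained event can be scheduled on).
 */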
7032 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
7033 					      u64 cntr_mask,
7034 					      u64 fixed_cntr_mask,
7035 					      u64 intel_ctrl)
7036 {
7037 	struct event_constraint *c;
7038 
7039 	if (!event_constraints)
7040 		return;
7041 
7042 	/*
7043 	 * The event on fixed counter 2 (REF_CYCLES) only works on that
7044 	 * counter, so do not extend its mask to the generic counters.
7045 	 */
7046 	for_each_event_constraint(c, event_constraints) {
7047 		/*
7048 		 * Don't extend the topdown slots and metrics
7049 		 * events to the generic counters.
7050 		 */
7051 		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
7052 			/*
7053 			 * Disable the topdown slots and metrics events
7054 			 * if the slots event is not in CPUID.
7055 			 */
7056 			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
7057 				c->idxmsk64 = 0;
7058 			c->weight = hweight64(c->idxmsk64);
7059 			continue;
7060 		}
7061 
7062 		if (c->cmask == FIXED_EVENT_FLAGS) {
7063 			/* Disable fixed counters which are not in CPUID */
7064 			c->idxmsk64 &= intel_ctrl;
7065 
7066 			/*
7067 			 * Don't extend the pseudo-encoding to the
7068 			 * generic counters
7069 			 */
7070 			if (!use_fixed_pseudo_encoding(c->code))
7071 				c->idxmsk64 |= cntr_mask;
7072 		}
7073 		c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
7074 		c->weight = hweight64(c->idxmsk64);
7075 	}
7076 }
7077 
7078 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
7079 {
7080 	struct extra_reg *er;
7081 
7082 	/*
7083 	 * Accessing an extra MSR may cause a #GP under certain circumstances,
7084 	 * e.g. KVM doesn't support the offcore event MSRs.
7085 	 * Probe all extra_regs here and disable any that fault.
7086 	 */
7087 	if (!extra_regs)
7088 		return;
7089 
7090 	for (er = extra_regs; er->msr; er++) {
7091 		er->extra_msr_access = check_msr(er->msr, 0x11UL);
7092 		/* Disable LBR select mapping */
7093 		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
7094 			x86_pmu.lbr_sel_map = NULL;
7095 	}
7096 }
7097 
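/*
 * From arch-PerfMon v6 the counter and event-select MSRs are laid out
 * at a fixed stride per counter index, so the same offset applies to
 * both the eventsel and the counter register.
 */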
7098 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
7099 {
7100 	return MSR_IA32_PMC_V6_STEP * index;
7101 }
7102 
7103 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
7104 	{ hybrid_small,	"cpu_atom" },
7105 	{ hybrid_big,	"cpu_core" },
7106 	{ hybrid_tiny,	"cpu_lowpower" },
7107 };
7108 
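/*
 * Allocate one x86_hybrid_pmu per CPU type present in @pmus and seed
 * each with the boot CPU's capabilities; type-specific fixups (counter
 * masks, perf metrics, ack mode) follow below and in the callers.
 */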
7109 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
7110 {
7111 	unsigned long pmus_mask = pmus;
7112 	struct x86_hybrid_pmu *pmu;
7113 	int idx = 0, bit;
7114 
7115 	x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
7116 	x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
7117 				     sizeof(struct x86_hybrid_pmu),
7118 				     GFP_KERNEL);
7119 	if (!x86_pmu.hybrid_pmu)
7120 		return -ENOMEM;
7121 
7122 	static_branch_enable(&perf_is_hybrid);
7123 	x86_pmu.filter = intel_pmu_filter;
7124 
7125 	for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
7126 		pmu = &x86_pmu.hybrid_pmu[idx++];
7127 		pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
7128 		pmu->name = intel_hybrid_pmu_type_map[bit].name;
7129 
7130 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7131 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7132 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7133 		pmu->config_mask = X86_RAW_EVENT_MASK;
7134 		pmu->unconstrained = (struct event_constraint)
7135 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7136 							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7137 
7138 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
7139 		if (pmu->pmu_type & hybrid_small_tiny) {
7140 			pmu->intel_cap.perf_metrics = 0;
7141 			pmu->mid_ack = true;
7142 		} else if (pmu->pmu_type & hybrid_big) {
7143 			pmu->intel_cap.perf_metrics = 1;
7144 			pmu->late_ack = true;
7145 		}
7146 	}
7147 
7148 	return 0;
7149 }
7150 
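/*
 * If the ref-cycles fixed counter event is not enumerated, fall back
 * to the architectural 0x013c encoding for PERF_COUNT_HW_REF_CPU_CYCLES,
 * which can also be scheduled on a generic counter.
 */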
7151 static __always_inline void intel_pmu_ref_cycles_ext(void)
7152 {
7153 	if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
7154 		intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
7155 }
7156 
7157 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
7158 {
7159 	x86_pmu.late_ack = true;
7160 	x86_pmu.limit_period = glc_limit_period;
7161 	x86_pmu.pebs_aliases = NULL;
7162 	x86_pmu.pebs_prec_dist = true;
7163 	x86_pmu.pebs_block = true;
7164 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7165 	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7166 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
7167 	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
7168 	x86_pmu.lbr_pt_coexist = true;
7169 	x86_pmu.num_topdown_events = 8;
7170 	static_call_update(intel_pmu_update_topdown_event,
7171 			   &icl_update_topdown_event);
7172 	static_call_update(intel_pmu_set_topdown_event_period,
7173 			   &icl_set_topdown_event_period);
7174 
7175 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7176 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7177 	hybrid(pmu, event_constraints) = intel_glc_event_constraints;
7178 	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
7179 
7180 	intel_pmu_ref_cycles_ext();
7181 }
7182 
7183 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
7184 {
7185 	x86_pmu.mid_ack = true;
7186 	x86_pmu.limit_period = glc_limit_period;
7187 	x86_pmu.pebs_aliases = NULL;
7188 	x86_pmu.pebs_prec_dist = true;
7189 	x86_pmu.pebs_block = true;
7190 	x86_pmu.lbr_pt_coexist = true;
7191 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7192 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
7193 
7194 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7195 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7196 	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7197 	hybrid(pmu, event_constraints) = intel_grt_event_constraints;
7198 	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
7199 	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
7200 
7201 	intel_pmu_ref_cycles_ext();
7202 }
7203 
7204 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
7205 {
7206 	intel_pmu_init_glc(pmu);
7207 	hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
7208 	hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
7209 	hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
7210 }
7211 
7212 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
7213 {
7214 	intel_pmu_init_grt(pmu);
7215 	hybrid(pmu, event_constraints) = intel_skt_event_constraints;
7216 	hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
7217 	static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
7218 }
7219 
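/*
 * Boot-time entry point: probe the architectural PerfMon via CPUID
 * leaf 0xA, set up the generic x86_pmu state, then apply the
 * per-microarchitecture configuration in the large switch below.
 */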
7220 __init int intel_pmu_init(void)
7221 {
7222 	struct attribute **extra_skl_attr = &empty_attrs;
7223 	struct attribute **extra_attr = &empty_attrs;
7224 	struct attribute **td_attr    = &empty_attrs;
7225 	struct attribute **mem_attr   = &empty_attrs;
7226 	struct attribute **tsx_attr   = &empty_attrs;
7227 	union cpuid10_edx edx;
7228 	union cpuid10_eax eax;
7229 	union cpuid10_ebx ebx;
7230 	unsigned int fixed_mask;
7231 	bool pmem = false;
7232 	int version, i;
7233 	char *name;
7234 	struct x86_hybrid_pmu *pmu;
7235 
7236 	/* Architectural Perfmon was introduced starting with Core "Yonah" */
7237 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
7238 		switch (boot_cpu_data.x86) {
7239 		case  6:
7240 			if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH)
7241 				return p6_pmu_init();
7242 			break;
7243 		case 11:
7244 			return knc_pmu_init();
7245 		case 15:
7246 			return p4_pmu_init();
7247 		}
7248 
7249 		pr_cont("unsupported CPU family %d model %d ",
7250 			boot_cpu_data.x86, boot_cpu_data.x86_model);
7251 		return -ENODEV;
7252 	}
7253 
7254 	/*
7255 	 * Check whether the Architectural PerfMon supports
7256 	 * the Branch Misses Retired hw_event.
7257 	 */
7258 	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
7259 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
7260 		return -ENODEV;
7261 
7262 	version = eax.split.version_id;
7263 	if (version < 2)
7264 		x86_pmu = core_pmu;
7265 	else
7266 		x86_pmu = intel_pmu;
7267 
7268 	x86_pmu.version			= version;
7269 	x86_pmu.cntr_mask64		= GENMASK_ULL(eax.split.num_counters - 1, 0);
7270 	x86_pmu.cntval_bits		= eax.split.bit_width;
7271 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
7272 
7273 	x86_pmu.events_maskl		= ebx.full;
7274 	x86_pmu.events_mask_len		= eax.split.mask_length;
7275 
7276 	x86_pmu.pebs_events_mask	= intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
7277 	x86_pmu.pebs_capable		= PEBS_COUNTER_MASK;
7278 	x86_pmu.config_mask		= X86_RAW_EVENT_MASK;
7279 
7280 	/*
7281 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
7282 	 * assume at least 3 events, when not running in a hypervisor:
7283 	 */
7284 	if (version > 1 && version < 5) {
7285 		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
7286 
7287 		x86_pmu.fixed_cntr_mask64 =
7288 			GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
7289 	} else if (version >= 5)
7290 		x86_pmu.fixed_cntr_mask64 = fixed_mask;
7291 
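	/*
	 * PDCM advertises the IA32_PERF_CAPABILITIES MSR, which describes
	 * PEBS and LBR format details among other PMU capabilities.
	 */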
7292 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
7293 		u64 capabilities;
7294 
7295 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities);
7296 		x86_pmu.intel_cap.capabilities = capabilities;
7297 	}
7298 
7299 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
7300 		x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
7301 		x86_pmu.lbr_read = intel_pmu_lbr_read_32;
7302 	}
7303 
7304 	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
7305 		intel_pmu_arch_lbr_init();
7306 
7307 	intel_pebs_init();
7308 
7309 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
7310 
7311 	if (version >= 5) {
7312 		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
7313 		if (x86_pmu.intel_cap.anythread_deprecated)
7314 			pr_cont(" AnyThread deprecated, ");
7315 	}
7316 
7317 	/*
7318 	 * Many features on and after V6 require dynamic constraints,
7319 	 * e.g., Arch PEBS, ACR.
7320 	 */
7321 	if (version >= 6) {
7322 		x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
7323 		x86_pmu.late_setup = intel_pmu_late_setup;
7324 	}
7325 
7326 	/*
7327 	 * Install the hw-cache-events table:
7328 	 */
7329 	switch (boot_cpu_data.x86_vfm) {
7330 	case INTEL_CORE_YONAH:
7331 		pr_cont("Core events, ");
7332 		name = "core";
7333 		break;
7334 
7335 	case INTEL_CORE2_MEROM:
7336 		x86_add_quirk(intel_clovertown_quirk);
7337 		fallthrough;
7338 
7339 	case INTEL_CORE2_MEROM_L:
7340 	case INTEL_CORE2_PENRYN:
7341 	case INTEL_CORE2_DUNNINGTON:
7342 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
7343 		       sizeof(hw_cache_event_ids));
7344 
7345 		intel_pmu_lbr_init_core();
7346 
7347 		x86_pmu.event_constraints = intel_core2_event_constraints;
7348 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
7349 		pr_cont("Core2 events, ");
7350 		name = "core2";
7351 		break;
7352 
7353 	case INTEL_NEHALEM:
7354 	case INTEL_NEHALEM_EP:
7355 	case INTEL_NEHALEM_EX:
7356 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
7357 		       sizeof(hw_cache_event_ids));
7358 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
7359 		       sizeof(hw_cache_extra_regs));
7360 
7361 		intel_pmu_lbr_init_nhm();
7362 
7363 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
7364 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
7365 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
7366 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
7367 		x86_pmu.limit_period = nhm_limit_period;
7368 
7369 		mem_attr = nhm_mem_events_attrs;
7370 
7371 		/* UOPS_ISSUED.STALLED_CYCLES */
7372 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7373 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7374 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
7375 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7376 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
7377 
7378 		intel_pmu_pebs_data_source_nhm();
7379 		x86_add_quirk(intel_nehalem_quirk);
7380 		x86_pmu.pebs_no_tlb = 1;
7381 		extra_attr = nhm_format_attr;
7382 
7383 		pr_cont("Nehalem events, ");
7384 		name = "nehalem";
7385 		break;
7386 
7387 	case INTEL_ATOM_BONNELL:
7388 	case INTEL_ATOM_BONNELL_MID:
7389 	case INTEL_ATOM_SALTWELL:
7390 	case INTEL_ATOM_SALTWELL_MID:
7391 	case INTEL_ATOM_SALTWELL_TABLET:
7392 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
7393 		       sizeof(hw_cache_event_ids));
7394 
7395 		intel_pmu_lbr_init_atom();
7396 
7397 		x86_pmu.event_constraints = intel_gen_event_constraints;
7398 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
7399 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
7400 		pr_cont("Atom events, ");
7401 		name = "bonnell";
7402 		break;
7403 
7404 	case INTEL_ATOM_SILVERMONT:
7405 	case INTEL_ATOM_SILVERMONT_D:
7406 	case INTEL_ATOM_SILVERMONT_MID:
7407 	case INTEL_ATOM_AIRMONT:
7408 	case INTEL_ATOM_SILVERMONT_MID2:
7409 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
7410 			sizeof(hw_cache_event_ids));
7411 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
7412 		       sizeof(hw_cache_extra_regs));
7413 
7414 		intel_pmu_lbr_init_slm();
7415 
7416 		x86_pmu.event_constraints = intel_slm_event_constraints;
7417 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7418 		x86_pmu.extra_regs = intel_slm_extra_regs;
7419 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7420 		td_attr = slm_events_attrs;
7421 		extra_attr = slm_format_attr;
7422 		pr_cont("Silvermont events, ");
7423 		name = "silvermont";
7424 		break;
7425 
7426 	case INTEL_ATOM_GOLDMONT:
7427 	case INTEL_ATOM_GOLDMONT_D:
7428 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
7429 		       sizeof(hw_cache_event_ids));
7430 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
7431 		       sizeof(hw_cache_extra_regs));
7432 
7433 		intel_pmu_lbr_init_skl();
7434 
7435 		x86_pmu.event_constraints = intel_slm_event_constraints;
7436 		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
7437 		x86_pmu.extra_regs = intel_glm_extra_regs;
7438 		/*
7439 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7440 		 * for precise cycles.
7441 		 * :pp is identical to :ppp
7442 		 */
7443 		x86_pmu.pebs_aliases = NULL;
7444 		x86_pmu.pebs_prec_dist = true;
7445 		x86_pmu.lbr_pt_coexist = true;
7446 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7447 		td_attr = glm_events_attrs;
7448 		extra_attr = slm_format_attr;
7449 		pr_cont("Goldmont events, ");
7450 		name = "goldmont";
7451 		break;
7452 
7453 	case INTEL_ATOM_GOLDMONT_PLUS:
7454 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7455 		       sizeof(hw_cache_event_ids));
7456 		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
7457 		       sizeof(hw_cache_extra_regs));
7458 
7459 		intel_pmu_lbr_init_skl();
7460 
7461 		x86_pmu.event_constraints = intel_slm_event_constraints;
7462 		x86_pmu.extra_regs = intel_glm_extra_regs;
7463 		/*
7464 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7465 		 * for precise cycles.
7466 		 */
7467 		x86_pmu.pebs_aliases = NULL;
7468 		x86_pmu.pebs_prec_dist = true;
7469 		x86_pmu.lbr_pt_coexist = true;
7470 		x86_pmu.pebs_capable = ~0ULL;
7471 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7472 		x86_pmu.flags |= PMU_FL_PEBS_ALL;
7473 		x86_pmu.get_event_constraints = glp_get_event_constraints;
7474 		td_attr = glm_events_attrs;
7475 		/* Goldmont Plus has 4-wide pipeline */
7476 		event_attr_td_total_slots_scale_glm.event_str = "4";
7477 		extra_attr = slm_format_attr;
7478 		pr_cont("Goldmont plus events, ");
7479 		name = "goldmont_plus";
7480 		break;
7481 
7482 	case INTEL_ATOM_TREMONT_D:
7483 	case INTEL_ATOM_TREMONT:
7484 	case INTEL_ATOM_TREMONT_L:
7485 		x86_pmu.late_ack = true;
7486 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7487 		       sizeof(hw_cache_event_ids));
7488 		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
7489 		       sizeof(hw_cache_extra_regs));
7490 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7491 
7492 		intel_pmu_lbr_init_skl();
7493 
7494 		x86_pmu.event_constraints = intel_slm_event_constraints;
7495 		x86_pmu.extra_regs = intel_tnt_extra_regs;
7496 		/*
7497 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7498 		 * for precise cycles.
7499 		 */
7500 		x86_pmu.pebs_aliases = NULL;
7501 		x86_pmu.pebs_prec_dist = true;
7502 		x86_pmu.lbr_pt_coexist = true;
7503 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7504 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
7505 		td_attr = tnt_events_attrs;
7506 		extra_attr = slm_format_attr;
7507 		pr_cont("Tremont events, ");
7508 		name = "Tremont";
7509 		break;
7510 
7511 	case INTEL_ATOM_GRACEMONT:
7512 		intel_pmu_init_grt(NULL);
7513 		intel_pmu_pebs_data_source_grt();
7514 		x86_pmu.pebs_latency_data = grt_latency_data;
7515 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
7516 		td_attr = tnt_events_attrs;
7517 		mem_attr = grt_mem_attrs;
7518 		extra_attr = nhm_format_attr;
7519 		pr_cont("Gracemont events, ");
7520 		name = "gracemont";
7521 		break;
7522 
7523 	case INTEL_ATOM_CRESTMONT:
7524 	case INTEL_ATOM_CRESTMONT_X:
7525 		intel_pmu_init_grt(NULL);
7526 		x86_pmu.extra_regs = intel_cmt_extra_regs;
7527 		intel_pmu_pebs_data_source_cmt();
7528 		x86_pmu.pebs_latency_data = cmt_latency_data;
7529 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
7530 		td_attr = cmt_events_attrs;
7531 		mem_attr = grt_mem_attrs;
7532 		extra_attr = cmt_format_attr;
7533 		pr_cont("Crestmont events, ");
7534 		name = "crestmont";
7535 		break;
7536 
7537 	case INTEL_ATOM_DARKMONT_X:
7538 		intel_pmu_init_skt(NULL);
7539 		intel_pmu_pebs_data_source_cmt();
7540 		x86_pmu.pebs_latency_data = cmt_latency_data;
7541 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
7542 		td_attr = skt_events_attrs;
7543 		mem_attr = grt_mem_attrs;
7544 		extra_attr = cmt_format_attr;
7545 		pr_cont("Darkmont events, ");
7546 		name = "darkmont";
7547 		break;
7548 
7549 	case INTEL_WESTMERE:
7550 	case INTEL_WESTMERE_EP:
7551 	case INTEL_WESTMERE_EX:
7552 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
7553 		       sizeof(hw_cache_event_ids));
7554 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
7555 		       sizeof(hw_cache_extra_regs));
7556 
7557 		intel_pmu_lbr_init_nhm();
7558 
7559 		x86_pmu.event_constraints = intel_westmere_event_constraints;
7560 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
7561 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
7562 		x86_pmu.extra_regs = intel_westmere_extra_regs;
7563 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7564 
7565 		mem_attr = nhm_mem_events_attrs;
7566 
7567 		/* UOPS_ISSUED.STALLED_CYCLES */
7568 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7569 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7570 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
7571 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7572 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
7573 
7574 		intel_pmu_pebs_data_source_nhm();
7575 		extra_attr = nhm_format_attr;
7576 		pr_cont("Westmere events, ");
7577 		name = "westmere";
7578 		break;
7579 
7580 	case INTEL_SANDYBRIDGE:
7581 	case INTEL_SANDYBRIDGE_X:
7582 		x86_add_quirk(intel_sandybridge_quirk);
7583 		x86_add_quirk(intel_ht_bug);
7584 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7585 		       sizeof(hw_cache_event_ids));
7586 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7587 		       sizeof(hw_cache_extra_regs));
7588 
7589 		intel_pmu_lbr_init_snb();
7590 
7591 		x86_pmu.event_constraints = intel_snb_event_constraints;
7592 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
7593 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
7594 		if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
7595 			x86_pmu.extra_regs = intel_snbep_extra_regs;
7596 		else
7597 			x86_pmu.extra_regs = intel_snb_extra_regs;
7598 
7599 
7600 		/* all extra regs are per-cpu when HT is on */
7601 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7602 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7603 
7604 		td_attr  = snb_events_attrs;
7605 		mem_attr = snb_mem_events_attrs;
7606 
7607 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7608 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7609 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7610 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
7611 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7612 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
7613 
7614 		extra_attr = nhm_format_attr;
7615 
7616 		pr_cont("SandyBridge events, ");
7617 		name = "sandybridge";
7618 		break;
7619 
7620 	case INTEL_IVYBRIDGE:
7621 	case INTEL_IVYBRIDGE_X:
7622 		x86_add_quirk(intel_ht_bug);
7623 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7624 		       sizeof(hw_cache_event_ids));
7625 		/* dTLB-load-misses on IVB is different than SNB */
7626 		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
7627 
7628 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7629 		       sizeof(hw_cache_extra_regs));
7630 
7631 		intel_pmu_lbr_init_snb();
7632 
7633 		x86_pmu.event_constraints = intel_ivb_event_constraints;
7634 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
7635 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7636 		x86_pmu.pebs_prec_dist = true;
7637 		if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
7638 			x86_pmu.extra_regs = intel_snbep_extra_regs;
7639 		else
7640 			x86_pmu.extra_regs = intel_snb_extra_regs;
7641 		/* all extra regs are per-cpu when HT is on */
7642 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7643 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7644 
7645 		td_attr  = snb_events_attrs;
7646 		mem_attr = snb_mem_events_attrs;
7647 
7648 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7649 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7650 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7651 
7652 		extra_attr = nhm_format_attr;
7653 
7654 		pr_cont("IvyBridge events, ");
7655 		name = "ivybridge";
7656 		break;
7657 
7658 
7659 	case INTEL_HASWELL:
7660 	case INTEL_HASWELL_X:
7661 	case INTEL_HASWELL_L:
7662 	case INTEL_HASWELL_G:
7663 		x86_add_quirk(intel_ht_bug);
7664 		x86_add_quirk(intel_pebs_isolation_quirk);
7665 		x86_pmu.late_ack = true;
7666 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7667 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7668 
7669 		intel_pmu_lbr_init_hsw();
7670 
7671 		x86_pmu.event_constraints = intel_hsw_event_constraints;
7672 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
7673 		x86_pmu.extra_regs = intel_snbep_extra_regs;
7674 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7675 		x86_pmu.pebs_prec_dist = true;
7676 		/* all extra regs are per-cpu when HT is on */
7677 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7678 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7679 
7680 		x86_pmu.hw_config = hsw_hw_config;
7681 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7682 		x86_pmu.limit_period = hsw_limit_period;
7683 		x86_pmu.lbr_double_abort = true;
7684 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7685 			hsw_format_attr : nhm_format_attr;
7686 		td_attr  = hsw_events_attrs;
7687 		mem_attr = hsw_mem_events_attrs;
7688 		tsx_attr = hsw_tsx_events_attrs;
7689 		pr_cont("Haswell events, ");
7690 		name = "haswell";
7691 		break;
7692 
7693 	case INTEL_BROADWELL:
7694 	case INTEL_BROADWELL_D:
7695 	case INTEL_BROADWELL_G:
7696 	case INTEL_BROADWELL_X:
7697 		x86_add_quirk(intel_pebs_isolation_quirk);
7698 		x86_pmu.late_ack = true;
7699 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7700 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7701 
7702 		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
7703 		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
7704 									 BDW_L3_MISS|HSW_SNOOP_DRAM;
7705 		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
7706 									  HSW_SNOOP_DRAM;
7707 		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
7708 									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
7709 		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
7710 									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
7711 
7712 		intel_pmu_lbr_init_hsw();
7713 
7714 		x86_pmu.event_constraints = intel_bdw_event_constraints;
7715 		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
7716 		x86_pmu.extra_regs = intel_snbep_extra_regs;
7717 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7718 		x86_pmu.pebs_prec_dist = true;
7719 		/* all extra regs are per-cpu when HT is on */
7720 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7721 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7722 
7723 		x86_pmu.hw_config = hsw_hw_config;
7724 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7725 		x86_pmu.limit_period = bdw_limit_period;
7726 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7727 			hsw_format_attr : nhm_format_attr;
7728 		td_attr  = hsw_events_attrs;
7729 		mem_attr = hsw_mem_events_attrs;
7730 		tsx_attr = hsw_tsx_events_attrs;
7731 		pr_cont("Broadwell events, ");
7732 		name = "broadwell";
7733 		break;
7734 
7735 	case INTEL_XEON_PHI_KNL:
7736 	case INTEL_XEON_PHI_KNM:
7737 		memcpy(hw_cache_event_ids,
7738 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7739 		memcpy(hw_cache_extra_regs,
7740 		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7741 		intel_pmu_lbr_init_knl();
7742 
7743 		x86_pmu.event_constraints = intel_slm_event_constraints;
7744 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7745 		x86_pmu.extra_regs = intel_knl_extra_regs;
7746 
7747 		/* all extra regs are per-cpu when HT is on */
7748 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7749 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7750 		extra_attr = slm_format_attr;
7751 		pr_cont("Knights Landing/Mill events, ");
7752 		name = "knights-landing";
7753 		break;
7754 
7755 	case INTEL_SKYLAKE_X:
7756 		pmem = true;
7757 		fallthrough;
7758 	case INTEL_SKYLAKE_L:
7759 	case INTEL_SKYLAKE:
7760 	case INTEL_KABYLAKE_L:
7761 	case INTEL_KABYLAKE:
7762 	case INTEL_COMETLAKE_L:
7763 	case INTEL_COMETLAKE:
7764 		x86_add_quirk(intel_pebs_isolation_quirk);
7765 		x86_pmu.late_ack = true;
7766 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7767 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7768 		intel_pmu_lbr_init_skl();
7769 
7770 		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
7771 		event_attr_td_recovery_bubbles.event_str_noht =
7772 			"event=0xd,umask=0x1,cmask=1";
7773 		event_attr_td_recovery_bubbles.event_str_ht =
7774 			"event=0xd,umask=0x1,cmask=1,any=1";
7775 
7776 		x86_pmu.event_constraints = intel_skl_event_constraints;
7777 		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
7778 		x86_pmu.extra_regs = intel_skl_extra_regs;
7779 		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
7780 		x86_pmu.pebs_prec_dist = true;
7781 		/* all extra regs are per-cpu when HT is on */
7782 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7783 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7784 
7785 		x86_pmu.hw_config = hsw_hw_config;
7786 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7787 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7788 			hsw_format_attr : nhm_format_attr;
7789 		extra_skl_attr = skl_format_attr;
7790 		td_attr  = hsw_events_attrs;
7791 		mem_attr = hsw_mem_events_attrs;
7792 		tsx_attr = hsw_tsx_events_attrs;
7793 		intel_pmu_pebs_data_source_skl(pmem);
7794 
7795 		/*
7796 		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
7797 		 * TSX force abort hooks are not required on these systems. Only deploy
7798 		 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
7799 		 */
7800 		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
7801 		   !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
7802 			x86_pmu.flags |= PMU_FL_TFA;
7803 			x86_pmu.get_event_constraints = tfa_get_event_constraints;
7804 			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
7805 			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
7806 		}
7807 
7808 		pr_cont("Skylake events, ");
7809 		name = "skylake";
7810 		break;
7811 
7812 	case INTEL_ICELAKE_X:
7813 	case INTEL_ICELAKE_D:
7814 		x86_pmu.pebs_ept = 1;
7815 		pmem = true;
7816 		fallthrough;
7817 	case INTEL_ICELAKE_L:
7818 	case INTEL_ICELAKE:
7819 	case INTEL_TIGERLAKE_L:
7820 	case INTEL_TIGERLAKE:
7821 	case INTEL_ROCKETLAKE:
7822 		x86_pmu.late_ack = true;
7823 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7824 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7825 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7826 		intel_pmu_lbr_init_skl();
7827 
7828 		x86_pmu.event_constraints = intel_icl_event_constraints;
7829 		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
7830 		x86_pmu.extra_regs = intel_icl_extra_regs;
7831 		x86_pmu.pebs_aliases = NULL;
7832 		x86_pmu.pebs_prec_dist = true;
7833 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7834 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7835 
7836 		x86_pmu.hw_config = hsw_hw_config;
7837 		x86_pmu.get_event_constraints = icl_get_event_constraints;
7838 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7839 			hsw_format_attr : nhm_format_attr;
7840 		extra_skl_attr = skl_format_attr;
7841 		mem_attr = icl_events_attrs;
7842 		td_attr = icl_td_events_attrs;
7843 		tsx_attr = icl_tsx_events_attrs;
7844 		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
7845 		x86_pmu.lbr_pt_coexist = true;
7846 		intel_pmu_pebs_data_source_skl(pmem);
7847 		x86_pmu.num_topdown_events = 4;
7848 		static_call_update(intel_pmu_update_topdown_event,
7849 				   &icl_update_topdown_event);
7850 		static_call_update(intel_pmu_set_topdown_event_period,
7851 				   &icl_set_topdown_event_period);
7852 		pr_cont("Icelake events, ");
7853 		name = "icelake";
7854 		break;
7855 
7856 	case INTEL_SAPPHIRERAPIDS_X:
7857 	case INTEL_EMERALDRAPIDS_X:
7858 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7859 		x86_pmu.extra_regs = intel_glc_extra_regs;
7860 		pr_cont("Sapphire Rapids events, ");
7861 		name = "sapphire_rapids";
7862 		goto glc_common;
7863 
7864 	case INTEL_GRANITERAPIDS_X:
7865 	case INTEL_GRANITERAPIDS_D:
7866 		x86_pmu.extra_regs = intel_rwc_extra_regs;
7867 		pr_cont("Granite Rapids events, ");
7868 		name = "granite_rapids";
7869 
7870 	glc_common:
7871 		intel_pmu_init_glc(NULL);
7872 		x86_pmu.pebs_ept = 1;
7873 		x86_pmu.hw_config = hsw_hw_config;
7874 		x86_pmu.get_event_constraints = glc_get_event_constraints;
7875 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7876 			hsw_format_attr : nhm_format_attr;
7877 		extra_skl_attr = skl_format_attr;
7878 		mem_attr = glc_events_attrs;
7879 		td_attr = glc_td_events_attrs;
7880 		tsx_attr = glc_tsx_events_attrs;
7881 		intel_pmu_pebs_data_source_skl(true);
7882 		break;
7883 
7884 	case INTEL_ALDERLAKE:
7885 	case INTEL_ALDERLAKE_L:
7886 	case INTEL_RAPTORLAKE:
7887 	case INTEL_RAPTORLAKE_P:
7888 	case INTEL_RAPTORLAKE_S:
7889 		/*
7890 		 * Alder Lake has two types of CPUs: core and atom.
7891 		 *
7892 		 * Initialize the common PerfMon capabilities here.
7893 		 */
7894 		intel_pmu_init_hybrid(hybrid_big_small);
7895 
7896 		x86_pmu.pebs_latency_data = grt_latency_data;
7897 		x86_pmu.get_event_constraints = adl_get_event_constraints;
7898 		x86_pmu.hw_config = adl_hw_config;
7899 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
7900 
7901 		td_attr = adl_hybrid_events_attrs;
7902 		mem_attr = adl_hybrid_mem_attrs;
7903 		tsx_attr = adl_hybrid_tsx_attrs;
7904 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7905 			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
7906 
7907 		/* Initialize big core specific PerfMon capabilities. */
7908 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7909 		intel_pmu_init_glc(&pmu->pmu);
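		/*
		 * On hybrid parts CPUID leaf 0xA enumerates only the counters
		 * common to both core types; the P-core has two more generic
		 * and one more fixed counter, which are added back here.
		 */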
7910 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
7911 			pmu->cntr_mask64 <<= 2;
7912 			pmu->cntr_mask64 |= 0x3;
7913 			pmu->fixed_cntr_mask64 <<= 1;
7914 			pmu->fixed_cntr_mask64 |= 0x1;
7915 		} else {
7916 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7917 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7918 		}
7919 
7920 		/*
7921 		 * Quirk: On some Alder Lake machines, when all E-cores are disabled
7922 		 * in the BIOS, leaf 0xA enumerates all counters of the P-cores.
7923 		 * However, X86_FEATURE_HYBRID_CPU is still set, so the code above
7924 		 * would mistakenly add extra counters for the P-cores. Correct the
7925 		 * number of counters here.
7926 		 */
7927 		if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
7928 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7929 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7930 		}
7931 
7932 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7933 		pmu->unconstrained = (struct event_constraint)
7934 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7935 				     0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7936 
7937 		pmu->extra_regs = intel_glc_extra_regs;
7938 
7939 		/* Initialize Atom core specific PerfMon capabilities. */
7940 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7941 		intel_pmu_init_grt(&pmu->pmu);
7942 
7943 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7944 		intel_pmu_pebs_data_source_adl();
7945 		pr_cont("Alderlake Hybrid events, ");
7946 		name = "alderlake_hybrid";
7947 		break;
7948 
7949 	case INTEL_METEORLAKE:
7950 	case INTEL_METEORLAKE_L:
7951 	case INTEL_ARROWLAKE_U:
7952 		intel_pmu_init_hybrid(hybrid_big_small);
7953 
7954 		x86_pmu.pebs_latency_data = cmt_latency_data;
7955 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7956 		x86_pmu.hw_config = adl_hw_config;
7957 
7958 		td_attr = adl_hybrid_events_attrs;
7959 		mem_attr = mtl_hybrid_mem_attrs;
7960 		tsx_attr = adl_hybrid_tsx_attrs;
7961 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7962 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7963 
7964 		/* Initialize big core specific PerfMon capabilities. */
7965 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7966 		intel_pmu_init_glc(&pmu->pmu);
7967 		pmu->extra_regs = intel_rwc_extra_regs;
7968 
7969 		/* Initialize Atom core specific PerfMon capabilities. */
7970 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7971 		intel_pmu_init_grt(&pmu->pmu);
7972 		pmu->extra_regs = intel_cmt_extra_regs;
7973 
7974 		intel_pmu_pebs_data_source_mtl();
7975 		pr_cont("Meteorlake Hybrid events, ");
7976 		name = "meteorlake_hybrid";
7977 		break;
7978 
7979 	case INTEL_PANTHERLAKE_L:
7980 	case INTEL_WILDCATLAKE_L:
7981 		pr_cont("Pantherlake Hybrid events, ");
7982 		name = "pantherlake_hybrid";
7983 		goto lnl_common;
7984 
7985 	case INTEL_LUNARLAKE_M:
7986 	case INTEL_ARROWLAKE:
7987 		pr_cont("Lunarlake Hybrid events, ");
7988 		name = "lunarlake_hybrid";
7989 
7990 	lnl_common:
7991 		intel_pmu_init_hybrid(hybrid_big_small);
7992 
7993 		x86_pmu.pebs_latency_data = lnl_latency_data;
7994 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7995 		x86_pmu.hw_config = adl_hw_config;
7996 
7997 		td_attr = lnl_hybrid_events_attrs;
7998 		mem_attr = mtl_hybrid_mem_attrs;
7999 		tsx_attr = adl_hybrid_tsx_attrs;
8000 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8001 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
8002 
8003 		/* Initialize big core specific PerfMon capabilities. */
8004 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
8005 		intel_pmu_init_lnc(&pmu->pmu);
8006 
8007 		/* Initialize Atom core specific PerfMon capabilities. */
8008 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
8009 		intel_pmu_init_skt(&pmu->pmu);
8010 
8011 		intel_pmu_pebs_data_source_lnl();
8012 		break;
8013 
8014 	case INTEL_ARROWLAKE_H:
8015 		intel_pmu_init_hybrid(hybrid_big_small_tiny);
8016 
8017 		x86_pmu.pebs_latency_data = arl_h_latency_data;
8018 		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
8019 		x86_pmu.hw_config = arl_h_hw_config;
8020 
8021 		td_attr = arl_h_hybrid_events_attrs;
8022 		mem_attr = arl_h_hybrid_mem_attrs;
8023 		tsx_attr = adl_hybrid_tsx_attrs;
8024 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
8025 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
8026 
8027 		/* Initialize big core specific PerfMon capabilities. */
8028 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
8029 		intel_pmu_init_lnc(&pmu->pmu);
8030 
8031 		/* Initialize Atom core specific PerfMon capabilities. */
8032 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
8033 		intel_pmu_init_skt(&pmu->pmu);
8034 
8035 		/* Initialize Lower Power Atom specific PerfMon capabilities. */
8036 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
8037 		intel_pmu_init_grt(&pmu->pmu);
8038 		pmu->extra_regs = intel_cmt_extra_regs;
8039 
8040 		intel_pmu_pebs_data_source_arl_h();
8041 		pr_cont("ArrowLake-H Hybrid events, ");
8042 		name = "arrowlake_h_hybrid";
8043 		break;
8044 
8045 	default:
8046 		switch (x86_pmu.version) {
8047 		case 1:
8048 			x86_pmu.event_constraints = intel_v1_event_constraints;
8049 			pr_cont("generic architected perfmon v1, ");
8050 			name = "generic_arch_v1";
8051 			break;
8052 		case 2:
8053 		case 3:
8054 		case 4:
8055 			/*
8056 			 * default constraints for v2 and up
8057 			 */
8058 			x86_pmu.event_constraints = intel_gen_event_constraints;
8059 			pr_cont("generic architected perfmon, ");
8060 			name = "generic_arch_v2+";
8061 			break;
8062 		default:
8063 		/*
8064 			 * The default constraints for v5 and up can support up to
8065 			 * 16 fixed counters. For fixed counter 4 and later, the
8066 			 * pseudo-encoding is applied. The table is truncated to the
8067 			 * CPUID enumeration by writing an EVENT_CONSTRAINT_END
8068 			 * (weight == -1) at the first unenumerated fixed counter.
8069 			 */
8070 			if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
8071 				x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
8072 			intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
8073 			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
8074 			pr_cont("generic architected perfmon, ");
8075 			name = "generic_arch_v5+";
8076 			break;
8077 		}
8078 	}
8079 
8080 	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
8081 
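	/*
	 * Export the model-specific event, memory, TSX and format attributes
	 * via sysfs. Hybrid parts use separate attribute groups, so each
	 * sub-PMU can filter what it exposes.
	 */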
8082 	if (!is_hybrid()) {
8083 		group_events_td.attrs  = td_attr;
8084 		group_events_mem.attrs = mem_attr;
8085 		group_events_tsx.attrs = tsx_attr;
8086 		group_format_extra.attrs = extra_attr;
8087 		group_format_extra_skl.attrs = extra_skl_attr;
8088 
8089 		x86_pmu.attr_update = attr_update;
8090 	} else {
8091 		hybrid_group_events_td.attrs  = td_attr;
8092 		hybrid_group_events_mem.attrs = mem_attr;
8093 		hybrid_group_events_tsx.attrs = tsx_attr;
8094 		hybrid_group_format_extra.attrs = extra_attr;
8095 
8096 		x86_pmu.attr_update = hybrid_attr_update;
8097 	}
8098 
8099 	/*
8100 	 * The archPerfmonExt leaf (0x23) provides an enhanced enumeration of
8101 	 * PMU architectural features with a per-core view. On non-hybrid
8102 	 * parts every core has the same PMU capabilities, so updating
8103 	 * x86_pmu from the boot CPU is sufficient. On hybrid parts, x86_pmu
8104 	 * holds only the common capabilities, so keep the values from
8105 	 * leaf 0xA; the core-type specific update is done later, when a
8106 	 * new core type comes online.
8107 	 */
8108 	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
8109 		update_pmu_cap(NULL);
8110 
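	/*
	 * Architectural PEBS hooks extra work into event enable/disable;
	 * wire up the _ext static calls used on those paths.
	 */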
8111 	if (x86_pmu.arch_pebs) {
8112 		static_call_update(intel_pmu_disable_event_ext,
8113 				   intel_pmu_disable_event_ext);
8114 		static_call_update(intel_pmu_enable_event_ext,
8115 				   intel_pmu_enable_event_ext);
8116 		pr_cont("Architectural PEBS, ");
8117 	}
8118 
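	/*
	 * Sanity-check the counter masks against the hardware limits and
	 * derive intel_ctrl, the GLOBAL_CTRL enable mask covering both the
	 * general-purpose and the fixed counters.
	 */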
8119 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
8120 				      &x86_pmu.fixed_cntr_mask64,
8121 				      &x86_pmu.intel_ctrl);
8122 
8123 	/* AnyThread may be deprecated on arch perfmon v5 or later */
8124 	if (x86_pmu.intel_cap.anythread_deprecated)
8125 		x86_pmu.format_attrs = intel_arch_formats_attr;
8126 
8127 	intel_pmu_check_event_constraints_all(NULL);
8128 
8129 	/*
8130 	 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
8131 	 * so probe every LBR MSR here and disable LBR use entirely if any
8132 	 * of them cannot be accessed.
8133 	 */
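	/*
	 * check_msr() only probes when running as a guest: it toggles bits
	 * and verifies the readback, catching LBR MSRs the hypervisor does
	 * not implement properly.
	 */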
8134 	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
8135 		x86_pmu.lbr_nr = 0;
8136 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
8137 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
8138 		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
8139 			x86_pmu.lbr_nr = 0;
8140 	}
8141 
8142 	if (x86_pmu.lbr_nr) {
8143 		intel_pmu_lbr_init();
8144 
8145 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
8146 
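		/*
		 * perf_snapshot_branch_stack is the static call behind the
		 * bpf_get_branch_snapshot() helper; point it at the variant
		 * matching the LBR flavour.
		 */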
8147 		/* only support branch_stack snapshot for perfmon >= v2 */
8148 		if (x86_pmu.disable_all == intel_pmu_disable_all) {
8149 			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
8150 				static_call_update(perf_snapshot_branch_stack,
8151 						   intel_pmu_snapshot_arch_branch_stack);
8152 			} else {
8153 				static_call_update(perf_snapshot_branch_stack,
8154 						   intel_pmu_snapshot_branch_stack);
8155 			}
8156 		}
8157 	}
8158 
8159 	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
8160 
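	/*
	 * Legacy writes to the PERFCTRx MSRs are sign-extended from bit 31.
	 * When IA32_PERF_CAPABILITIES advertises full-width writes, the
	 * MSR_IA32_PMC0 alias range accepts the full counter width, which
	 * allows a larger maximum sampling period.
	 */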
8161 	/* Support full-width counters using the alternative MSR range */
8162 	if (x86_pmu.intel_cap.full_width_write) {
8163 		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
8164 		x86_pmu.perfctr = MSR_IA32_PMC0;
8165 		pr_cont("full-width counters, ");
8166 	}
8167 
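	/*
	 * Perfmon v6 re-homes the counter and config MSRs in a new, aliased
	 * range with a fixed stride per counter; addr_offset translates a
	 * counter index into that layout.
	 */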
8168 	/* Support V6+ MSR Aliasing */
8169 	if (x86_pmu.version >= 6) {
8170 		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
8171 		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
8172 		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
8173 		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
8174 	}
8175 
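	/*
	 * PERF_METRICS (topdown) has a dedicated enable bit in GLOBAL_CTRL;
	 * include it in intel_ctrl so the metrics can be activated.
	 */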
8176 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
8177 		x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS;
8178 
8179 	if (x86_pmu.intel_cap.pebs_timing_info)
8180 		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
8181 
8182 	intel_aux_output_init();
8183 
8184 	return 0;
8185 }
8186 
8187 /*
8188  * HT bug: phase 2 init
8189  * Called once we have valid topology information, to check
8190  * whether or not HT is enabled.
8191  * If HT is off, disable the workaround.
8192  */
8193 static __init int fixup_ht_bug(void)
8194 {
8195 	int c;
8196 	/*
8197 	 * Problem not present on this CPU model, nothing to do.
8198 	 */
8199 	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
8200 		return 0;
8201 
8202 	if (topology_max_smt_threads() > 1) {
8203 		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
8204 		return 0;
8205 	}
8206 
8207 	cpus_read_lock();
8208 
8209 	hardlockup_detector_perf_stop();
8210 
8211 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
8212 
8213 	x86_pmu.start_scheduling = NULL;
8214 	x86_pmu.commit_scheduling = NULL;
8215 	x86_pmu.stop_scheduling = NULL;
8216 
8217 	hardlockup_detector_perf_restart();
8218 
8219 	for_each_online_cpu(c)
8220 		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
8221 
8222 	cpus_read_unlock();
8223 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
8224 	return 0;
8225 }
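/* Runs after SMP bring-up, once topology_max_smt_threads() is meaningful. */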
8226 subsys_initcall(fixup_ht_bug)
8227