xref: /linux/arch/x86/events/intel/core.c (revision 17ef32ae66b1afc9fa6dbea40eb18a13edba9c31)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Per core/cpu state
4  *
5  * Used to coordinate shared registers between HT threads or
6  * among events on a single PMU.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
18 
19 #include <asm/cpufeature.h>
20 #include <asm/debugreg.h>
21 #include <asm/hardirq.h>
22 #include <asm/intel-family.h>
23 #include <asm/intel_pt.h>
24 #include <asm/apic.h>
25 #include <asm/cpu_device_id.h>
26 #include <asm/msr.h>
27 
28 #include "../perf_event.h"
29 
30 /*
31  * Intel PerfMon, used on Core and later.
32  */
33 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
34 {
35 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
36 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
37 	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
38 	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
39 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
40 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
41 	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
42 	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
43 };
44 
45 static struct event_constraint intel_core_event_constraints[] __read_mostly =
46 {
47 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
48 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
49 	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
50 	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
51 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
52 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
53 	EVENT_CONSTRAINT_END
54 };
55 
56 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
57 {
58 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
59 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
60 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
61 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
62 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
63 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
64 	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
65 	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
66 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
67 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
68 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
69 	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
70 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
71 	EVENT_CONSTRAINT_END
72 };
73 
74 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
75 {
76 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
77 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
78 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
79 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
80 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
81 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
82 	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
83 	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
84 	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
85 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
86 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
87 	EVENT_CONSTRAINT_END
88 };
89 
90 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
91 {
92 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
93 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
94 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
95 	EVENT_EXTRA_END
96 };
97 
98 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
99 {
100 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
101 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
102 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
103 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
104 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
105 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
106 	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
107 	EVENT_CONSTRAINT_END
108 };
109 
110 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
111 {
112 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
113 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
114 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
115 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
116 	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
117 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
118 	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
119 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
120 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
121 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
122 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
123 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
124 
125 	/*
126 	 * When HT is off these events can only run on the bottom 4 counters
127 	 * When HT is on, they are impacted by the HT bug and require EXCL access
128 	 */
129 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
130 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
131 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
132 	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
133 
134 	EVENT_CONSTRAINT_END
135 };
136 
137 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
138 {
139 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
140 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
141 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
142 	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
143 	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
144 	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
145 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
146 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
147 	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
148 	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
149 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
150 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
151 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
152 
153 	/*
154 	 * When HT is off these events can only run on the bottom 4 counters
155 	 * When HT is on, they are impacted by the HT bug and require EXCL access
156 	 */
157 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
158 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
159 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
160 	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
161 
162 	EVENT_CONSTRAINT_END
163 };
164 
165 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
166 {
167 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
168 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
169 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
170 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
171 	EVENT_EXTRA_END
172 };
173 
174 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
175 {
176 	EVENT_CONSTRAINT_END
177 };
178 
179 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
180 {
181 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
182 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
183 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
184 	EVENT_CONSTRAINT_END
185 };
186 
187 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
188 {
189 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
190 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
191 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
192 	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
193 	FIXED_EVENT_CONSTRAINT(0x0500, 4),
194 	FIXED_EVENT_CONSTRAINT(0x0600, 5),
195 	FIXED_EVENT_CONSTRAINT(0x0700, 6),
196 	FIXED_EVENT_CONSTRAINT(0x0800, 7),
197 	FIXED_EVENT_CONSTRAINT(0x0900, 8),
198 	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
199 	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
200 	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
201 	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
202 	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
203 	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
204 	FIXED_EVENT_CONSTRAINT(0x1000, 15),
205 	EVENT_CONSTRAINT_END
206 };
207 
208 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
209 {
210 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
211 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
212 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
213 	EVENT_CONSTRAINT_END
214 };
215 
216 static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
217 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
218 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
219 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
220 	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
221 	EVENT_CONSTRAINT_END
222 };
223 
224 static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
225 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
226 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
227 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
228 	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
229 	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
230 	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
231 	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
232 	EVENT_CONSTRAINT_END
233 };
234 
235 static struct event_constraint intel_skl_event_constraints[] = {
236 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
237 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
238 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
239 	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
240 
241 	/*
242 	 * when HT is off, these can only run on the bottom 4 counters
243 	 */
244 	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
245 	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
246 	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
247 	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
248 	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */
249 
250 	EVENT_CONSTRAINT_END
251 };
252 
253 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
254 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
255 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
256 	EVENT_EXTRA_END
257 };
258 
259 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
260 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
261 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
262 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
263 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
264 	EVENT_EXTRA_END
265 };
266 
267 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
268 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
269 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
270 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
271 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
272 	EVENT_EXTRA_END
273 };
274 
275 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
276 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
277 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
278 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
279 	/*
280 	 * Note the low 8 bits eventsel code is not a continuous field, containing
281 	 * some #GPing bits. These are masked out.
282 	 */
283 	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
284 	EVENT_EXTRA_END
285 };
286 
287 static struct event_constraint intel_icl_event_constraints[] = {
288 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
289 	FIXED_EVENT_CONSTRAINT(0x01c0, 0),	/* old INST_RETIRED.PREC_DIST */
290 	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
291 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
292 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
293 	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
294 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
295 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
296 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
297 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
298 	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
299 	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
300 	INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */
301 	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
302 	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
303 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
304 	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
305 	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
306 	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
307 	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
308 	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
309 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
310 	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
311 	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
312 	EVENT_CONSTRAINT_END
313 };
314 
315 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
316 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
317 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
318 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
319 	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
320 	EVENT_EXTRA_END
321 };
322 
323 static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
324 	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
325 	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
326 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
327 	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
328 	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
329 	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
330 	EVENT_EXTRA_END
331 };
332 
333 static struct event_constraint intel_glc_event_constraints[] = {
334 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
335 	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
336 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
337 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
338 	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
339 	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
340 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
341 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
342 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
343 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
344 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
345 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
346 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
347 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
348 
349 	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
350 	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
351 	/*
352 	 * Generally event codes < 0x90 are restricted to counters 0-3.
353 	 * The 0x2E and 0x3C are exception, which has no restriction.
354 	 */
355 	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
356 
357 	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
358 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
359 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
360 	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
361 	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
362 	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
363 	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
364 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
365 	/*
366 	 * Generally event codes >= 0x90 are likely to have no restrictions.
367 	 * The exception are defined as above.
368 	 */
369 	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
370 
371 	EVENT_CONSTRAINT_END
372 };
373 
374 static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
375 	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
376 	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
377 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
378 	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
379 	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
380 	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
381 	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
382 	EVENT_EXTRA_END
383 };
384 
385 static struct event_constraint intel_lnc_event_constraints[] = {
386 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
387 	FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */
388 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
389 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
390 	FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */
391 	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
392 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
393 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
394 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
395 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
396 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
397 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
398 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
399 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
400 
401 	INTEL_EVENT_CONSTRAINT(0x20, 0xf),
402 
403 	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
404 	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
405 	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
406 	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
407 
408 	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
409 	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
410 
411 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
412 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
413 	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
414 	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
415 	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
416 	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
417 	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
418 	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
419 
420 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
421 
422 	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
423 
424 	EVENT_CONSTRAINT_END
425 };
426 
427 static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
428 	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
429 	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
430 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
431 	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
432 	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
433 	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
434 	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
435 	EVENT_EXTRA_END
436 };
437 
438 EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
439 EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
440 EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
441 
442 static struct attribute *nhm_mem_events_attrs[] = {
443 	EVENT_PTR(mem_ld_nhm),
444 	NULL,
445 };
446 
447 /*
448  * topdown events for Intel Core CPUs.
449  *
450  * The events are all in slots, which is a free slot in a 4 wide
451  * pipeline. Some events are already reported in slots, for cycle
452  * events we multiply by the pipeline width (4).
453  *
454  * With Hyper Threading on, topdown metrics are either summed or averaged
455  * between the threads of a core: (count_t0 + count_t1).
456  *
457  * For the average case the metric is always scaled to pipeline width,
458  * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
459  */
460 
461 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
462 	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
463 	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
464 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
465 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
466 	"event=0xe,umask=0x1");			/* uops_issued.any */
467 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
468 	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
469 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
470 	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
471 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
472 	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
473 	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
474 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
475 	"4", "2");
476 
477 EVENT_ATTR_STR(slots,			slots,			"event=0x00,umask=0x4");
478 EVENT_ATTR_STR(topdown-retiring,	td_retiring,		"event=0x00,umask=0x80");
479 EVENT_ATTR_STR(topdown-bad-spec,	td_bad_spec,		"event=0x00,umask=0x81");
480 EVENT_ATTR_STR(topdown-fe-bound,	td_fe_bound,		"event=0x00,umask=0x82");
481 EVENT_ATTR_STR(topdown-be-bound,	td_be_bound,		"event=0x00,umask=0x83");
482 EVENT_ATTR_STR(topdown-heavy-ops,	td_heavy_ops,		"event=0x00,umask=0x84");
483 EVENT_ATTR_STR(topdown-br-mispredict,	td_br_mispredict,	"event=0x00,umask=0x85");
484 EVENT_ATTR_STR(topdown-fetch-lat,	td_fetch_lat,		"event=0x00,umask=0x86");
485 EVENT_ATTR_STR(topdown-mem-bound,	td_mem_bound,		"event=0x00,umask=0x87");
486 
487 static struct attribute *snb_events_attrs[] = {
488 	EVENT_PTR(td_slots_issued),
489 	EVENT_PTR(td_slots_retired),
490 	EVENT_PTR(td_fetch_bubbles),
491 	EVENT_PTR(td_total_slots),
492 	EVENT_PTR(td_total_slots_scale),
493 	EVENT_PTR(td_recovery_bubbles),
494 	EVENT_PTR(td_recovery_bubbles_scale),
495 	NULL,
496 };
497 
498 static struct attribute *snb_mem_events_attrs[] = {
499 	EVENT_PTR(mem_ld_snb),
500 	EVENT_PTR(mem_st_snb),
501 	NULL,
502 };
503 
504 static struct event_constraint intel_hsw_event_constraints[] = {
505 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
506 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
507 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
508 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
509 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
510 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
511 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
512 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
513 	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
514 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
515 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
516 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
517 
518 	/*
519 	 * When HT is off these events can only run on the bottom 4 counters
520 	 * When HT is on, they are impacted by the HT bug and require EXCL access
521 	 */
522 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
523 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
524 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
525 	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
526 
527 	EVENT_CONSTRAINT_END
528 };
529 
530 static struct event_constraint intel_bdw_event_constraints[] = {
531 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
532 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
533 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
534 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
535 	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
536 	/*
537 	 * when HT is off, these can only run on the bottom 4 counters
538 	 */
539 	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
540 	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
541 	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
542 	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
543 	EVENT_CONSTRAINT_END
544 };
545 
intel_pmu_event_map(int hw_event)546 static u64 intel_pmu_event_map(int hw_event)
547 {
548 	return intel_perfmon_event_map[hw_event];
549 }
550 
551 static __initconst const u64 glc_hw_cache_event_ids
552 				[PERF_COUNT_HW_CACHE_MAX]
553 				[PERF_COUNT_HW_CACHE_OP_MAX]
554 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
555 {
556  [ C(L1D ) ] = {
557 	[ C(OP_READ) ] = {
558 		[ C(RESULT_ACCESS) ] = 0x81d0,
559 		[ C(RESULT_MISS)   ] = 0xe124,
560 	},
561 	[ C(OP_WRITE) ] = {
562 		[ C(RESULT_ACCESS) ] = 0x82d0,
563 	},
564  },
565  [ C(L1I ) ] = {
566 	[ C(OP_READ) ] = {
567 		[ C(RESULT_MISS)   ] = 0xe424,
568 	},
569 	[ C(OP_WRITE) ] = {
570 		[ C(RESULT_ACCESS) ] = -1,
571 		[ C(RESULT_MISS)   ] = -1,
572 	},
573  },
574  [ C(LL  ) ] = {
575 	[ C(OP_READ) ] = {
576 		[ C(RESULT_ACCESS) ] = 0x12a,
577 		[ C(RESULT_MISS)   ] = 0x12a,
578 	},
579 	[ C(OP_WRITE) ] = {
580 		[ C(RESULT_ACCESS) ] = 0x12a,
581 		[ C(RESULT_MISS)   ] = 0x12a,
582 	},
583  },
584  [ C(DTLB) ] = {
585 	[ C(OP_READ) ] = {
586 		[ C(RESULT_ACCESS) ] = 0x81d0,
587 		[ C(RESULT_MISS)   ] = 0xe12,
588 	},
589 	[ C(OP_WRITE) ] = {
590 		[ C(RESULT_ACCESS) ] = 0x82d0,
591 		[ C(RESULT_MISS)   ] = 0xe13,
592 	},
593  },
594  [ C(ITLB) ] = {
595 	[ C(OP_READ) ] = {
596 		[ C(RESULT_ACCESS) ] = -1,
597 		[ C(RESULT_MISS)   ] = 0xe11,
598 	},
599 	[ C(OP_WRITE) ] = {
600 		[ C(RESULT_ACCESS) ] = -1,
601 		[ C(RESULT_MISS)   ] = -1,
602 	},
603 	[ C(OP_PREFETCH) ] = {
604 		[ C(RESULT_ACCESS) ] = -1,
605 		[ C(RESULT_MISS)   ] = -1,
606 	},
607  },
608  [ C(BPU ) ] = {
609 	[ C(OP_READ) ] = {
610 		[ C(RESULT_ACCESS) ] = 0x4c4,
611 		[ C(RESULT_MISS)   ] = 0x4c5,
612 	},
613 	[ C(OP_WRITE) ] = {
614 		[ C(RESULT_ACCESS) ] = -1,
615 		[ C(RESULT_MISS)   ] = -1,
616 	},
617 	[ C(OP_PREFETCH) ] = {
618 		[ C(RESULT_ACCESS) ] = -1,
619 		[ C(RESULT_MISS)   ] = -1,
620 	},
621  },
622  [ C(NODE) ] = {
623 	[ C(OP_READ) ] = {
624 		[ C(RESULT_ACCESS) ] = 0x12a,
625 		[ C(RESULT_MISS)   ] = 0x12a,
626 	},
627  },
628 };
629 
630 static __initconst const u64 glc_hw_cache_extra_regs
631 				[PERF_COUNT_HW_CACHE_MAX]
632 				[PERF_COUNT_HW_CACHE_OP_MAX]
633 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
634 {
635  [ C(LL  ) ] = {
636 	[ C(OP_READ) ] = {
637 		[ C(RESULT_ACCESS) ] = 0x10001,
638 		[ C(RESULT_MISS)   ] = 0x3fbfc00001,
639 	},
640 	[ C(OP_WRITE) ] = {
641 		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
642 		[ C(RESULT_MISS)   ] = 0x3f3fc00002,
643 	},
644  },
645  [ C(NODE) ] = {
646 	[ C(OP_READ) ] = {
647 		[ C(RESULT_ACCESS) ] = 0x10c000001,
648 		[ C(RESULT_MISS)   ] = 0x3fb3000001,
649 	},
650  },
651 };
652 
653 /*
654  * Notes on the events:
655  * - data reads do not include code reads (comparable to earlier tables)
656  * - data counts include speculative execution (except L1 write, dtlb, bpu)
657  * - remote node access includes remote memory, remote cache, remote mmio.
658  * - prefetches are not included in the counts.
659  * - icache miss does not include decoded icache
660  */
661 
662 #define SKL_DEMAND_DATA_RD		BIT_ULL(0)
663 #define SKL_DEMAND_RFO			BIT_ULL(1)
664 #define SKL_ANY_RESPONSE		BIT_ULL(16)
665 #define SKL_SUPPLIER_NONE		BIT_ULL(17)
666 #define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
667 #define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
668 #define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
669 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
670 #define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
671 					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
672 					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
673 					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
674 #define SKL_SPL_HIT			BIT_ULL(30)
675 #define SKL_SNOOP_NONE			BIT_ULL(31)
676 #define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
677 #define SKL_SNOOP_MISS			BIT_ULL(33)
678 #define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
679 #define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
680 #define SKL_SNOOP_HITM			BIT_ULL(36)
681 #define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
682 #define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
683 					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
684 					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
685 					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
686 #define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
687 #define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
688 					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
689 					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
690 					 SKL_SNOOP_HITM|SKL_SPL_HIT)
691 #define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
692 #define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
693 #define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
694 					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
695 					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
696 
697 static __initconst const u64 skl_hw_cache_event_ids
698 				[PERF_COUNT_HW_CACHE_MAX]
699 				[PERF_COUNT_HW_CACHE_OP_MAX]
700 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
701 {
702  [ C(L1D ) ] = {
703 	[ C(OP_READ) ] = {
704 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
705 		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
706 	},
707 	[ C(OP_WRITE) ] = {
708 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
709 		[ C(RESULT_MISS)   ] = 0x0,
710 	},
711 	[ C(OP_PREFETCH) ] = {
712 		[ C(RESULT_ACCESS) ] = 0x0,
713 		[ C(RESULT_MISS)   ] = 0x0,
714 	},
715  },
716  [ C(L1I ) ] = {
717 	[ C(OP_READ) ] = {
718 		[ C(RESULT_ACCESS) ] = 0x0,
719 		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
720 	},
721 	[ C(OP_WRITE) ] = {
722 		[ C(RESULT_ACCESS) ] = -1,
723 		[ C(RESULT_MISS)   ] = -1,
724 	},
725 	[ C(OP_PREFETCH) ] = {
726 		[ C(RESULT_ACCESS) ] = 0x0,
727 		[ C(RESULT_MISS)   ] = 0x0,
728 	},
729  },
730  [ C(LL  ) ] = {
731 	[ C(OP_READ) ] = {
732 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
733 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
734 	},
735 	[ C(OP_WRITE) ] = {
736 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
737 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
738 	},
739 	[ C(OP_PREFETCH) ] = {
740 		[ C(RESULT_ACCESS) ] = 0x0,
741 		[ C(RESULT_MISS)   ] = 0x0,
742 	},
743  },
744  [ C(DTLB) ] = {
745 	[ C(OP_READ) ] = {
746 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
747 		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
748 	},
749 	[ C(OP_WRITE) ] = {
750 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
751 		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
752 	},
753 	[ C(OP_PREFETCH) ] = {
754 		[ C(RESULT_ACCESS) ] = 0x0,
755 		[ C(RESULT_MISS)   ] = 0x0,
756 	},
757  },
758  [ C(ITLB) ] = {
759 	[ C(OP_READ) ] = {
760 		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
761 		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
762 	},
763 	[ C(OP_WRITE) ] = {
764 		[ C(RESULT_ACCESS) ] = -1,
765 		[ C(RESULT_MISS)   ] = -1,
766 	},
767 	[ C(OP_PREFETCH) ] = {
768 		[ C(RESULT_ACCESS) ] = -1,
769 		[ C(RESULT_MISS)   ] = -1,
770 	},
771  },
772  [ C(BPU ) ] = {
773 	[ C(OP_READ) ] = {
774 		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
775 		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
776 	},
777 	[ C(OP_WRITE) ] = {
778 		[ C(RESULT_ACCESS) ] = -1,
779 		[ C(RESULT_MISS)   ] = -1,
780 	},
781 	[ C(OP_PREFETCH) ] = {
782 		[ C(RESULT_ACCESS) ] = -1,
783 		[ C(RESULT_MISS)   ] = -1,
784 	},
785  },
786  [ C(NODE) ] = {
787 	[ C(OP_READ) ] = {
788 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
789 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
790 	},
791 	[ C(OP_WRITE) ] = {
792 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
793 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
794 	},
795 	[ C(OP_PREFETCH) ] = {
796 		[ C(RESULT_ACCESS) ] = 0x0,
797 		[ C(RESULT_MISS)   ] = 0x0,
798 	},
799  },
800 };
801 
802 static __initconst const u64 skl_hw_cache_extra_regs
803 				[PERF_COUNT_HW_CACHE_MAX]
804 				[PERF_COUNT_HW_CACHE_OP_MAX]
805 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
806 {
807  [ C(LL  ) ] = {
808 	[ C(OP_READ) ] = {
809 		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
810 				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
811 		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
812 				       SKL_L3_MISS|SKL_ANY_SNOOP|
813 				       SKL_SUPPLIER_NONE,
814 	},
815 	[ C(OP_WRITE) ] = {
816 		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
817 				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
818 		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
819 				       SKL_L3_MISS|SKL_ANY_SNOOP|
820 				       SKL_SUPPLIER_NONE,
821 	},
822 	[ C(OP_PREFETCH) ] = {
823 		[ C(RESULT_ACCESS) ] = 0x0,
824 		[ C(RESULT_MISS)   ] = 0x0,
825 	},
826  },
827  [ C(NODE) ] = {
828 	[ C(OP_READ) ] = {
829 		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
830 				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
831 		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
832 				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
833 	},
834 	[ C(OP_WRITE) ] = {
835 		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
836 				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
837 		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
838 				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
839 	},
840 	[ C(OP_PREFETCH) ] = {
841 		[ C(RESULT_ACCESS) ] = 0x0,
842 		[ C(RESULT_MISS)   ] = 0x0,
843 	},
844  },
845 };
846 
847 #define SNB_DMND_DATA_RD	(1ULL << 0)
848 #define SNB_DMND_RFO		(1ULL << 1)
849 #define SNB_DMND_IFETCH		(1ULL << 2)
850 #define SNB_DMND_WB		(1ULL << 3)
851 #define SNB_PF_DATA_RD		(1ULL << 4)
852 #define SNB_PF_RFO		(1ULL << 5)
853 #define SNB_PF_IFETCH		(1ULL << 6)
854 #define SNB_LLC_DATA_RD		(1ULL << 7)
855 #define SNB_LLC_RFO		(1ULL << 8)
856 #define SNB_LLC_IFETCH		(1ULL << 9)
857 #define SNB_BUS_LOCKS		(1ULL << 10)
858 #define SNB_STRM_ST		(1ULL << 11)
859 #define SNB_OTHER		(1ULL << 15)
860 #define SNB_RESP_ANY		(1ULL << 16)
861 #define SNB_NO_SUPP		(1ULL << 17)
862 #define SNB_LLC_HITM		(1ULL << 18)
863 #define SNB_LLC_HITE		(1ULL << 19)
864 #define SNB_LLC_HITS		(1ULL << 20)
865 #define SNB_LLC_HITF		(1ULL << 21)
866 #define SNB_LOCAL		(1ULL << 22)
867 #define SNB_REMOTE		(0xffULL << 23)
868 #define SNB_SNP_NONE		(1ULL << 31)
869 #define SNB_SNP_NOT_NEEDED	(1ULL << 32)
870 #define SNB_SNP_MISS		(1ULL << 33)
871 #define SNB_NO_FWD		(1ULL << 34)
872 #define SNB_SNP_FWD		(1ULL << 35)
873 #define SNB_HITM		(1ULL << 36)
874 #define SNB_NON_DRAM		(1ULL << 37)
875 
876 #define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
877 #define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
878 #define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)
879 
880 #define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
881 				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
882 				 SNB_HITM)
883 
884 #define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
885 #define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)
886 
887 #define SNB_L3_ACCESS		SNB_RESP_ANY
888 #define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
889 
890 static __initconst const u64 snb_hw_cache_extra_regs
891 				[PERF_COUNT_HW_CACHE_MAX]
892 				[PERF_COUNT_HW_CACHE_OP_MAX]
893 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
894 {
895  [ C(LL  ) ] = {
896 	[ C(OP_READ) ] = {
897 		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
898 		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
899 	},
900 	[ C(OP_WRITE) ] = {
901 		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
902 		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
903 	},
904 	[ C(OP_PREFETCH) ] = {
905 		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
906 		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
907 	},
908  },
909  [ C(NODE) ] = {
910 	[ C(OP_READ) ] = {
911 		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
912 		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
913 	},
914 	[ C(OP_WRITE) ] = {
915 		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
916 		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
917 	},
918 	[ C(OP_PREFETCH) ] = {
919 		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
920 		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
921 	},
922  },
923 };
924 
925 static __initconst const u64 snb_hw_cache_event_ids
926 				[PERF_COUNT_HW_CACHE_MAX]
927 				[PERF_COUNT_HW_CACHE_OP_MAX]
928 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
929 {
930  [ C(L1D) ] = {
931 	[ C(OP_READ) ] = {
932 		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
933 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
934 	},
935 	[ C(OP_WRITE) ] = {
936 		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
937 		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
938 	},
939 	[ C(OP_PREFETCH) ] = {
940 		[ C(RESULT_ACCESS) ] = 0x0,
941 		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
942 	},
943  },
944  [ C(L1I ) ] = {
945 	[ C(OP_READ) ] = {
946 		[ C(RESULT_ACCESS) ] = 0x0,
947 		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
948 	},
949 	[ C(OP_WRITE) ] = {
950 		[ C(RESULT_ACCESS) ] = -1,
951 		[ C(RESULT_MISS)   ] = -1,
952 	},
953 	[ C(OP_PREFETCH) ] = {
954 		[ C(RESULT_ACCESS) ] = 0x0,
955 		[ C(RESULT_MISS)   ] = 0x0,
956 	},
957  },
958  [ C(LL  ) ] = {
959 	[ C(OP_READ) ] = {
960 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
961 		[ C(RESULT_ACCESS) ] = 0x01b7,
962 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
963 		[ C(RESULT_MISS)   ] = 0x01b7,
964 	},
965 	[ C(OP_WRITE) ] = {
966 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
967 		[ C(RESULT_ACCESS) ] = 0x01b7,
968 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
969 		[ C(RESULT_MISS)   ] = 0x01b7,
970 	},
971 	[ C(OP_PREFETCH) ] = {
972 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
973 		[ C(RESULT_ACCESS) ] = 0x01b7,
974 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
975 		[ C(RESULT_MISS)   ] = 0x01b7,
976 	},
977  },
978  [ C(DTLB) ] = {
979 	[ C(OP_READ) ] = {
980 		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
981 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
982 	},
983 	[ C(OP_WRITE) ] = {
984 		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
985 		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
986 	},
987 	[ C(OP_PREFETCH) ] = {
988 		[ C(RESULT_ACCESS) ] = 0x0,
989 		[ C(RESULT_MISS)   ] = 0x0,
990 	},
991  },
992  [ C(ITLB) ] = {
993 	[ C(OP_READ) ] = {
994 		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
995 		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
996 	},
997 	[ C(OP_WRITE) ] = {
998 		[ C(RESULT_ACCESS) ] = -1,
999 		[ C(RESULT_MISS)   ] = -1,
1000 	},
1001 	[ C(OP_PREFETCH) ] = {
1002 		[ C(RESULT_ACCESS) ] = -1,
1003 		[ C(RESULT_MISS)   ] = -1,
1004 	},
1005  },
1006  [ C(BPU ) ] = {
1007 	[ C(OP_READ) ] = {
1008 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1009 		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1010 	},
1011 	[ C(OP_WRITE) ] = {
1012 		[ C(RESULT_ACCESS) ] = -1,
1013 		[ C(RESULT_MISS)   ] = -1,
1014 	},
1015 	[ C(OP_PREFETCH) ] = {
1016 		[ C(RESULT_ACCESS) ] = -1,
1017 		[ C(RESULT_MISS)   ] = -1,
1018 	},
1019  },
1020  [ C(NODE) ] = {
1021 	[ C(OP_READ) ] = {
1022 		[ C(RESULT_ACCESS) ] = 0x01b7,
1023 		[ C(RESULT_MISS)   ] = 0x01b7,
1024 	},
1025 	[ C(OP_WRITE) ] = {
1026 		[ C(RESULT_ACCESS) ] = 0x01b7,
1027 		[ C(RESULT_MISS)   ] = 0x01b7,
1028 	},
1029 	[ C(OP_PREFETCH) ] = {
1030 		[ C(RESULT_ACCESS) ] = 0x01b7,
1031 		[ C(RESULT_MISS)   ] = 0x01b7,
1032 	},
1033  },
1034 
1035 };
1036 
1037 /*
1038  * Notes on the events:
1039  * - data reads do not include code reads (comparable to earlier tables)
1040  * - data counts include speculative execution (except L1 write, dtlb, bpu)
1041  * - remote node access includes remote memory, remote cache, remote mmio.
1042  * - prefetches are not included in the counts because they are not
1043  *   reliably counted.
1044  */
1045 
1046 #define HSW_DEMAND_DATA_RD		BIT_ULL(0)
1047 #define HSW_DEMAND_RFO			BIT_ULL(1)
1048 #define HSW_ANY_RESPONSE		BIT_ULL(16)
1049 #define HSW_SUPPLIER_NONE		BIT_ULL(17)
1050 #define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
1051 #define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
1052 #define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
1053 #define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
1054 #define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
1055 					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1056 					 HSW_L3_MISS_REMOTE_HOP2P)
1057 #define HSW_SNOOP_NONE			BIT_ULL(31)
1058 #define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
1059 #define HSW_SNOOP_MISS			BIT_ULL(33)
1060 #define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
1061 #define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
1062 #define HSW_SNOOP_HITM			BIT_ULL(36)
1063 #define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
1064 #define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
1065 					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
1066 					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
1067 					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
1068 #define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
1069 #define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
1070 #define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
1071 #define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
1072 					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
1073 #define HSW_LLC_ACCESS			HSW_ANY_RESPONSE
1074 
1075 #define BDW_L3_MISS_LOCAL		BIT(26)
1076 #define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
1077 					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1078 					 HSW_L3_MISS_REMOTE_HOP2P)
1079 
1080 
1081 static __initconst const u64 hsw_hw_cache_event_ids
1082 				[PERF_COUNT_HW_CACHE_MAX]
1083 				[PERF_COUNT_HW_CACHE_OP_MAX]
1084 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1085 {
1086  [ C(L1D ) ] = {
1087 	[ C(OP_READ) ] = {
1088 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1089 		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
1090 	},
1091 	[ C(OP_WRITE) ] = {
1092 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1093 		[ C(RESULT_MISS)   ] = 0x0,
1094 	},
1095 	[ C(OP_PREFETCH) ] = {
1096 		[ C(RESULT_ACCESS) ] = 0x0,
1097 		[ C(RESULT_MISS)   ] = 0x0,
1098 	},
1099  },
1100  [ C(L1I ) ] = {
1101 	[ C(OP_READ) ] = {
1102 		[ C(RESULT_ACCESS) ] = 0x0,
1103 		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
1104 	},
1105 	[ C(OP_WRITE) ] = {
1106 		[ C(RESULT_ACCESS) ] = -1,
1107 		[ C(RESULT_MISS)   ] = -1,
1108 	},
1109 	[ C(OP_PREFETCH) ] = {
1110 		[ C(RESULT_ACCESS) ] = 0x0,
1111 		[ C(RESULT_MISS)   ] = 0x0,
1112 	},
1113  },
1114  [ C(LL  ) ] = {
1115 	[ C(OP_READ) ] = {
1116 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
1117 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
1118 	},
1119 	[ C(OP_WRITE) ] = {
1120 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
1121 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
1122 	},
1123 	[ C(OP_PREFETCH) ] = {
1124 		[ C(RESULT_ACCESS) ] = 0x0,
1125 		[ C(RESULT_MISS)   ] = 0x0,
1126 	},
1127  },
1128  [ C(DTLB) ] = {
1129 	[ C(OP_READ) ] = {
1130 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1131 		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1132 	},
1133 	[ C(OP_WRITE) ] = {
1134 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1135 		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1136 	},
1137 	[ C(OP_PREFETCH) ] = {
1138 		[ C(RESULT_ACCESS) ] = 0x0,
1139 		[ C(RESULT_MISS)   ] = 0x0,
1140 	},
1141  },
1142  [ C(ITLB) ] = {
1143 	[ C(OP_READ) ] = {
1144 		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
1145 		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
1146 	},
1147 	[ C(OP_WRITE) ] = {
1148 		[ C(RESULT_ACCESS) ] = -1,
1149 		[ C(RESULT_MISS)   ] = -1,
1150 	},
1151 	[ C(OP_PREFETCH) ] = {
1152 		[ C(RESULT_ACCESS) ] = -1,
1153 		[ C(RESULT_MISS)   ] = -1,
1154 	},
1155  },
1156  [ C(BPU ) ] = {
1157 	[ C(OP_READ) ] = {
1158 		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
1159 		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
1160 	},
1161 	[ C(OP_WRITE) ] = {
1162 		[ C(RESULT_ACCESS) ] = -1,
1163 		[ C(RESULT_MISS)   ] = -1,
1164 	},
1165 	[ C(OP_PREFETCH) ] = {
1166 		[ C(RESULT_ACCESS) ] = -1,
1167 		[ C(RESULT_MISS)   ] = -1,
1168 	},
1169  },
1170  [ C(NODE) ] = {
1171 	[ C(OP_READ) ] = {
1172 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
1173 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
1174 	},
1175 	[ C(OP_WRITE) ] = {
1176 		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
1177 		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
1178 	},
1179 	[ C(OP_PREFETCH) ] = {
1180 		[ C(RESULT_ACCESS) ] = 0x0,
1181 		[ C(RESULT_MISS)   ] = 0x0,
1182 	},
1183  },
1184 };
1185 
1186 static __initconst const u64 hsw_hw_cache_extra_regs
1187 				[PERF_COUNT_HW_CACHE_MAX]
1188 				[PERF_COUNT_HW_CACHE_OP_MAX]
1189 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1190 {
1191  [ C(LL  ) ] = {
1192 	[ C(OP_READ) ] = {
1193 		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1194 				       HSW_LLC_ACCESS,
1195 		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1196 				       HSW_L3_MISS|HSW_ANY_SNOOP,
1197 	},
1198 	[ C(OP_WRITE) ] = {
1199 		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1200 				       HSW_LLC_ACCESS,
1201 		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1202 				       HSW_L3_MISS|HSW_ANY_SNOOP,
1203 	},
1204 	[ C(OP_PREFETCH) ] = {
1205 		[ C(RESULT_ACCESS) ] = 0x0,
1206 		[ C(RESULT_MISS)   ] = 0x0,
1207 	},
1208  },
1209  [ C(NODE) ] = {
1210 	[ C(OP_READ) ] = {
1211 		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1212 				       HSW_L3_MISS_LOCAL_DRAM|
1213 				       HSW_SNOOP_DRAM,
1214 		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1215 				       HSW_L3_MISS_REMOTE|
1216 				       HSW_SNOOP_DRAM,
1217 	},
1218 	[ C(OP_WRITE) ] = {
1219 		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1220 				       HSW_L3_MISS_LOCAL_DRAM|
1221 				       HSW_SNOOP_DRAM,
1222 		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1223 				       HSW_L3_MISS_REMOTE|
1224 				       HSW_SNOOP_DRAM,
1225 	},
1226 	[ C(OP_PREFETCH) ] = {
1227 		[ C(RESULT_ACCESS) ] = 0x0,
1228 		[ C(RESULT_MISS)   ] = 0x0,
1229 	},
1230  },
1231 };
1232 
1233 static __initconst const u64 westmere_hw_cache_event_ids
1234 				[PERF_COUNT_HW_CACHE_MAX]
1235 				[PERF_COUNT_HW_CACHE_OP_MAX]
1236 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1237 {
1238  [ C(L1D) ] = {
1239 	[ C(OP_READ) ] = {
1240 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1241 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1242 	},
1243 	[ C(OP_WRITE) ] = {
1244 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
1245 		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1246 	},
1247 	[ C(OP_PREFETCH) ] = {
1248 		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1249 		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1250 	},
1251  },
1252  [ C(L1I ) ] = {
1253 	[ C(OP_READ) ] = {
1254 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1255 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1256 	},
1257 	[ C(OP_WRITE) ] = {
1258 		[ C(RESULT_ACCESS) ] = -1,
1259 		[ C(RESULT_MISS)   ] = -1,
1260 	},
1261 	[ C(OP_PREFETCH) ] = {
1262 		[ C(RESULT_ACCESS) ] = 0x0,
1263 		[ C(RESULT_MISS)   ] = 0x0,
1264 	},
1265  },
1266  [ C(LL  ) ] = {
1267 	[ C(OP_READ) ] = {
1268 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1269 		[ C(RESULT_ACCESS) ] = 0x01b7,
1270 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1271 		[ C(RESULT_MISS)   ] = 0x01b7,
1272 	},
1273 	/*
1274 	 * Use RFO, not WRITEBACK, because a write miss would typically occur
1275 	 * on RFO.
1276 	 */
1277 	[ C(OP_WRITE) ] = {
1278 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1279 		[ C(RESULT_ACCESS) ] = 0x01b7,
1280 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1281 		[ C(RESULT_MISS)   ] = 0x01b7,
1282 	},
1283 	[ C(OP_PREFETCH) ] = {
1284 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1285 		[ C(RESULT_ACCESS) ] = 0x01b7,
1286 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1287 		[ C(RESULT_MISS)   ] = 0x01b7,
1288 	},
1289  },
1290  [ C(DTLB) ] = {
1291 	[ C(OP_READ) ] = {
1292 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1293 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1294 	},
1295 	[ C(OP_WRITE) ] = {
1296 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
1297 		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1298 	},
1299 	[ C(OP_PREFETCH) ] = {
1300 		[ C(RESULT_ACCESS) ] = 0x0,
1301 		[ C(RESULT_MISS)   ] = 0x0,
1302 	},
1303  },
1304  [ C(ITLB) ] = {
1305 	[ C(OP_READ) ] = {
1306 		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1307 		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
1308 	},
1309 	[ C(OP_WRITE) ] = {
1310 		[ C(RESULT_ACCESS) ] = -1,
1311 		[ C(RESULT_MISS)   ] = -1,
1312 	},
1313 	[ C(OP_PREFETCH) ] = {
1314 		[ C(RESULT_ACCESS) ] = -1,
1315 		[ C(RESULT_MISS)   ] = -1,
1316 	},
1317  },
1318  [ C(BPU ) ] = {
1319 	[ C(OP_READ) ] = {
1320 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1321 		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1322 	},
1323 	[ C(OP_WRITE) ] = {
1324 		[ C(RESULT_ACCESS) ] = -1,
1325 		[ C(RESULT_MISS)   ] = -1,
1326 	},
1327 	[ C(OP_PREFETCH) ] = {
1328 		[ C(RESULT_ACCESS) ] = -1,
1329 		[ C(RESULT_MISS)   ] = -1,
1330 	},
1331  },
1332  [ C(NODE) ] = {
1333 	[ C(OP_READ) ] = {
1334 		[ C(RESULT_ACCESS) ] = 0x01b7,
1335 		[ C(RESULT_MISS)   ] = 0x01b7,
1336 	},
1337 	[ C(OP_WRITE) ] = {
1338 		[ C(RESULT_ACCESS) ] = 0x01b7,
1339 		[ C(RESULT_MISS)   ] = 0x01b7,
1340 	},
1341 	[ C(OP_PREFETCH) ] = {
1342 		[ C(RESULT_ACCESS) ] = 0x01b7,
1343 		[ C(RESULT_MISS)   ] = 0x01b7,
1344 	},
1345  },
1346 };
1347 
1348 /*
1349  * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1350  * See IA32 SDM Vol 3B 30.6.1.3
1351  */
1352 
1353 #define NHM_DMND_DATA_RD	(1 << 0)
1354 #define NHM_DMND_RFO		(1 << 1)
1355 #define NHM_DMND_IFETCH		(1 << 2)
1356 #define NHM_DMND_WB		(1 << 3)
1357 #define NHM_PF_DATA_RD		(1 << 4)
1358 #define NHM_PF_DATA_RFO		(1 << 5)
1359 #define NHM_PF_IFETCH		(1 << 6)
1360 #define NHM_OFFCORE_OTHER	(1 << 7)
1361 #define NHM_UNCORE_HIT		(1 << 8)
1362 #define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
1363 #define NHM_OTHER_CORE_HITM	(1 << 10)
1364         			/* reserved */
1365 #define NHM_REMOTE_CACHE_FWD	(1 << 12)
1366 #define NHM_REMOTE_DRAM		(1 << 13)
1367 #define NHM_LOCAL_DRAM		(1 << 14)
1368 #define NHM_NON_DRAM		(1 << 15)
1369 
1370 #define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1371 #define NHM_REMOTE		(NHM_REMOTE_DRAM)
1372 
1373 #define NHM_DMND_READ		(NHM_DMND_DATA_RD)
1374 #define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
1375 #define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1376 
1377 #define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1378 #define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1379 #define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
1380 
1381 static __initconst const u64 nehalem_hw_cache_extra_regs
1382 				[PERF_COUNT_HW_CACHE_MAX]
1383 				[PERF_COUNT_HW_CACHE_OP_MAX]
1384 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1385 {
1386  [ C(LL  ) ] = {
1387 	[ C(OP_READ) ] = {
1388 		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1389 		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
1390 	},
1391 	[ C(OP_WRITE) ] = {
1392 		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1393 		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
1394 	},
1395 	[ C(OP_PREFETCH) ] = {
1396 		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1397 		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1398 	},
1399  },
1400  [ C(NODE) ] = {
1401 	[ C(OP_READ) ] = {
1402 		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1403 		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
1404 	},
1405 	[ C(OP_WRITE) ] = {
1406 		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1407 		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
1408 	},
1409 	[ C(OP_PREFETCH) ] = {
1410 		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1411 		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1412 	},
1413  },
1414 };
1415 
1416 static __initconst const u64 nehalem_hw_cache_event_ids
1417 				[PERF_COUNT_HW_CACHE_MAX]
1418 				[PERF_COUNT_HW_CACHE_OP_MAX]
1419 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1420 {
1421  [ C(L1D) ] = {
1422 	[ C(OP_READ) ] = {
1423 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1424 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1425 	},
1426 	[ C(OP_WRITE) ] = {
1427 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
1428 		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1429 	},
1430 	[ C(OP_PREFETCH) ] = {
1431 		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1432 		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1433 	},
1434  },
1435  [ C(L1I ) ] = {
1436 	[ C(OP_READ) ] = {
1437 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1438 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1439 	},
1440 	[ C(OP_WRITE) ] = {
1441 		[ C(RESULT_ACCESS) ] = -1,
1442 		[ C(RESULT_MISS)   ] = -1,
1443 	},
1444 	[ C(OP_PREFETCH) ] = {
1445 		[ C(RESULT_ACCESS) ] = 0x0,
1446 		[ C(RESULT_MISS)   ] = 0x0,
1447 	},
1448  },
1449  [ C(LL  ) ] = {
1450 	[ C(OP_READ) ] = {
1451 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1452 		[ C(RESULT_ACCESS) ] = 0x01b7,
1453 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1454 		[ C(RESULT_MISS)   ] = 0x01b7,
1455 	},
1456 	/*
1457 	 * Use RFO, not WRITEBACK, because a write miss would typically occur
1458 	 * on RFO.
1459 	 */
1460 	[ C(OP_WRITE) ] = {
1461 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1462 		[ C(RESULT_ACCESS) ] = 0x01b7,
1463 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1464 		[ C(RESULT_MISS)   ] = 0x01b7,
1465 	},
1466 	[ C(OP_PREFETCH) ] = {
1467 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1468 		[ C(RESULT_ACCESS) ] = 0x01b7,
1469 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1470 		[ C(RESULT_MISS)   ] = 0x01b7,
1471 	},
1472  },
1473  [ C(DTLB) ] = {
1474 	[ C(OP_READ) ] = {
1475 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
1476 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1477 	},
1478 	[ C(OP_WRITE) ] = {
1479 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
1480 		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1481 	},
1482 	[ C(OP_PREFETCH) ] = {
1483 		[ C(RESULT_ACCESS) ] = 0x0,
1484 		[ C(RESULT_MISS)   ] = 0x0,
1485 	},
1486  },
1487  [ C(ITLB) ] = {
1488 	[ C(OP_READ) ] = {
1489 		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1490 		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
1491 	},
1492 	[ C(OP_WRITE) ] = {
1493 		[ C(RESULT_ACCESS) ] = -1,
1494 		[ C(RESULT_MISS)   ] = -1,
1495 	},
1496 	[ C(OP_PREFETCH) ] = {
1497 		[ C(RESULT_ACCESS) ] = -1,
1498 		[ C(RESULT_MISS)   ] = -1,
1499 	},
1500  },
1501  [ C(BPU ) ] = {
1502 	[ C(OP_READ) ] = {
1503 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1504 		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1505 	},
1506 	[ C(OP_WRITE) ] = {
1507 		[ C(RESULT_ACCESS) ] = -1,
1508 		[ C(RESULT_MISS)   ] = -1,
1509 	},
1510 	[ C(OP_PREFETCH) ] = {
1511 		[ C(RESULT_ACCESS) ] = -1,
1512 		[ C(RESULT_MISS)   ] = -1,
1513 	},
1514  },
1515  [ C(NODE) ] = {
1516 	[ C(OP_READ) ] = {
1517 		[ C(RESULT_ACCESS) ] = 0x01b7,
1518 		[ C(RESULT_MISS)   ] = 0x01b7,
1519 	},
1520 	[ C(OP_WRITE) ] = {
1521 		[ C(RESULT_ACCESS) ] = 0x01b7,
1522 		[ C(RESULT_MISS)   ] = 0x01b7,
1523 	},
1524 	[ C(OP_PREFETCH) ] = {
1525 		[ C(RESULT_ACCESS) ] = 0x01b7,
1526 		[ C(RESULT_MISS)   ] = 0x01b7,
1527 	},
1528  },
1529 };
1530 
1531 static __initconst const u64 core2_hw_cache_event_ids
1532 				[PERF_COUNT_HW_CACHE_MAX]
1533 				[PERF_COUNT_HW_CACHE_OP_MAX]
1534 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1535 {
1536  [ C(L1D) ] = {
1537 	[ C(OP_READ) ] = {
1538 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
1539 		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
1540 	},
1541 	[ C(OP_WRITE) ] = {
1542 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
1543 		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
1544 	},
1545 	[ C(OP_PREFETCH) ] = {
1546 		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
1547 		[ C(RESULT_MISS)   ] = 0,
1548 	},
1549  },
1550  [ C(L1I ) ] = {
1551 	[ C(OP_READ) ] = {
1552 		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
1553 		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
1554 	},
1555 	[ C(OP_WRITE) ] = {
1556 		[ C(RESULT_ACCESS) ] = -1,
1557 		[ C(RESULT_MISS)   ] = -1,
1558 	},
1559 	[ C(OP_PREFETCH) ] = {
1560 		[ C(RESULT_ACCESS) ] = 0,
1561 		[ C(RESULT_MISS)   ] = 0,
1562 	},
1563  },
1564  [ C(LL  ) ] = {
1565 	[ C(OP_READ) ] = {
1566 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1567 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1568 	},
1569 	[ C(OP_WRITE) ] = {
1570 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1571 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1572 	},
1573 	[ C(OP_PREFETCH) ] = {
1574 		[ C(RESULT_ACCESS) ] = 0,
1575 		[ C(RESULT_MISS)   ] = 0,
1576 	},
1577  },
1578  [ C(DTLB) ] = {
1579 	[ C(OP_READ) ] = {
1580 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
1581 		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
1582 	},
1583 	[ C(OP_WRITE) ] = {
1584 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
1585 		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
1586 	},
1587 	[ C(OP_PREFETCH) ] = {
1588 		[ C(RESULT_ACCESS) ] = 0,
1589 		[ C(RESULT_MISS)   ] = 0,
1590 	},
1591  },
1592  [ C(ITLB) ] = {
1593 	[ C(OP_READ) ] = {
1594 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1595 		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
1596 	},
1597 	[ C(OP_WRITE) ] = {
1598 		[ C(RESULT_ACCESS) ] = -1,
1599 		[ C(RESULT_MISS)   ] = -1,
1600 	},
1601 	[ C(OP_PREFETCH) ] = {
1602 		[ C(RESULT_ACCESS) ] = -1,
1603 		[ C(RESULT_MISS)   ] = -1,
1604 	},
1605  },
1606  [ C(BPU ) ] = {
1607 	[ C(OP_READ) ] = {
1608 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1609 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1610 	},
1611 	[ C(OP_WRITE) ] = {
1612 		[ C(RESULT_ACCESS) ] = -1,
1613 		[ C(RESULT_MISS)   ] = -1,
1614 	},
1615 	[ C(OP_PREFETCH) ] = {
1616 		[ C(RESULT_ACCESS) ] = -1,
1617 		[ C(RESULT_MISS)   ] = -1,
1618 	},
1619  },
1620 };
1621 
1622 static __initconst const u64 atom_hw_cache_event_ids
1623 				[PERF_COUNT_HW_CACHE_MAX]
1624 				[PERF_COUNT_HW_CACHE_OP_MAX]
1625 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1626 {
1627  [ C(L1D) ] = {
1628 	[ C(OP_READ) ] = {
1629 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1630 		[ C(RESULT_MISS)   ] = 0,
1631 	},
1632 	[ C(OP_WRITE) ] = {
1633 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1634 		[ C(RESULT_MISS)   ] = 0,
1635 	},
1636 	[ C(OP_PREFETCH) ] = {
1637 		[ C(RESULT_ACCESS) ] = 0x0,
1638 		[ C(RESULT_MISS)   ] = 0,
1639 	},
1640  },
1641  [ C(L1I ) ] = {
1642 	[ C(OP_READ) ] = {
1643 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1644 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1645 	},
1646 	[ C(OP_WRITE) ] = {
1647 		[ C(RESULT_ACCESS) ] = -1,
1648 		[ C(RESULT_MISS)   ] = -1,
1649 	},
1650 	[ C(OP_PREFETCH) ] = {
1651 		[ C(RESULT_ACCESS) ] = 0,
1652 		[ C(RESULT_MISS)   ] = 0,
1653 	},
1654  },
1655  [ C(LL  ) ] = {
1656 	[ C(OP_READ) ] = {
1657 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1658 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1659 	},
1660 	[ C(OP_WRITE) ] = {
1661 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1662 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1663 	},
1664 	[ C(OP_PREFETCH) ] = {
1665 		[ C(RESULT_ACCESS) ] = 0,
1666 		[ C(RESULT_MISS)   ] = 0,
1667 	},
1668  },
1669  [ C(DTLB) ] = {
1670 	[ C(OP_READ) ] = {
1671 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1672 		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1673 	},
1674 	[ C(OP_WRITE) ] = {
1675 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1676 		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1677 	},
1678 	[ C(OP_PREFETCH) ] = {
1679 		[ C(RESULT_ACCESS) ] = 0,
1680 		[ C(RESULT_MISS)   ] = 0,
1681 	},
1682  },
1683  [ C(ITLB) ] = {
1684 	[ C(OP_READ) ] = {
1685 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1686 		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1687 	},
1688 	[ C(OP_WRITE) ] = {
1689 		[ C(RESULT_ACCESS) ] = -1,
1690 		[ C(RESULT_MISS)   ] = -1,
1691 	},
1692 	[ C(OP_PREFETCH) ] = {
1693 		[ C(RESULT_ACCESS) ] = -1,
1694 		[ C(RESULT_MISS)   ] = -1,
1695 	},
1696  },
1697  [ C(BPU ) ] = {
1698 	[ C(OP_READ) ] = {
1699 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1700 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1701 	},
1702 	[ C(OP_WRITE) ] = {
1703 		[ C(RESULT_ACCESS) ] = -1,
1704 		[ C(RESULT_MISS)   ] = -1,
1705 	},
1706 	[ C(OP_PREFETCH) ] = {
1707 		[ C(RESULT_ACCESS) ] = -1,
1708 		[ C(RESULT_MISS)   ] = -1,
1709 	},
1710  },
1711 };
1712 
1713 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1714 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1715 /* no_alloc_cycles.not_delivered */
1716 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1717 	       "event=0xca,umask=0x50");
1718 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1719 /* uops_retired.all */
1720 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1721 	       "event=0xc2,umask=0x10");
1722 /* uops_retired.all */
1723 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1724 	       "event=0xc2,umask=0x10");
1725 
1726 static struct attribute *slm_events_attrs[] = {
1727 	EVENT_PTR(td_total_slots_slm),
1728 	EVENT_PTR(td_total_slots_scale_slm),
1729 	EVENT_PTR(td_fetch_bubbles_slm),
1730 	EVENT_PTR(td_fetch_bubbles_scale_slm),
1731 	EVENT_PTR(td_slots_issued_slm),
1732 	EVENT_PTR(td_slots_retired_slm),
1733 	NULL
1734 };
1735 
1736 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1737 {
1738 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1739 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1740 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1741 	EVENT_EXTRA_END
1742 };
1743 
1744 #define SLM_DMND_READ		SNB_DMND_DATA_RD
1745 #define SLM_DMND_WRITE		SNB_DMND_RFO
1746 #define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)
1747 
1748 #define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1749 #define SLM_LLC_ACCESS		SNB_RESP_ANY
1750 #define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
1751 
1752 static __initconst const u64 slm_hw_cache_extra_regs
1753 				[PERF_COUNT_HW_CACHE_MAX]
1754 				[PERF_COUNT_HW_CACHE_OP_MAX]
1755 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1756 {
1757  [ C(LL  ) ] = {
1758 	[ C(OP_READ) ] = {
1759 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1760 		[ C(RESULT_MISS)   ] = 0,
1761 	},
1762 	[ C(OP_WRITE) ] = {
1763 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1764 		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1765 	},
1766 	[ C(OP_PREFETCH) ] = {
1767 		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1768 		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1769 	},
1770  },
1771 };
1772 
1773 static __initconst const u64 slm_hw_cache_event_ids
1774 				[PERF_COUNT_HW_CACHE_MAX]
1775 				[PERF_COUNT_HW_CACHE_OP_MAX]
1776 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
1777 {
1778  [ C(L1D) ] = {
1779 	[ C(OP_READ) ] = {
1780 		[ C(RESULT_ACCESS) ] = 0,
1781 		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1782 	},
1783 	[ C(OP_WRITE) ] = {
1784 		[ C(RESULT_ACCESS) ] = 0,
1785 		[ C(RESULT_MISS)   ] = 0,
1786 	},
1787 	[ C(OP_PREFETCH) ] = {
1788 		[ C(RESULT_ACCESS) ] = 0,
1789 		[ C(RESULT_MISS)   ] = 0,
1790 	},
1791  },
1792  [ C(L1I ) ] = {
1793 	[ C(OP_READ) ] = {
1794 		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1795 		[ C(RESULT_MISS)   ] = 0x0280, /* ICACGE.MISSES */
1796 	},
1797 	[ C(OP_WRITE) ] = {
1798 		[ C(RESULT_ACCESS) ] = -1,
1799 		[ C(RESULT_MISS)   ] = -1,
1800 	},
1801 	[ C(OP_PREFETCH) ] = {
1802 		[ C(RESULT_ACCESS) ] = 0,
1803 		[ C(RESULT_MISS)   ] = 0,
1804 	},
1805  },
1806  [ C(LL  ) ] = {
1807 	[ C(OP_READ) ] = {
1808 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1809 		[ C(RESULT_ACCESS) ] = 0x01b7,
1810 		[ C(RESULT_MISS)   ] = 0,
1811 	},
1812 	[ C(OP_WRITE) ] = {
1813 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1814 		[ C(RESULT_ACCESS) ] = 0x01b7,
1815 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1816 		[ C(RESULT_MISS)   ] = 0x01b7,
1817 	},
1818 	[ C(OP_PREFETCH) ] = {
1819 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1820 		[ C(RESULT_ACCESS) ] = 0x01b7,
1821 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1822 		[ C(RESULT_MISS)   ] = 0x01b7,
1823 	},
1824  },
1825  [ C(DTLB) ] = {
1826 	[ C(OP_READ) ] = {
1827 		[ C(RESULT_ACCESS) ] = 0,
1828 		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1829 	},
1830 	[ C(OP_WRITE) ] = {
1831 		[ C(RESULT_ACCESS) ] = 0,
1832 		[ C(RESULT_MISS)   ] = 0,
1833 	},
1834 	[ C(OP_PREFETCH) ] = {
1835 		[ C(RESULT_ACCESS) ] = 0,
1836 		[ C(RESULT_MISS)   ] = 0,
1837 	},
1838  },
1839  [ C(ITLB) ] = {
1840 	[ C(OP_READ) ] = {
1841 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1842 		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1843 	},
1844 	[ C(OP_WRITE) ] = {
1845 		[ C(RESULT_ACCESS) ] = -1,
1846 		[ C(RESULT_MISS)   ] = -1,
1847 	},
1848 	[ C(OP_PREFETCH) ] = {
1849 		[ C(RESULT_ACCESS) ] = -1,
1850 		[ C(RESULT_MISS)   ] = -1,
1851 	},
1852  },
1853  [ C(BPU ) ] = {
1854 	[ C(OP_READ) ] = {
1855 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1856 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1857 	},
1858 	[ C(OP_WRITE) ] = {
1859 		[ C(RESULT_ACCESS) ] = -1,
1860 		[ C(RESULT_MISS)   ] = -1,
1861 	},
1862 	[ C(OP_PREFETCH) ] = {
1863 		[ C(RESULT_ACCESS) ] = -1,
1864 		[ C(RESULT_MISS)   ] = -1,
1865 	},
1866  },
1867 };
1868 
1869 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1870 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1871 /* UOPS_NOT_DELIVERED.ANY */
1872 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1873 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1874 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1875 /* UOPS_RETIRED.ANY */
1876 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1877 /* UOPS_ISSUED.ANY */
1878 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1879 
1880 static struct attribute *glm_events_attrs[] = {
1881 	EVENT_PTR(td_total_slots_glm),
1882 	EVENT_PTR(td_total_slots_scale_glm),
1883 	EVENT_PTR(td_fetch_bubbles_glm),
1884 	EVENT_PTR(td_recovery_bubbles_glm),
1885 	EVENT_PTR(td_slots_issued_glm),
1886 	EVENT_PTR(td_slots_retired_glm),
1887 	NULL
1888 };
1889 
1890 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1891 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1892 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1893 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1894 	EVENT_EXTRA_END
1895 };
1896 
1897 #define GLM_DEMAND_DATA_RD		BIT_ULL(0)
1898 #define GLM_DEMAND_RFO			BIT_ULL(1)
1899 #define GLM_ANY_RESPONSE		BIT_ULL(16)
1900 #define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
1901 #define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
1902 #define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
1903 #define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
1904 #define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
1905 #define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1906 #define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
1907 
1908 static __initconst const u64 glm_hw_cache_event_ids
1909 				[PERF_COUNT_HW_CACHE_MAX]
1910 				[PERF_COUNT_HW_CACHE_OP_MAX]
1911 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1912 	[C(L1D)] = {
1913 		[C(OP_READ)] = {
1914 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1915 			[C(RESULT_MISS)]	= 0x0,
1916 		},
1917 		[C(OP_WRITE)] = {
1918 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1919 			[C(RESULT_MISS)]	= 0x0,
1920 		},
1921 		[C(OP_PREFETCH)] = {
1922 			[C(RESULT_ACCESS)]	= 0x0,
1923 			[C(RESULT_MISS)]	= 0x0,
1924 		},
1925 	},
1926 	[C(L1I)] = {
1927 		[C(OP_READ)] = {
1928 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
1929 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
1930 		},
1931 		[C(OP_WRITE)] = {
1932 			[C(RESULT_ACCESS)]	= -1,
1933 			[C(RESULT_MISS)]	= -1,
1934 		},
1935 		[C(OP_PREFETCH)] = {
1936 			[C(RESULT_ACCESS)]	= 0x0,
1937 			[C(RESULT_MISS)]	= 0x0,
1938 		},
1939 	},
1940 	[C(LL)] = {
1941 		[C(OP_READ)] = {
1942 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1943 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1944 		},
1945 		[C(OP_WRITE)] = {
1946 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1947 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1948 		},
1949 		[C(OP_PREFETCH)] = {
1950 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1951 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
1952 		},
1953 	},
1954 	[C(DTLB)] = {
1955 		[C(OP_READ)] = {
1956 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
1957 			[C(RESULT_MISS)]	= 0x0,
1958 		},
1959 		[C(OP_WRITE)] = {
1960 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
1961 			[C(RESULT_MISS)]	= 0x0,
1962 		},
1963 		[C(OP_PREFETCH)] = {
1964 			[C(RESULT_ACCESS)]	= 0x0,
1965 			[C(RESULT_MISS)]	= 0x0,
1966 		},
1967 	},
1968 	[C(ITLB)] = {
1969 		[C(OP_READ)] = {
1970 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
1971 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
1972 		},
1973 		[C(OP_WRITE)] = {
1974 			[C(RESULT_ACCESS)]	= -1,
1975 			[C(RESULT_MISS)]	= -1,
1976 		},
1977 		[C(OP_PREFETCH)] = {
1978 			[C(RESULT_ACCESS)]	= -1,
1979 			[C(RESULT_MISS)]	= -1,
1980 		},
1981 	},
1982 	[C(BPU)] = {
1983 		[C(OP_READ)] = {
1984 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
1985 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
1986 		},
1987 		[C(OP_WRITE)] = {
1988 			[C(RESULT_ACCESS)]	= -1,
1989 			[C(RESULT_MISS)]	= -1,
1990 		},
1991 		[C(OP_PREFETCH)] = {
1992 			[C(RESULT_ACCESS)]	= -1,
1993 			[C(RESULT_MISS)]	= -1,
1994 		},
1995 	},
1996 };
1997 
1998 static __initconst const u64 glm_hw_cache_extra_regs
1999 				[PERF_COUNT_HW_CACHE_MAX]
2000 				[PERF_COUNT_HW_CACHE_OP_MAX]
2001 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2002 	[C(LL)] = {
2003 		[C(OP_READ)] = {
2004 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2005 						  GLM_LLC_ACCESS,
2006 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2007 						  GLM_LLC_MISS,
2008 		},
2009 		[C(OP_WRITE)] = {
2010 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2011 						  GLM_LLC_ACCESS,
2012 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2013 						  GLM_LLC_MISS,
2014 		},
2015 		[C(OP_PREFETCH)] = {
2016 			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
2017 						  GLM_LLC_ACCESS,
2018 			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
2019 						  GLM_LLC_MISS,
2020 		},
2021 	},
2022 };
2023 
2024 static __initconst const u64 glp_hw_cache_event_ids
2025 				[PERF_COUNT_HW_CACHE_MAX]
2026 				[PERF_COUNT_HW_CACHE_OP_MAX]
2027 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2028 	[C(L1D)] = {
2029 		[C(OP_READ)] = {
2030 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2031 			[C(RESULT_MISS)]	= 0x0,
2032 		},
2033 		[C(OP_WRITE)] = {
2034 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2035 			[C(RESULT_MISS)]	= 0x0,
2036 		},
2037 		[C(OP_PREFETCH)] = {
2038 			[C(RESULT_ACCESS)]	= 0x0,
2039 			[C(RESULT_MISS)]	= 0x0,
2040 		},
2041 	},
2042 	[C(L1I)] = {
2043 		[C(OP_READ)] = {
2044 			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
2045 			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
2046 		},
2047 		[C(OP_WRITE)] = {
2048 			[C(RESULT_ACCESS)]	= -1,
2049 			[C(RESULT_MISS)]	= -1,
2050 		},
2051 		[C(OP_PREFETCH)] = {
2052 			[C(RESULT_ACCESS)]	= 0x0,
2053 			[C(RESULT_MISS)]	= 0x0,
2054 		},
2055 	},
2056 	[C(LL)] = {
2057 		[C(OP_READ)] = {
2058 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2059 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2060 		},
2061 		[C(OP_WRITE)] = {
2062 			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2063 			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
2064 		},
2065 		[C(OP_PREFETCH)] = {
2066 			[C(RESULT_ACCESS)]	= 0x0,
2067 			[C(RESULT_MISS)]	= 0x0,
2068 		},
2069 	},
2070 	[C(DTLB)] = {
2071 		[C(OP_READ)] = {
2072 			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
2073 			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
2074 		},
2075 		[C(OP_WRITE)] = {
2076 			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
2077 			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
2078 		},
2079 		[C(OP_PREFETCH)] = {
2080 			[C(RESULT_ACCESS)]	= 0x0,
2081 			[C(RESULT_MISS)]	= 0x0,
2082 		},
2083 	},
2084 	[C(ITLB)] = {
2085 		[C(OP_READ)] = {
2086 			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
2087 			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
2088 		},
2089 		[C(OP_WRITE)] = {
2090 			[C(RESULT_ACCESS)]	= -1,
2091 			[C(RESULT_MISS)]	= -1,
2092 		},
2093 		[C(OP_PREFETCH)] = {
2094 			[C(RESULT_ACCESS)]	= -1,
2095 			[C(RESULT_MISS)]	= -1,
2096 		},
2097 	},
2098 	[C(BPU)] = {
2099 		[C(OP_READ)] = {
2100 			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
2101 			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
2102 		},
2103 		[C(OP_WRITE)] = {
2104 			[C(RESULT_ACCESS)]	= -1,
2105 			[C(RESULT_MISS)]	= -1,
2106 		},
2107 		[C(OP_PREFETCH)] = {
2108 			[C(RESULT_ACCESS)]	= -1,
2109 			[C(RESULT_MISS)]	= -1,
2110 		},
2111 	},
2112 };
2113 
2114 static __initconst const u64 glp_hw_cache_extra_regs
2115 				[PERF_COUNT_HW_CACHE_MAX]
2116 				[PERF_COUNT_HW_CACHE_OP_MAX]
2117 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2118 	[C(LL)] = {
2119 		[C(OP_READ)] = {
2120 			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
2121 						  GLM_LLC_ACCESS,
2122 			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
2123 						  GLM_LLC_MISS,
2124 		},
2125 		[C(OP_WRITE)] = {
2126 			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
2127 						  GLM_LLC_ACCESS,
2128 			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
2129 						  GLM_LLC_MISS,
2130 		},
2131 		[C(OP_PREFETCH)] = {
2132 			[C(RESULT_ACCESS)]	= 0x0,
2133 			[C(RESULT_MISS)]	= 0x0,
2134 		},
2135 	},
2136 };
2137 
2138 #define TNT_LOCAL_DRAM			BIT_ULL(26)
2139 #define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD
2140 #define TNT_DEMAND_WRITE		GLM_DEMAND_RFO
2141 #define TNT_LLC_ACCESS			GLM_ANY_RESPONSE
2142 #define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2143 					 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2144 #define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2145 
2146 static __initconst const u64 tnt_hw_cache_extra_regs
2147 				[PERF_COUNT_HW_CACHE_MAX]
2148 				[PERF_COUNT_HW_CACHE_OP_MAX]
2149 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2150 	[C(LL)] = {
2151 		[C(OP_READ)] = {
2152 			[C(RESULT_ACCESS)]	= TNT_DEMAND_READ|
2153 						  TNT_LLC_ACCESS,
2154 			[C(RESULT_MISS)]	= TNT_DEMAND_READ|
2155 						  TNT_LLC_MISS,
2156 		},
2157 		[C(OP_WRITE)] = {
2158 			[C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE|
2159 						  TNT_LLC_ACCESS,
2160 			[C(RESULT_MISS)]	= TNT_DEMAND_WRITE|
2161 						  TNT_LLC_MISS,
2162 		},
2163 		[C(OP_PREFETCH)] = {
2164 			[C(RESULT_ACCESS)]	= 0x0,
2165 			[C(RESULT_MISS)]	= 0x0,
2166 		},
2167 	},
2168 };
2169 
2170 EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
2171 EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
2172 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
2173 EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");
2174 
2175 static struct attribute *tnt_events_attrs[] = {
2176 	EVENT_PTR(td_fe_bound_tnt),
2177 	EVENT_PTR(td_retiring_tnt),
2178 	EVENT_PTR(td_bad_spec_tnt),
2179 	EVENT_PTR(td_be_bound_tnt),
2180 	NULL,
2181 };
2182 
2183 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2184 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2185 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2186 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2187 	EVENT_EXTRA_END
2188 };
2189 
2190 EVENT_ATTR_STR(mem-loads,	mem_ld_grt,	"event=0xd0,umask=0x5,ldlat=3");
2191 EVENT_ATTR_STR(mem-stores,	mem_st_grt,	"event=0xd0,umask=0x6");
2192 
2193 static struct attribute *grt_mem_attrs[] = {
2194 	EVENT_PTR(mem_ld_grt),
2195 	EVENT_PTR(mem_st_grt),
2196 	NULL
2197 };
2198 
2199 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2200 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2201 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2202 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2203 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2204 	EVENT_EXTRA_END
2205 };
2206 
2207 EVENT_ATTR_STR(topdown-retiring,       td_retiring_cmt,        "event=0x72,umask=0x0");
2208 EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_cmt,        "event=0x73,umask=0x0");
2209 
2210 static struct attribute *cmt_events_attrs[] = {
2211 	EVENT_PTR(td_fe_bound_tnt),
2212 	EVENT_PTR(td_retiring_cmt),
2213 	EVENT_PTR(td_bad_spec_cmt),
2214 	EVENT_PTR(td_be_bound_tnt),
2215 	NULL
2216 };
2217 
2218 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2219 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2220 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2221 	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2222 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2223 	INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2224 	INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2225 	EVENT_EXTRA_END
2226 };
2227 
2228 EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_skt,        "event=0x9c,umask=0x01");
2229 EVENT_ATTR_STR(topdown-retiring,       td_retiring_skt,        "event=0xc2,umask=0x02");
2230 EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_skt,        "event=0xa4,umask=0x02");
2231 
2232 static struct attribute *skt_events_attrs[] = {
2233 	EVENT_PTR(td_fe_bound_skt),
2234 	EVENT_PTR(td_retiring_skt),
2235 	EVENT_PTR(td_bad_spec_cmt),
2236 	EVENT_PTR(td_be_bound_skt),
2237 	NULL,
2238 };
2239 
2240 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
2241 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
2242 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
2243 #define KNL_MCDRAM_FAR		BIT_ULL(22)
2244 #define KNL_DDR_LOCAL		BIT_ULL(23)
2245 #define KNL_DDR_FAR		BIT_ULL(24)
2246 #define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2247 				    KNL_DDR_LOCAL | KNL_DDR_FAR)
2248 #define KNL_L2_READ		SLM_DMND_READ
2249 #define KNL_L2_WRITE		SLM_DMND_WRITE
2250 #define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
2251 #define KNL_L2_ACCESS		SLM_LLC_ACCESS
2252 #define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2253 				   KNL_DRAM_ANY | SNB_SNP_ANY | \
2254 						  SNB_NON_DRAM)
2255 
2256 static __initconst const u64 knl_hw_cache_extra_regs
2257 				[PERF_COUNT_HW_CACHE_MAX]
2258 				[PERF_COUNT_HW_CACHE_OP_MAX]
2259 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2260 	[C(LL)] = {
2261 		[C(OP_READ)] = {
2262 			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2263 			[C(RESULT_MISS)]   = 0,
2264 		},
2265 		[C(OP_WRITE)] = {
2266 			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2267 			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
2268 		},
2269 		[C(OP_PREFETCH)] = {
2270 			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2271 			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
2272 		},
2273 	},
2274 };
2275 
2276 /*
2277  * Used from PMIs where the LBRs are already disabled.
2278  *
2279  * This function could be called consecutively. It is required to remain in
2280  * disabled state if called consecutively.
2281  *
2282  * During consecutive calls, the same disable value will be written to related
2283  * registers, so the PMU state remains unchanged.
2284  *
2285  * intel_bts events don't coexist with intel PMU's BTS events because of
2286  * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2287  * disabled around intel PMU's event batching etc, only inside the PMI handler.
2288  *
2289  * Avoid PEBS_ENABLE MSR access in PMIs.
2290  * The GLOBAL_CTRL has been disabled. All the counters do not count anymore.
2291  * It doesn't matter if the PEBS is enabled or not.
2292  * Usually, the PEBS status are not changed in PMIs. It's unnecessary to
2293  * access PEBS_ENABLE MSR in disable_all()/enable_all().
2294  * However, there are some cases which may change PEBS status, e.g. PMI
2295  * throttle. The PEBS_ENABLE should be updated where the status changes.
2296  */
__intel_pmu_disable_all(bool bts)2297 static __always_inline void __intel_pmu_disable_all(bool bts)
2298 {
2299 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2300 
2301 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2302 
2303 	if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2304 		intel_pmu_disable_bts();
2305 }
2306 
intel_pmu_disable_all(void)2307 static __always_inline void intel_pmu_disable_all(void)
2308 {
2309 	__intel_pmu_disable_all(true);
2310 	static_call_cond(x86_pmu_pebs_disable_all)();
2311 	intel_pmu_lbr_disable_all();
2312 }
2313 
__intel_pmu_enable_all(int added,bool pmi)2314 static void __intel_pmu_enable_all(int added, bool pmi)
2315 {
2316 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2317 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2318 
2319 	intel_pmu_lbr_enable_all(pmi);
2320 
2321 	if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2322 		wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2323 		cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2324 	}
2325 
2326 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL,
2327 	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2328 
2329 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2330 		struct perf_event *event =
2331 			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2332 
2333 		if (WARN_ON_ONCE(!event))
2334 			return;
2335 
2336 		intel_pmu_enable_bts(event->hw.config);
2337 	}
2338 }
2339 
intel_pmu_enable_all(int added)2340 static void intel_pmu_enable_all(int added)
2341 {
2342 	static_call_cond(x86_pmu_pebs_enable_all)();
2343 	__intel_pmu_enable_all(added, false);
2344 }
2345 
2346 static noinline int
__intel_pmu_snapshot_branch_stack(struct perf_branch_entry * entries,unsigned int cnt,unsigned long flags)2347 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2348 				  unsigned int cnt, unsigned long flags)
2349 {
2350 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2351 
2352 	intel_pmu_lbr_read();
2353 	cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2354 
2355 	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2356 	intel_pmu_enable_all(0);
2357 	local_irq_restore(flags);
2358 	return cnt;
2359 }
2360 
2361 static int
intel_pmu_snapshot_branch_stack(struct perf_branch_entry * entries,unsigned int cnt)2362 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2363 {
2364 	unsigned long flags;
2365 
2366 	/* must not have branches... */
2367 	local_irq_save(flags);
2368 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2369 	__intel_pmu_lbr_disable();
2370 	/*            ... until here */
2371 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2372 }
2373 
2374 static int
intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry * entries,unsigned int cnt)2375 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2376 {
2377 	unsigned long flags;
2378 
2379 	/* must not have branches... */
2380 	local_irq_save(flags);
2381 	__intel_pmu_disable_all(false); /* we don't care about BTS */
2382 	__intel_pmu_arch_lbr_disable();
2383 	/*            ... until here */
2384 	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2385 }
2386 
2387 /*
2388  * Workaround for:
2389  *   Intel Errata AAK100 (model 26)
2390  *   Intel Errata AAP53  (model 30)
2391  *   Intel Errata BD53   (model 44)
2392  *
2393  * The official story:
2394  *   These chips need to be 'reset' when adding counters by programming the
2395  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2396  *   in sequence on the same PMC or on different PMCs.
2397  *
2398  * In practice it appears some of these events do in fact count, and
2399  * we need to program all 4 events.
2400  */
intel_pmu_nhm_workaround(void)2401 static void intel_pmu_nhm_workaround(void)
2402 {
2403 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2404 	static const unsigned long nhm_magic[4] = {
2405 		0x4300B5,
2406 		0x4300D2,
2407 		0x4300B1,
2408 		0x4300B1
2409 	};
2410 	struct perf_event *event;
2411 	int i;
2412 
2413 	/*
2414 	 * The Errata requires below steps:
2415 	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2416 	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2417 	 *    the corresponding PMCx;
2418 	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2419 	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2420 	 * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
2421 	 */
2422 
2423 	/*
2424 	 * The real steps we choose are a little different from above.
2425 	 * A) To reduce MSR operations, we don't run step 1) as they
2426 	 *    are already cleared before this function is called;
2427 	 * B) Call x86_perf_event_update to save PMCx before configuring
2428 	 *    PERFEVTSELx with magic number;
2429 	 * C) With step 5), we do clear only when the PERFEVTSELx is
2430 	 *    not used currently.
2431 	 * D) Call x86_perf_event_set_period to restore PMCx;
2432 	 */
2433 
2434 	/* We always operate 4 pairs of PERF Counters */
2435 	for (i = 0; i < 4; i++) {
2436 		event = cpuc->events[i];
2437 		if (event)
2438 			static_call(x86_pmu_update)(event);
2439 	}
2440 
2441 	for (i = 0; i < 4; i++) {
2442 		wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2443 		wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2444 	}
2445 
2446 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2447 	wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2448 
2449 	for (i = 0; i < 4; i++) {
2450 		event = cpuc->events[i];
2451 
2452 		if (event) {
2453 			static_call(x86_pmu_set_period)(event);
2454 			__x86_pmu_enable_event(&event->hw,
2455 					ARCH_PERFMON_EVENTSEL_ENABLE);
2456 		} else
2457 			wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2458 	}
2459 }
2460 
intel_pmu_nhm_enable_all(int added)2461 static void intel_pmu_nhm_enable_all(int added)
2462 {
2463 	if (added)
2464 		intel_pmu_nhm_workaround();
2465 	intel_pmu_enable_all(added);
2466 }
2467 
intel_set_tfa(struct cpu_hw_events * cpuc,bool on)2468 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2469 {
2470 	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2471 
2472 	if (cpuc->tfa_shadow != val) {
2473 		cpuc->tfa_shadow = val;
2474 		wrmsrq(MSR_TSX_FORCE_ABORT, val);
2475 	}
2476 }
2477 
intel_tfa_commit_scheduling(struct cpu_hw_events * cpuc,int idx,int cntr)2478 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2479 {
2480 	/*
2481 	 * We're going to use PMC3, make sure TFA is set before we touch it.
2482 	 */
2483 	if (cntr == 3)
2484 		intel_set_tfa(cpuc, true);
2485 }
2486 
intel_tfa_pmu_enable_all(int added)2487 static void intel_tfa_pmu_enable_all(int added)
2488 {
2489 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2490 
2491 	/*
2492 	 * If we find PMC3 is no longer used when we enable the PMU, we can
2493 	 * clear TFA.
2494 	 */
2495 	if (!test_bit(3, cpuc->active_mask))
2496 		intel_set_tfa(cpuc, false);
2497 
2498 	intel_pmu_enable_all(added);
2499 }
2500 
intel_pmu_get_status(void)2501 static inline u64 intel_pmu_get_status(void)
2502 {
2503 	u64 status;
2504 
2505 	rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
2506 
2507 	return status;
2508 }
2509 
intel_pmu_ack_status(u64 ack)2510 static inline void intel_pmu_ack_status(u64 ack)
2511 {
2512 	wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2513 }
2514 
event_is_checkpointed(struct perf_event * event)2515 static inline bool event_is_checkpointed(struct perf_event *event)
2516 {
2517 	return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2518 }
2519 
intel_set_masks(struct perf_event * event,int idx)2520 static inline void intel_set_masks(struct perf_event *event, int idx)
2521 {
2522 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2523 
2524 	if (event->attr.exclude_host)
2525 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2526 	if (event->attr.exclude_guest)
2527 		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2528 	if (event_is_checkpointed(event))
2529 		__set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2530 }
2531 
intel_clear_masks(struct perf_event * event,int idx)2532 static inline void intel_clear_masks(struct perf_event *event, int idx)
2533 {
2534 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2535 
2536 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2537 	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2538 	__clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2539 }
2540 
intel_pmu_disable_fixed(struct perf_event * event)2541 static void intel_pmu_disable_fixed(struct perf_event *event)
2542 {
2543 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2544 	struct hw_perf_event *hwc = &event->hw;
2545 	int idx = hwc->idx;
2546 	u64 mask;
2547 
2548 	if (is_topdown_idx(idx)) {
2549 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2550 
2551 		/*
2552 		 * When there are other active TopDown events,
2553 		 * don't disable the fixed counter 3.
2554 		 */
2555 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2556 			return;
2557 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2558 	}
2559 
2560 	intel_clear_masks(event, idx);
2561 
2562 	mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2563 	cpuc->fixed_ctrl_val &= ~mask;
2564 }
2565 
intel_pmu_disable_event(struct perf_event * event)2566 static void intel_pmu_disable_event(struct perf_event *event)
2567 {
2568 	struct hw_perf_event *hwc = &event->hw;
2569 	int idx = hwc->idx;
2570 
2571 	switch (idx) {
2572 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
2573 		intel_clear_masks(event, idx);
2574 		x86_pmu_disable_event(event);
2575 		break;
2576 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2577 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2578 		intel_pmu_disable_fixed(event);
2579 		break;
2580 	case INTEL_PMC_IDX_FIXED_BTS:
2581 		intel_pmu_disable_bts();
2582 		intel_pmu_drain_bts_buffer();
2583 		return;
2584 	case INTEL_PMC_IDX_FIXED_VLBR:
2585 		intel_clear_masks(event, idx);
2586 		break;
2587 	default:
2588 		intel_clear_masks(event, idx);
2589 		pr_warn("Failed to disable the event with invalid index %d\n",
2590 			idx);
2591 		return;
2592 	}
2593 
2594 	/*
2595 	 * Needs to be called after x86_pmu_disable_event,
2596 	 * so we don't trigger the event without PEBS bit set.
2597 	 */
2598 	if (unlikely(event->attr.precise_ip))
2599 		static_call(x86_pmu_pebs_disable)(event);
2600 }
2601 
intel_pmu_assign_event(struct perf_event * event,int idx)2602 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2603 {
2604 	if (is_pebs_pt(event))
2605 		perf_report_aux_output_id(event, idx);
2606 }
2607 
intel_pmu_needs_branch_stack(struct perf_event * event)2608 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2609 {
2610 	return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2611 }
2612 
intel_pmu_del_event(struct perf_event * event)2613 static void intel_pmu_del_event(struct perf_event *event)
2614 {
2615 	if (intel_pmu_needs_branch_stack(event))
2616 		intel_pmu_lbr_del(event);
2617 	if (event->attr.precise_ip)
2618 		intel_pmu_pebs_del(event);
2619 	if (is_pebs_counter_event_group(event) ||
2620 	    is_acr_event_group(event))
2621 		this_cpu_ptr(&cpu_hw_events)->n_late_setup--;
2622 }
2623 
icl_set_topdown_event_period(struct perf_event * event)2624 static int icl_set_topdown_event_period(struct perf_event *event)
2625 {
2626 	struct hw_perf_event *hwc = &event->hw;
2627 	s64 left = local64_read(&hwc->period_left);
2628 
2629 	/*
2630 	 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2631 	 * Software should start both registers, PERF_METRICS and fixed
2632 	 * counter 3, from zero.
2633 	 * Clear PERF_METRICS and Fixed counter 3 in initialization.
2634 	 * After that, both MSRs will be cleared for each read.
2635 	 * Don't need to clear them again.
2636 	 */
2637 	if (left == x86_pmu.max_period) {
2638 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
2639 		wrmsrq(MSR_PERF_METRICS, 0);
2640 		hwc->saved_slots = 0;
2641 		hwc->saved_metric = 0;
2642 	}
2643 
2644 	if ((hwc->saved_slots) && is_slots_event(event)) {
2645 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2646 		wrmsrq(MSR_PERF_METRICS, hwc->saved_metric);
2647 	}
2648 
2649 	perf_event_update_userpage(event);
2650 
2651 	return 0;
2652 }
2653 
2654 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2655 
icl_get_metrics_event_value(u64 metric,u64 slots,int idx)2656 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2657 {
2658 	u32 val;
2659 
2660 	/*
2661 	 * The metric is reported as an 8bit integer fraction
2662 	 * summing up to 0xff.
2663 	 * slots-in-metric = (Metric / 0xff) * slots
2664 	 */
2665 	val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2666 	return  mul_u64_u32_div(slots, val, 0xff);
2667 }
2668 
icl_get_topdown_value(struct perf_event * event,u64 slots,u64 metrics)2669 static u64 icl_get_topdown_value(struct perf_event *event,
2670 				       u64 slots, u64 metrics)
2671 {
2672 	int idx = event->hw.idx;
2673 	u64 delta;
2674 
2675 	if (is_metric_idx(idx))
2676 		delta = icl_get_metrics_event_value(metrics, slots, idx);
2677 	else
2678 		delta = slots;
2679 
2680 	return delta;
2681 }
2682 
__icl_update_topdown_event(struct perf_event * event,u64 slots,u64 metrics,u64 last_slots,u64 last_metrics)2683 static void __icl_update_topdown_event(struct perf_event *event,
2684 				       u64 slots, u64 metrics,
2685 				       u64 last_slots, u64 last_metrics)
2686 {
2687 	u64 delta, last = 0;
2688 
2689 	delta = icl_get_topdown_value(event, slots, metrics);
2690 	if (last_slots)
2691 		last = icl_get_topdown_value(event, last_slots, last_metrics);
2692 
2693 	/*
2694 	 * The 8bit integer fraction of metric may be not accurate,
2695 	 * especially when the changes is very small.
2696 	 * For example, if only a few bad_spec happens, the fraction
2697 	 * may be reduced from 1 to 0. If so, the bad_spec event value
2698 	 * will be 0 which is definitely less than the last value.
2699 	 * Avoid update event->count for this case.
2700 	 */
2701 	if (delta > last) {
2702 		delta -= last;
2703 		local64_add(delta, &event->count);
2704 	}
2705 }
2706 
update_saved_topdown_regs(struct perf_event * event,u64 slots,u64 metrics,int metric_end)2707 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2708 				      u64 metrics, int metric_end)
2709 {
2710 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2711 	struct perf_event *other;
2712 	int idx;
2713 
2714 	event->hw.saved_slots = slots;
2715 	event->hw.saved_metric = metrics;
2716 
2717 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2718 		if (!is_topdown_idx(idx))
2719 			continue;
2720 		other = cpuc->events[idx];
2721 		other->hw.saved_slots = slots;
2722 		other->hw.saved_metric = metrics;
2723 	}
2724 }
2725 
2726 /*
2727  * Update all active Topdown events.
2728  *
2729  * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2730  * modify by a NMI. PMU has to be disabled before calling this function.
2731  */
2732 
intel_update_topdown_event(struct perf_event * event,int metric_end,u64 * val)2733 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val)
2734 {
2735 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2736 	struct perf_event *other;
2737 	u64 slots, metrics;
2738 	bool reset = true;
2739 	int idx;
2740 
2741 	if (!val) {
2742 		/* read Fixed counter 3 */
2743 		slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
2744 		if (!slots)
2745 			return 0;
2746 
2747 		/* read PERF_METRICS */
2748 		metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
2749 	} else {
2750 		slots = val[0];
2751 		metrics = val[1];
2752 		/*
2753 		 * Don't reset the PERF_METRICS and Fixed counter 3
2754 		 * for each PEBS record read. Utilize the RDPMC metrics
2755 		 * clear mode.
2756 		 */
2757 		reset = false;
2758 	}
2759 
2760 	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2761 		if (!is_topdown_idx(idx))
2762 			continue;
2763 		other = cpuc->events[idx];
2764 		__icl_update_topdown_event(other, slots, metrics,
2765 					   event ? event->hw.saved_slots : 0,
2766 					   event ? event->hw.saved_metric : 0);
2767 	}
2768 
2769 	/*
2770 	 * Check and update this event, which may have been cleared
2771 	 * in active_mask e.g. x86_pmu_stop()
2772 	 */
2773 	if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2774 		__icl_update_topdown_event(event, slots, metrics,
2775 					   event->hw.saved_slots,
2776 					   event->hw.saved_metric);
2777 
2778 		/*
2779 		 * In x86_pmu_stop(), the event is cleared in active_mask first,
2780 		 * then drain the delta, which indicates context switch for
2781 		 * counting.
2782 		 * Save metric and slots for context switch.
2783 		 * Don't need to reset the PERF_METRICS and Fixed counter 3.
2784 		 * Because the values will be restored in next schedule in.
2785 		 */
2786 		update_saved_topdown_regs(event, slots, metrics, metric_end);
2787 		reset = false;
2788 	}
2789 
2790 	if (reset) {
2791 		/* The fixed counter 3 has to be written before the PERF_METRICS. */
2792 		wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0);
2793 		wrmsrq(MSR_PERF_METRICS, 0);
2794 		if (event)
2795 			update_saved_topdown_regs(event, 0, 0, metric_end);
2796 	}
2797 
2798 	return slots;
2799 }
2800 
icl_update_topdown_event(struct perf_event * event,u64 * val)2801 static u64 icl_update_topdown_event(struct perf_event *event, u64 *val)
2802 {
2803 	return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2804 						 x86_pmu.num_topdown_events - 1,
2805 					  val);
2806 }
2807 
2808 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
2809 
intel_pmu_read_event(struct perf_event * event)2810 static void intel_pmu_read_event(struct perf_event *event)
2811 {
2812 	if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) ||
2813 	    is_pebs_counter_event_group(event)) {
2814 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2815 		bool pmu_enabled = cpuc->enabled;
2816 
2817 		/* Only need to call update_topdown_event() once for group read. */
2818 		if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
2819 			return;
2820 
2821 		cpuc->enabled = 0;
2822 		if (pmu_enabled)
2823 			intel_pmu_disable_all();
2824 
2825 		/*
2826 		 * If the PEBS counters snapshotting is enabled,
2827 		 * the topdown event is available in PEBS records.
2828 		 */
2829 		if (is_topdown_count(event) && !is_pebs_counter_event_group(event))
2830 			static_call(intel_pmu_update_topdown_event)(event, NULL);
2831 		else
2832 			intel_pmu_drain_pebs_buffer();
2833 
2834 		cpuc->enabled = pmu_enabled;
2835 		if (pmu_enabled)
2836 			intel_pmu_enable_all(0);
2837 
2838 		return;
2839 	}
2840 
2841 	x86_perf_event_update(event);
2842 }
2843 
intel_pmu_enable_fixed(struct perf_event * event)2844 static void intel_pmu_enable_fixed(struct perf_event *event)
2845 {
2846 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2847 	struct hw_perf_event *hwc = &event->hw;
2848 	u64 mask, bits = 0;
2849 	int idx = hwc->idx;
2850 
2851 	if (is_topdown_idx(idx)) {
2852 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2853 		/*
2854 		 * When there are other active TopDown events,
2855 		 * don't enable the fixed counter 3 again.
2856 		 */
2857 		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2858 			return;
2859 
2860 		idx = INTEL_PMC_IDX_FIXED_SLOTS;
2861 
2862 		if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR)
2863 			bits |= INTEL_FIXED_3_METRICS_CLEAR;
2864 	}
2865 
2866 	intel_set_masks(event, idx);
2867 
2868 	/*
2869 	 * Enable IRQ generation (0x8), if not PEBS,
2870 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2871 	 * if requested:
2872 	 */
2873 	if (!event->attr.precise_ip)
2874 		bits |= INTEL_FIXED_0_ENABLE_PMI;
2875 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2876 		bits |= INTEL_FIXED_0_USER;
2877 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2878 		bits |= INTEL_FIXED_0_KERNEL;
2879 
2880 	/*
2881 	 * ANY bit is supported in v3 and up
2882 	 */
2883 	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2884 		bits |= INTEL_FIXED_0_ANYTHREAD;
2885 
2886 	idx -= INTEL_PMC_IDX_FIXED;
2887 	bits = intel_fixed_bits_by_idx(idx, bits);
2888 	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2889 
2890 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2891 		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2892 		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2893 	}
2894 
2895 	cpuc->fixed_ctrl_val &= ~mask;
2896 	cpuc->fixed_ctrl_val |= bits;
2897 }
2898 
intel_pmu_config_acr(int idx,u64 mask,u32 reload)2899 static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
2900 {
2901 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2902 	int msr_b, msr_c;
2903 	int msr_offset;
2904 
2905 	if (!mask && !cpuc->acr_cfg_b[idx])
2906 		return;
2907 
2908 	if (idx < INTEL_PMC_IDX_FIXED) {
2909 		msr_b = MSR_IA32_PMC_V6_GP0_CFG_B;
2910 		msr_c = MSR_IA32_PMC_V6_GP0_CFG_C;
2911 		msr_offset = x86_pmu.addr_offset(idx, false);
2912 	} else {
2913 		msr_b = MSR_IA32_PMC_V6_FX0_CFG_B;
2914 		msr_c = MSR_IA32_PMC_V6_FX0_CFG_C;
2915 		msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
2916 	}
2917 
2918 	if (cpuc->acr_cfg_b[idx] != mask) {
2919 		wrmsrl(msr_b + msr_offset, mask);
2920 		cpuc->acr_cfg_b[idx] = mask;
2921 	}
2922 	/* Only need to update the reload value when there is a valid config value. */
2923 	if (mask && cpuc->acr_cfg_c[idx] != reload) {
2924 		wrmsrl(msr_c + msr_offset, reload);
2925 		cpuc->acr_cfg_c[idx] = reload;
2926 	}
2927 }
2928 
intel_pmu_enable_acr(struct perf_event * event)2929 static void intel_pmu_enable_acr(struct perf_event *event)
2930 {
2931 	struct hw_perf_event *hwc = &event->hw;
2932 
2933 	if (!is_acr_event_group(event) || !event->attr.config2) {
2934 		/*
2935 		 * The disable doesn't clear the ACR CFG register.
2936 		 * Check and clear the ACR CFG register.
2937 		 */
2938 		intel_pmu_config_acr(hwc->idx, 0, 0);
2939 		return;
2940 	}
2941 
2942 	intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
2943 }
2944 
2945 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
2946 
intel_pmu_enable_event(struct perf_event * event)2947 static void intel_pmu_enable_event(struct perf_event *event)
2948 {
2949 	u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
2950 	struct hw_perf_event *hwc = &event->hw;
2951 	int idx = hwc->idx;
2952 
2953 	if (unlikely(event->attr.precise_ip))
2954 		static_call(x86_pmu_pebs_enable)(event);
2955 
2956 	switch (idx) {
2957 	case 0 ... INTEL_PMC_IDX_FIXED - 1:
2958 		if (branch_sample_counters(event))
2959 			enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
2960 		intel_set_masks(event, idx);
2961 		static_call_cond(intel_pmu_enable_acr_event)(event);
2962 		__x86_pmu_enable_event(hwc, enable_mask);
2963 		break;
2964 	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2965 		static_call_cond(intel_pmu_enable_acr_event)(event);
2966 		fallthrough;
2967 	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2968 		intel_pmu_enable_fixed(event);
2969 		break;
2970 	case INTEL_PMC_IDX_FIXED_BTS:
2971 		if (!__this_cpu_read(cpu_hw_events.enabled))
2972 			return;
2973 		intel_pmu_enable_bts(hwc->config);
2974 		break;
2975 	case INTEL_PMC_IDX_FIXED_VLBR:
2976 		intel_set_masks(event, idx);
2977 		break;
2978 	default:
2979 		pr_warn("Failed to enable the event with invalid index %d\n",
2980 			idx);
2981 	}
2982 }
2983 
intel_pmu_acr_late_setup(struct cpu_hw_events * cpuc)2984 static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
2985 {
2986 	struct perf_event *event, *leader;
2987 	int i, j, idx;
2988 
2989 	for (i = 0; i < cpuc->n_events; i++) {
2990 		leader = cpuc->event_list[i];
2991 		if (!is_acr_event_group(leader))
2992 			continue;
2993 
2994 		/* The ACR events must be contiguous. */
2995 		for (j = i; j < cpuc->n_events; j++) {
2996 			event = cpuc->event_list[j];
2997 			if (event->group_leader != leader->group_leader)
2998 				break;
2999 			for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
3000 				if (WARN_ON_ONCE(i + idx > cpuc->n_events))
3001 					return;
3002 				__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
3003 			}
3004 		}
3005 		i = j - 1;
3006 	}
3007 }
3008 
intel_pmu_late_setup(void)3009 void intel_pmu_late_setup(void)
3010 {
3011 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3012 
3013 	if (!cpuc->n_late_setup)
3014 		return;
3015 
3016 	intel_pmu_pebs_late_setup(cpuc);
3017 	intel_pmu_acr_late_setup(cpuc);
3018 }
3019 
intel_pmu_add_event(struct perf_event * event)3020 static void intel_pmu_add_event(struct perf_event *event)
3021 {
3022 	if (event->attr.precise_ip)
3023 		intel_pmu_pebs_add(event);
3024 	if (intel_pmu_needs_branch_stack(event))
3025 		intel_pmu_lbr_add(event);
3026 	if (is_pebs_counter_event_group(event) ||
3027 	    is_acr_event_group(event))
3028 		this_cpu_ptr(&cpu_hw_events)->n_late_setup++;
3029 }
3030 
3031 /*
3032  * Save and restart an expired event. Called by NMI contexts,
3033  * so it has to be careful about preempting normal event ops:
3034  */
intel_pmu_save_and_restart(struct perf_event * event)3035 int intel_pmu_save_and_restart(struct perf_event *event)
3036 {
3037 	static_call(x86_pmu_update)(event);
3038 	/*
3039 	 * For a checkpointed counter always reset back to 0.  This
3040 	 * avoids a situation where the counter overflows, aborts the
3041 	 * transaction and is then set back to shortly before the
3042 	 * overflow, and overflows and aborts again.
3043 	 */
3044 	if (unlikely(event_is_checkpointed(event))) {
3045 		/* No race with NMIs because the counter should not be armed */
3046 		wrmsrq(event->hw.event_base, 0);
3047 		local64_set(&event->hw.prev_count, 0);
3048 	}
3049 	return static_call(x86_pmu_set_period)(event);
3050 }
3051 
intel_pmu_set_period(struct perf_event * event)3052 static int intel_pmu_set_period(struct perf_event *event)
3053 {
3054 	if (unlikely(is_topdown_count(event)))
3055 		return static_call(intel_pmu_set_topdown_event_period)(event);
3056 
3057 	return x86_perf_event_set_period(event);
3058 }
3059 
intel_pmu_update(struct perf_event * event)3060 static u64 intel_pmu_update(struct perf_event *event)
3061 {
3062 	if (unlikely(is_topdown_count(event)))
3063 		return static_call(intel_pmu_update_topdown_event)(event, NULL);
3064 
3065 	return x86_perf_event_update(event);
3066 }
3067 
intel_pmu_reset(void)3068 static void intel_pmu_reset(void)
3069 {
3070 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
3071 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3072 	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
3073 	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
3074 	unsigned long flags;
3075 	int idx;
3076 
3077 	if (!*(u64 *)cntr_mask)
3078 		return;
3079 
3080 	local_irq_save(flags);
3081 
3082 	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
3083 
3084 	for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
3085 		wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
3086 		wrmsrq_safe(x86_pmu_event_addr(idx),  0ull);
3087 	}
3088 	for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
3089 		if (fixed_counter_disabled(idx, cpuc->pmu))
3090 			continue;
3091 		wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
3092 	}
3093 
3094 	if (ds)
3095 		ds->bts_index = ds->bts_buffer_base;
3096 
3097 	/* Ack all overflows and disable fixed counters */
3098 	if (x86_pmu.version >= 2) {
3099 		intel_pmu_ack_status(intel_pmu_get_status());
3100 		wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);
3101 	}
3102 
3103 	/* Reset LBRs and LBR freezing */
3104 	if (x86_pmu.lbr_nr) {
3105 		update_debugctlmsr(get_debugctlmsr() &
3106 			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
3107 	}
3108 
3109 	local_irq_restore(flags);
3110 }
3111 
3112 /*
3113  * We may be running with guest PEBS events created by KVM, and the
3114  * PEBS records are logged into the guest's DS and invisible to host.
3115  *
3116  * In the case of guest PEBS overflow, we only trigger a fake event
3117  * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
3118  * The guest will then vm-entry and check the guest DS area to read
3119  * the guest PEBS records.
3120  *
3121  * The contents and other behavior of the guest event do not matter.
3122  */
x86_pmu_handle_guest_pebs(struct pt_regs * regs,struct perf_sample_data * data)3123 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
3124 				      struct perf_sample_data *data)
3125 {
3126 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3127 	u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
3128 	struct perf_event *event = NULL;
3129 	int bit;
3130 
3131 	if (!unlikely(perf_guest_state()))
3132 		return;
3133 
3134 	if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3135 	    !guest_pebs_idxs)
3136 		return;
3137 
3138 	for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3139 		event = cpuc->events[bit];
3140 		if (!event->attr.precise_ip)
3141 			continue;
3142 
3143 		perf_sample_data_init(data, 0, event->hw.last_period);
3144 		perf_event_overflow(event, data, regs);
3145 
3146 		/* Inject one fake event is enough. */
3147 		break;
3148 	}
3149 }
3150 
handle_pmi_common(struct pt_regs * regs,u64 status)3151 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3152 {
3153 	struct perf_sample_data data;
3154 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3155 	int bit;
3156 	int handled = 0;
3157 
3158 	inc_irq_stat(apic_perf_irqs);
3159 
3160 	/*
3161 	 * Ignore a range of extra bits in status that do not indicate
3162 	 * overflow by themselves.
3163 	 */
3164 	status &= ~(GLOBAL_STATUS_COND_CHG |
3165 		    GLOBAL_STATUS_ASIF |
3166 		    GLOBAL_STATUS_LBRS_FROZEN);
3167 	if (!status)
3168 		return 0;
3169 	/*
3170 	 * In case multiple PEBS events are sampled at the same time,
3171 	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3172 	 * PEBS buffer overflow and also seeing at most 3 PEBS counters
3173 	 * having their bits set in the status register. This is a sign
3174 	 * that there was at least one PEBS record pending at the time
3175 	 * of the PMU interrupt. PEBS counters must only be processed
3176 	 * via the drain_pebs() calls and not via the regular sample
3177 	 * processing loop coming after that the function, otherwise
3178 	 * phony regular samples may be generated in the sampling buffer
3179 	 * not marked with the EXACT tag. Another possibility is to have
3180 	 * one PEBS event and at least one non-PEBS event which overflows
3181 	 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
3182 	 * not be set, yet the overflow status bit for the PEBS counter will
3183 	 * be on Skylake.
3184 	 *
3185 	 * To avoid this problem, we systematically ignore the PEBS-enabled
3186 	 * counters from the GLOBAL_STATUS mask and we always process PEBS
3187 	 * events via drain_pebs().
3188 	 */
3189 	status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3190 
3191 	/*
3192 	 * PEBS overflow sets bit 62 in the global status register
3193 	 */
3194 	if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3195 		u64 pebs_enabled = cpuc->pebs_enabled;
3196 
3197 		handled++;
3198 		x86_pmu_handle_guest_pebs(regs, &data);
3199 		static_call(x86_pmu_drain_pebs)(regs, &data);
3200 
3201 		/*
3202 		 * PMI throttle may be triggered, which stops the PEBS event.
3203 		 * Although cpuc->pebs_enabled is updated accordingly, the
3204 		 * MSR_IA32_PEBS_ENABLE is not updated. Because the
3205 		 * cpuc->enabled has been forced to 0 in PMI.
3206 		 * Update the MSR if pebs_enabled is changed.
3207 		 */
3208 		if (pebs_enabled != cpuc->pebs_enabled)
3209 			wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3210 
3211 		/*
3212 		 * Above PEBS handler (PEBS counters snapshotting) has updated fixed
3213 		 * counter 3 and perf metrics counts if they are in counter group,
3214 		 * unnecessary to update again.
3215 		 */
3216 		if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
3217 		    is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
3218 			status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
3219 	}
3220 
3221 	/*
3222 	 * Intel PT
3223 	 */
3224 	if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3225 		handled++;
3226 		if (!perf_guest_handle_intel_pt_intr())
3227 			intel_pt_interrupt();
3228 	}
3229 
3230 	/*
3231 	 * Intel Perf metrics
3232 	 */
3233 	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3234 		handled++;
3235 		static_call(intel_pmu_update_topdown_event)(NULL, NULL);
3236 	}
3237 
3238 	status &= hybrid(cpuc->pmu, intel_ctrl);
3239 
3240 	/*
3241 	 * Checkpointed counters can lead to 'spurious' PMIs because the
3242 	 * rollback caused by the PMI will have cleared the overflow status
3243 	 * bit. Therefore always force probe these counters.
3244 	 */
3245 	status |= cpuc->intel_cp_status;
3246 
3247 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3248 		struct perf_event *event = cpuc->events[bit];
3249 		u64 last_period;
3250 
3251 		handled++;
3252 
3253 		if (!test_bit(bit, cpuc->active_mask))
3254 			continue;
3255 
3256 		/*
3257 		 * There may be unprocessed PEBS records in the PEBS buffer,
3258 		 * which still stores the previous values.
3259 		 * Process those records first before handling the latest value.
3260 		 * For example,
3261 		 * A is a regular counter
3262 		 * B is a PEBS event which reads A
3263 		 * C is a PEBS event
3264 		 *
3265 		 * The following can happen:
3266 		 * B-assist			A=1
3267 		 * C				A=2
3268 		 * B-assist			A=3
3269 		 * A-overflow-PMI		A=4
3270 		 * C-assist-PMI (PEBS buffer)	A=5
3271 		 *
3272 		 * The PEBS buffer has to be drained before handling the A-PMI
3273 		 */
3274 		if (is_pebs_counter_event_group(event))
3275 			x86_pmu.drain_pebs(regs, &data);
3276 
3277 		last_period = event->hw.last_period;
3278 
3279 		if (!intel_pmu_save_and_restart(event))
3280 			continue;
3281 
3282 		perf_sample_data_init(&data, 0, last_period);
3283 
3284 		if (has_branch_stack(event))
3285 			intel_pmu_lbr_save_brstack(&data, cpuc, event);
3286 
3287 		perf_event_overflow(event, &data, regs);
3288 	}
3289 
3290 	return handled;
3291 }
3292 
3293 /*
3294  * This handler is triggered by the local APIC, so the APIC IRQ handling
3295  * rules apply:
3296  */
intel_pmu_handle_irq(struct pt_regs * regs)3297 static int intel_pmu_handle_irq(struct pt_regs *regs)
3298 {
3299 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3300 	bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3301 	bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3302 	int loops;
3303 	u64 status;
3304 	int handled;
3305 	int pmu_enabled;
3306 
3307 	/*
3308 	 * Save the PMU state.
3309 	 * It needs to be restored when leaving the handler.
3310 	 */
3311 	pmu_enabled = cpuc->enabled;
3312 	/*
3313 	 * In general, the early ACK is only applied for old platforms.
3314 	 * For the big core starts from Haswell, the late ACK should be
3315 	 * applied.
3316 	 * For the small core after Tremont, we have to do the ACK right
3317 	 * before re-enabling counters, which is in the middle of the
3318 	 * NMI handler.
3319 	 */
3320 	if (!late_ack && !mid_ack)
3321 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3322 	intel_bts_disable_local();
3323 	cpuc->enabled = 0;
3324 	__intel_pmu_disable_all(true);
3325 	handled = intel_pmu_drain_bts_buffer();
3326 	handled += intel_bts_interrupt();
3327 	status = intel_pmu_get_status();
3328 	if (!status)
3329 		goto done;
3330 
3331 	loops = 0;
3332 again:
3333 	intel_pmu_lbr_read();
3334 	intel_pmu_ack_status(status);
3335 	if (++loops > 100) {
3336 		static bool warned;
3337 
3338 		if (!warned) {
3339 			WARN(1, "perfevents: irq loop stuck!\n");
3340 			perf_event_print_debug();
3341 			warned = true;
3342 		}
3343 		intel_pmu_reset();
3344 		goto done;
3345 	}
3346 
3347 	handled += handle_pmi_common(regs, status);
3348 
3349 	/*
3350 	 * Repeat if there is more work to be done:
3351 	 */
3352 	status = intel_pmu_get_status();
3353 	if (status)
3354 		goto again;
3355 
3356 done:
3357 	if (mid_ack)
3358 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3359 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
3360 	cpuc->enabled = pmu_enabled;
3361 	if (pmu_enabled)
3362 		__intel_pmu_enable_all(0, true);
3363 	intel_bts_enable_local();
3364 
3365 	/*
3366 	 * Only unmask the NMI after the overflow counters
3367 	 * have been reset. This avoids spurious NMIs on
3368 	 * Haswell CPUs.
3369 	 */
3370 	if (late_ack)
3371 		apic_write(APIC_LVTPC, APIC_DM_NMI);
3372 	return handled;
3373 }
3374 
3375 static struct event_constraint *
intel_bts_constraints(struct perf_event * event)3376 intel_bts_constraints(struct perf_event *event)
3377 {
3378 	if (unlikely(intel_pmu_has_bts(event)))
3379 		return &bts_constraint;
3380 
3381 	return NULL;
3382 }
3383 
3384 /*
3385  * Note: matches a fake event, like Fixed2.
3386  */
3387 static struct event_constraint *
intel_vlbr_constraints(struct perf_event * event)3388 intel_vlbr_constraints(struct perf_event *event)
3389 {
3390 	struct event_constraint *c = &vlbr_constraint;
3391 
3392 	if (unlikely(constraint_match(c, event->hw.config))) {
3393 		event->hw.flags |= c->flags;
3394 		return c;
3395 	}
3396 
3397 	return NULL;
3398 }
3399 
intel_alt_er(struct cpu_hw_events * cpuc,int idx,u64 config)3400 static int intel_alt_er(struct cpu_hw_events *cpuc,
3401 			int idx, u64 config)
3402 {
3403 	struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3404 	int alt_idx = idx;
3405 
3406 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3407 		return idx;
3408 
3409 	if (idx == EXTRA_REG_RSP_0)
3410 		alt_idx = EXTRA_REG_RSP_1;
3411 
3412 	if (idx == EXTRA_REG_RSP_1)
3413 		alt_idx = EXTRA_REG_RSP_0;
3414 
3415 	if (config & ~extra_regs[alt_idx].valid_mask)
3416 		return idx;
3417 
3418 	return alt_idx;
3419 }
3420 
intel_fixup_er(struct perf_event * event,int idx)3421 static void intel_fixup_er(struct perf_event *event, int idx)
3422 {
3423 	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3424 	event->hw.extra_reg.idx = idx;
3425 
3426 	if (idx == EXTRA_REG_RSP_0) {
3427 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3428 		event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3429 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3430 	} else if (idx == EXTRA_REG_RSP_1) {
3431 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3432 		event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3433 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3434 	}
3435 }
3436 
3437 /*
3438  * manage allocation of shared extra msr for certain events
3439  *
3440  * sharing can be:
3441  * per-cpu: to be shared between the various events on a single PMU
3442  * per-core: per-cpu + shared by HT threads
3443  */
3444 static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events * cpuc,struct perf_event * event,struct hw_perf_event_extra * reg)3445 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3446 				   struct perf_event *event,
3447 				   struct hw_perf_event_extra *reg)
3448 {
3449 	struct event_constraint *c = &emptyconstraint;
3450 	struct er_account *era;
3451 	unsigned long flags;
3452 	int idx = reg->idx;
3453 
3454 	/*
3455 	 * reg->alloc can be set due to existing state, so for fake cpuc we
3456 	 * need to ignore this, otherwise we might fail to allocate proper fake
3457 	 * state for this extra reg constraint. Also see the comment below.
3458 	 */
3459 	if (reg->alloc && !cpuc->is_fake)
3460 		return NULL; /* call x86_get_event_constraint() */
3461 
3462 again:
3463 	era = &cpuc->shared_regs->regs[idx];
3464 	/*
3465 	 * we use spin_lock_irqsave() to avoid lockdep issues when
3466 	 * passing a fake cpuc
3467 	 */
3468 	raw_spin_lock_irqsave(&era->lock, flags);
3469 
3470 	if (!atomic_read(&era->ref) || era->config == reg->config) {
3471 
3472 		/*
3473 		 * If its a fake cpuc -- as per validate_{group,event}() we
3474 		 * shouldn't touch event state and we can avoid doing so
3475 		 * since both will only call get_event_constraints() once
3476 		 * on each event, this avoids the need for reg->alloc.
3477 		 *
3478 		 * Not doing the ER fixup will only result in era->reg being
3479 		 * wrong, but since we won't actually try and program hardware
3480 		 * this isn't a problem either.
3481 		 */
3482 		if (!cpuc->is_fake) {
3483 			if (idx != reg->idx)
3484 				intel_fixup_er(event, idx);
3485 
3486 			/*
3487 			 * x86_schedule_events() can call get_event_constraints()
3488 			 * multiple times on events in the case of incremental
3489 			 * scheduling(). reg->alloc ensures we only do the ER
3490 			 * allocation once.
3491 			 */
3492 			reg->alloc = 1;
3493 		}
3494 
3495 		/* lock in msr value */
3496 		era->config = reg->config;
3497 		era->reg = reg->reg;
3498 
3499 		/* one more user */
3500 		atomic_inc(&era->ref);
3501 
3502 		/*
3503 		 * need to call x86_get_event_constraint()
3504 		 * to check if associated event has constraints
3505 		 */
3506 		c = NULL;
3507 	} else {
3508 		idx = intel_alt_er(cpuc, idx, reg->config);
3509 		if (idx != reg->idx) {
3510 			raw_spin_unlock_irqrestore(&era->lock, flags);
3511 			goto again;
3512 		}
3513 	}
3514 	raw_spin_unlock_irqrestore(&era->lock, flags);
3515 
3516 	return c;
3517 }
3518 
3519 static void
__intel_shared_reg_put_constraints(struct cpu_hw_events * cpuc,struct hw_perf_event_extra * reg)3520 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3521 				   struct hw_perf_event_extra *reg)
3522 {
3523 	struct er_account *era;
3524 
3525 	/*
3526 	 * Only put constraint if extra reg was actually allocated. Also takes
3527 	 * care of event which do not use an extra shared reg.
3528 	 *
3529 	 * Also, if this is a fake cpuc we shouldn't touch any event state
3530 	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3531 	 * either since it'll be thrown out.
3532 	 */
3533 	if (!reg->alloc || cpuc->is_fake)
3534 		return;
3535 
3536 	era = &cpuc->shared_regs->regs[reg->idx];
3537 
3538 	/* one fewer user */
3539 	atomic_dec(&era->ref);
3540 
3541 	/* allocate again next time */
3542 	reg->alloc = 0;
3543 }
3544 
3545 static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events * cpuc,struct perf_event * event)3546 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3547 			      struct perf_event *event)
3548 {
3549 	struct event_constraint *c = NULL, *d;
3550 	struct hw_perf_event_extra *xreg, *breg;
3551 
3552 	xreg = &event->hw.extra_reg;
3553 	if (xreg->idx != EXTRA_REG_NONE) {
3554 		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3555 		if (c == &emptyconstraint)
3556 			return c;
3557 	}
3558 	breg = &event->hw.branch_reg;
3559 	if (breg->idx != EXTRA_REG_NONE) {
3560 		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3561 		if (d == &emptyconstraint) {
3562 			__intel_shared_reg_put_constraints(cpuc, xreg);
3563 			c = d;
3564 		}
3565 	}
3566 	return c;
3567 }
3568 
3569 struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)3570 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3571 			  struct perf_event *event)
3572 {
3573 	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3574 	struct event_constraint *c;
3575 
3576 	if (event_constraints) {
3577 		for_each_event_constraint(c, event_constraints) {
3578 			if (constraint_match(c, event->hw.config)) {
3579 				event->hw.flags |= c->flags;
3580 				return c;
3581 			}
3582 		}
3583 	}
3584 
3585 	return &hybrid_var(cpuc->pmu, unconstrained);
3586 }
3587 
3588 static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)3589 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3590 			    struct perf_event *event)
3591 {
3592 	struct event_constraint *c;
3593 
3594 	c = intel_vlbr_constraints(event);
3595 	if (c)
3596 		return c;
3597 
3598 	c = intel_bts_constraints(event);
3599 	if (c)
3600 		return c;
3601 
3602 	c = intel_shared_regs_constraints(cpuc, event);
3603 	if (c)
3604 		return c;
3605 
3606 	c = intel_pebs_constraints(event);
3607 	if (c)
3608 		return c;
3609 
3610 	return x86_get_event_constraints(cpuc, idx, event);
3611 }
3612 
3613 static void
intel_start_scheduling(struct cpu_hw_events * cpuc)3614 intel_start_scheduling(struct cpu_hw_events *cpuc)
3615 {
3616 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3617 	struct intel_excl_states *xl;
3618 	int tid = cpuc->excl_thread_id;
3619 
3620 	/*
3621 	 * nothing needed if in group validation mode
3622 	 */
3623 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3624 		return;
3625 
3626 	/*
3627 	 * no exclusion needed
3628 	 */
3629 	if (WARN_ON_ONCE(!excl_cntrs))
3630 		return;
3631 
3632 	xl = &excl_cntrs->states[tid];
3633 
3634 	xl->sched_started = true;
3635 	/*
3636 	 * lock shared state until we are done scheduling
3637 	 * in stop_event_scheduling()
3638 	 * makes scheduling appear as a transaction
3639 	 */
3640 	raw_spin_lock(&excl_cntrs->lock);
3641 }
3642 
intel_commit_scheduling(struct cpu_hw_events * cpuc,int idx,int cntr)3643 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3644 {
3645 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3646 	struct event_constraint *c = cpuc->event_constraint[idx];
3647 	struct intel_excl_states *xl;
3648 	int tid = cpuc->excl_thread_id;
3649 
3650 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3651 		return;
3652 
3653 	if (WARN_ON_ONCE(!excl_cntrs))
3654 		return;
3655 
3656 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3657 		return;
3658 
3659 	xl = &excl_cntrs->states[tid];
3660 
3661 	lockdep_assert_held(&excl_cntrs->lock);
3662 
3663 	if (c->flags & PERF_X86_EVENT_EXCL)
3664 		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3665 	else
3666 		xl->state[cntr] = INTEL_EXCL_SHARED;
3667 }
3668 
3669 static void
intel_stop_scheduling(struct cpu_hw_events * cpuc)3670 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3671 {
3672 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3673 	struct intel_excl_states *xl;
3674 	int tid = cpuc->excl_thread_id;
3675 
3676 	/*
3677 	 * nothing needed if in group validation mode
3678 	 */
3679 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3680 		return;
3681 	/*
3682 	 * no exclusion needed
3683 	 */
3684 	if (WARN_ON_ONCE(!excl_cntrs))
3685 		return;
3686 
3687 	xl = &excl_cntrs->states[tid];
3688 
3689 	xl->sched_started = false;
3690 	/*
3691 	 * release shared state lock (acquired in intel_start_scheduling())
3692 	 */
3693 	raw_spin_unlock(&excl_cntrs->lock);
3694 }
3695 
3696 static struct event_constraint *
dyn_constraint(struct cpu_hw_events * cpuc,struct event_constraint * c,int idx)3697 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3698 {
3699 	WARN_ON_ONCE(!cpuc->constraint_list);
3700 
3701 	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3702 		struct event_constraint *cx;
3703 
3704 		/*
3705 		 * grab pre-allocated constraint entry
3706 		 */
3707 		cx = &cpuc->constraint_list[idx];
3708 
3709 		/*
3710 		 * initialize dynamic constraint
3711 		 * with static constraint
3712 		 */
3713 		*cx = *c;
3714 
3715 		/*
3716 		 * mark constraint as dynamic
3717 		 */
3718 		cx->flags |= PERF_X86_EVENT_DYNAMIC;
3719 		c = cx;
3720 	}
3721 
3722 	return c;
3723 }
3724 
3725 static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events * cpuc,struct perf_event * event,int idx,struct event_constraint * c)3726 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3727 			   int idx, struct event_constraint *c)
3728 {
3729 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3730 	struct intel_excl_states *xlo;
3731 	int tid = cpuc->excl_thread_id;
3732 	int is_excl, i, w;
3733 
3734 	/*
3735 	 * validating a group does not require
3736 	 * enforcing cross-thread  exclusion
3737 	 */
3738 	if (cpuc->is_fake || !is_ht_workaround_enabled())
3739 		return c;
3740 
3741 	/*
3742 	 * no exclusion needed
3743 	 */
3744 	if (WARN_ON_ONCE(!excl_cntrs))
3745 		return c;
3746 
3747 	/*
3748 	 * because we modify the constraint, we need
3749 	 * to make a copy. Static constraints come
3750 	 * from static const tables.
3751 	 *
3752 	 * only needed when constraint has not yet
3753 	 * been cloned (marked dynamic)
3754 	 */
3755 	c = dyn_constraint(cpuc, c, idx);
3756 
3757 	/*
3758 	 * From here on, the constraint is dynamic.
3759 	 * Either it was just allocated above, or it
3760 	 * was allocated during a earlier invocation
3761 	 * of this function
3762 	 */
3763 
3764 	/*
3765 	 * state of sibling HT
3766 	 */
3767 	xlo = &excl_cntrs->states[tid ^ 1];
3768 
3769 	/*
3770 	 * event requires exclusive counter access
3771 	 * across HT threads
3772 	 */
3773 	is_excl = c->flags & PERF_X86_EVENT_EXCL;
3774 	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3775 		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3776 		if (!cpuc->n_excl++)
3777 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3778 	}
3779 
3780 	/*
3781 	 * Modify static constraint with current dynamic
3782 	 * state of thread
3783 	 *
3784 	 * EXCLUSIVE: sibling counter measuring exclusive event
3785 	 * SHARED   : sibling counter measuring non-exclusive event
3786 	 * UNUSED   : sibling counter unused
3787 	 */
3788 	w = c->weight;
3789 	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3790 		/*
3791 		 * exclusive event in sibling counter
3792 		 * our corresponding counter cannot be used
3793 		 * regardless of our event
3794 		 */
3795 		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3796 			__clear_bit(i, c->idxmsk);
3797 			w--;
3798 			continue;
3799 		}
3800 		/*
3801 		 * if measuring an exclusive event, sibling
3802 		 * measuring non-exclusive, then counter cannot
3803 		 * be used
3804 		 */
3805 		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3806 			__clear_bit(i, c->idxmsk);
3807 			w--;
3808 			continue;
3809 		}
3810 	}
3811 
3812 	/*
3813 	 * if we return an empty mask, then switch
3814 	 * back to static empty constraint to avoid
3815 	 * the cost of freeing later on
3816 	 */
3817 	if (!w)
3818 		c = &emptyconstraint;
3819 
3820 	c->weight = w;
3821 
3822 	return c;
3823 }
3824 
3825 static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)3826 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3827 			    struct perf_event *event)
3828 {
3829 	struct event_constraint *c1, *c2;
3830 
3831 	c1 = cpuc->event_constraint[idx];
3832 
3833 	/*
3834 	 * first time only
3835 	 * - static constraint: no change across incremental scheduling calls
3836 	 * - dynamic constraint: handled by intel_get_excl_constraints()
3837 	 */
3838 	c2 = __intel_get_event_constraints(cpuc, idx, event);
3839 	if (c1) {
3840 	        WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3841 		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3842 		c1->weight = c2->weight;
3843 		c2 = c1;
3844 	}
3845 
3846 	if (cpuc->excl_cntrs)
3847 		return intel_get_excl_constraints(cpuc, event, idx, c2);
3848 
3849 	if (event->hw.dyn_constraint != ~0ULL) {
3850 		c2 = dyn_constraint(cpuc, c2, idx);
3851 		c2->idxmsk64 &= event->hw.dyn_constraint;
3852 		c2->weight = hweight64(c2->idxmsk64);
3853 	}
3854 
3855 	return c2;
3856 }
3857 
intel_put_excl_constraints(struct cpu_hw_events * cpuc,struct perf_event * event)3858 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3859 		struct perf_event *event)
3860 {
3861 	struct hw_perf_event *hwc = &event->hw;
3862 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3863 	int tid = cpuc->excl_thread_id;
3864 	struct intel_excl_states *xl;
3865 
3866 	/*
3867 	 * nothing needed if in group validation mode
3868 	 */
3869 	if (cpuc->is_fake)
3870 		return;
3871 
3872 	if (WARN_ON_ONCE(!excl_cntrs))
3873 		return;
3874 
3875 	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3876 		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3877 		if (!--cpuc->n_excl)
3878 			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3879 	}
3880 
3881 	/*
3882 	 * If event was actually assigned, then mark the counter state as
3883 	 * unused now.
3884 	 */
3885 	if (hwc->idx >= 0) {
3886 		xl = &excl_cntrs->states[tid];
3887 
3888 		/*
3889 		 * put_constraint may be called from x86_schedule_events()
3890 		 * which already has the lock held so here make locking
3891 		 * conditional.
3892 		 */
3893 		if (!xl->sched_started)
3894 			raw_spin_lock(&excl_cntrs->lock);
3895 
3896 		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3897 
3898 		if (!xl->sched_started)
3899 			raw_spin_unlock(&excl_cntrs->lock);
3900 	}
3901 }
3902 
3903 static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events * cpuc,struct perf_event * event)3904 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3905 					struct perf_event *event)
3906 {
3907 	struct hw_perf_event_extra *reg;
3908 
3909 	reg = &event->hw.extra_reg;
3910 	if (reg->idx != EXTRA_REG_NONE)
3911 		__intel_shared_reg_put_constraints(cpuc, reg);
3912 
3913 	reg = &event->hw.branch_reg;
3914 	if (reg->idx != EXTRA_REG_NONE)
3915 		__intel_shared_reg_put_constraints(cpuc, reg);
3916 }
3917 
intel_put_event_constraints(struct cpu_hw_events * cpuc,struct perf_event * event)3918 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3919 					struct perf_event *event)
3920 {
3921 	intel_put_shared_regs_event_constraints(cpuc, event);
3922 
3923 	/*
3924 	 * is PMU has exclusive counter restrictions, then
3925 	 * all events are subject to and must call the
3926 	 * put_excl_constraints() routine
3927 	 */
3928 	if (cpuc->excl_cntrs)
3929 		intel_put_excl_constraints(cpuc, event);
3930 }
3931 
intel_pebs_aliases_core2(struct perf_event * event)3932 static void intel_pebs_aliases_core2(struct perf_event *event)
3933 {
3934 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3935 		/*
3936 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3937 		 * (0x003c) so that we can use it with PEBS.
3938 		 *
3939 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3940 		 * PEBS capable. However we can use INST_RETIRED.ANY_P
3941 		 * (0x00c0), which is a PEBS capable event, to get the same
3942 		 * count.
3943 		 *
3944 		 * INST_RETIRED.ANY_P counts the number of cycles that retires
3945 		 * CNTMASK instructions. By setting CNTMASK to a value (16)
3946 		 * larger than the maximum number of instructions that can be
3947 		 * retired per cycle (4) and then inverting the condition, we
3948 		 * count all cycles that retire 16 or less instructions, which
3949 		 * is every cycle.
3950 		 *
3951 		 * Thereby we gain a PEBS capable cycle counter.
3952 		 */
3953 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3954 
3955 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3956 		event->hw.config = alt_config;
3957 	}
3958 }
3959 
intel_pebs_aliases_snb(struct perf_event * event)3960 static void intel_pebs_aliases_snb(struct perf_event *event)
3961 {
3962 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3963 		/*
3964 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3965 		 * (0x003c) so that we can use it with PEBS.
3966 		 *
3967 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3968 		 * PEBS capable. However we can use UOPS_RETIRED.ALL
3969 		 * (0x01c2), which is a PEBS capable event, to get the same
3970 		 * count.
3971 		 *
3972 		 * UOPS_RETIRED.ALL counts the number of cycles that retires
3973 		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3974 		 * larger than the maximum number of micro-ops that can be
3975 		 * retired per cycle (4) and then inverting the condition, we
3976 		 * count all cycles that retire 16 or less micro-ops, which
3977 		 * is every cycle.
3978 		 *
3979 		 * Thereby we gain a PEBS capable cycle counter.
3980 		 */
3981 		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3982 
3983 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3984 		event->hw.config = alt_config;
3985 	}
3986 }
3987 
intel_pebs_aliases_precdist(struct perf_event * event)3988 static void intel_pebs_aliases_precdist(struct perf_event *event)
3989 {
3990 	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3991 		/*
3992 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3993 		 * (0x003c) so that we can use it with PEBS.
3994 		 *
3995 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3996 		 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3997 		 * (0x01c0), which is a PEBS capable event, to get the same
3998 		 * count.
3999 		 *
4000 		 * The PREC_DIST event has special support to minimize sample
4001 		 * shadowing effects. One drawback is that it can be
4002 		 * only programmed on counter 1, but that seems like an
4003 		 * acceptable trade off.
4004 		 */
4005 		u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
4006 
4007 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
4008 		event->hw.config = alt_config;
4009 	}
4010 }
4011 
intel_pebs_aliases_ivb(struct perf_event * event)4012 static void intel_pebs_aliases_ivb(struct perf_event *event)
4013 {
4014 	if (event->attr.precise_ip < 3)
4015 		return intel_pebs_aliases_snb(event);
4016 	return intel_pebs_aliases_precdist(event);
4017 }
4018 
intel_pebs_aliases_skl(struct perf_event * event)4019 static void intel_pebs_aliases_skl(struct perf_event *event)
4020 {
4021 	if (event->attr.precise_ip < 3)
4022 		return intel_pebs_aliases_core2(event);
4023 	return intel_pebs_aliases_precdist(event);
4024 }
4025 
intel_pmu_large_pebs_flags(struct perf_event * event)4026 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
4027 {
4028 	unsigned long flags = x86_pmu.large_pebs_flags;
4029 
4030 	if (event->attr.use_clockid)
4031 		flags &= ~PERF_SAMPLE_TIME;
4032 	if (!event->attr.exclude_kernel)
4033 		flags &= ~PERF_SAMPLE_REGS_USER;
4034 	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
4035 		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
4036 	return flags;
4037 }
4038 
intel_pmu_bts_config(struct perf_event * event)4039 static int intel_pmu_bts_config(struct perf_event *event)
4040 {
4041 	struct perf_event_attr *attr = &event->attr;
4042 
4043 	if (unlikely(intel_pmu_has_bts(event))) {
4044 		/* BTS is not supported by this architecture. */
4045 		if (!x86_pmu.bts_active)
4046 			return -EOPNOTSUPP;
4047 
4048 		/* BTS is currently only allowed for user-mode. */
4049 		if (!attr->exclude_kernel)
4050 			return -EOPNOTSUPP;
4051 
4052 		/* BTS is not allowed for precise events. */
4053 		if (attr->precise_ip)
4054 			return -EOPNOTSUPP;
4055 
4056 		/* disallow bts if conflicting events are present */
4057 		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4058 			return -EBUSY;
4059 
4060 		event->destroy = hw_perf_lbr_event_destroy;
4061 	}
4062 
4063 	return 0;
4064 }
4065 
core_pmu_hw_config(struct perf_event * event)4066 static int core_pmu_hw_config(struct perf_event *event)
4067 {
4068 	int ret = x86_pmu_hw_config(event);
4069 
4070 	if (ret)
4071 		return ret;
4072 
4073 	return intel_pmu_bts_config(event);
4074 }
4075 
4076 #define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \
4077 					 ((x86_pmu.num_topdown_events - 1) << 8))
4078 
is_available_metric_event(struct perf_event * event)4079 static bool is_available_metric_event(struct perf_event *event)
4080 {
4081 	return is_metric_event(event) &&
4082 		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
4083 }
4084 
is_mem_loads_event(struct perf_event * event)4085 static inline bool is_mem_loads_event(struct perf_event *event)
4086 {
4087 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
4088 }
4089 
is_mem_loads_aux_event(struct perf_event * event)4090 static inline bool is_mem_loads_aux_event(struct perf_event *event)
4091 {
4092 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
4093 }
4094 
require_mem_loads_aux_event(struct perf_event * event)4095 static inline bool require_mem_loads_aux_event(struct perf_event *event)
4096 {
4097 	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
4098 		return false;
4099 
4100 	if (is_hybrid())
4101 		return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
4102 
4103 	return true;
4104 }
4105 
intel_pmu_has_cap(struct perf_event * event,int idx)4106 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
4107 {
4108 	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
4109 
4110 	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
4111 }
4112 
intel_pmu_freq_start_period(struct perf_event * event)4113 static u64 intel_pmu_freq_start_period(struct perf_event *event)
4114 {
4115 	int type = event->attr.type;
4116 	u64 config, factor;
4117 	s64 start;
4118 
4119 	/*
4120 	 * The 127 is the lowest possible recommended SAV (sample after value)
4121 	 * for a 4000 freq (default freq), according to the event list JSON file.
4122 	 * Also, assume the workload is idle 50% time.
4123 	 */
4124 	factor = 64 * 4000;
4125 	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
4126 		goto end;
4127 
4128 	/*
4129 	 * The estimation of the start period in the freq mode is
4130 	 * based on the below assumption.
4131 	 *
4132 	 * For a cycles or an instructions event, 1GHZ of the
4133 	 * underlying platform, 1 IPC. The workload is idle 50% time.
4134 	 * The start period = 1,000,000,000 * 1 / freq / 2.
4135 	 *		    = 500,000,000 / freq
4136 	 *
4137 	 * Usually, the branch-related events occur less than the
4138 	 * instructions event. According to the Intel event list JSON
4139 	 * file, the SAV (sample after value) of a branch-related event
4140 	 * is usually 1/4 of an instruction event.
4141 	 * The start period of branch-related events = 125,000,000 / freq.
4142 	 *
4143 	 * The cache-related events occurs even less. The SAV is usually
4144 	 * 1/20 of an instruction event.
4145 	 * The start period of cache-related events = 25,000,000 / freq.
4146 	 */
4147 	config = event->attr.config & PERF_HW_EVENT_MASK;
4148 	if (type == PERF_TYPE_HARDWARE) {
4149 		switch (config) {
4150 		case PERF_COUNT_HW_CPU_CYCLES:
4151 		case PERF_COUNT_HW_INSTRUCTIONS:
4152 		case PERF_COUNT_HW_BUS_CYCLES:
4153 		case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
4154 		case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
4155 		case PERF_COUNT_HW_REF_CPU_CYCLES:
4156 			factor = 500000000;
4157 			break;
4158 		case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
4159 		case PERF_COUNT_HW_BRANCH_MISSES:
4160 			factor = 125000000;
4161 			break;
4162 		case PERF_COUNT_HW_CACHE_REFERENCES:
4163 		case PERF_COUNT_HW_CACHE_MISSES:
4164 			factor = 25000000;
4165 			break;
4166 		default:
4167 			goto end;
4168 		}
4169 	}
4170 
4171 	if (type == PERF_TYPE_HW_CACHE)
4172 		factor = 25000000;
4173 end:
4174 	/*
4175 	 * Usually, a prime or a number with less factors (close to prime)
4176 	 * is chosen as an SAV, which makes it less likely that the sampling
4177 	 * period synchronizes with some periodic event in the workload.
4178 	 * Minus 1 to make it at least avoiding values near power of twos
4179 	 * for the default freq.
4180 	 */
4181 	start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
4182 
4183 	if (start > x86_pmu.max_period)
4184 		start = x86_pmu.max_period;
4185 
4186 	if (x86_pmu.limit_period)
4187 		x86_pmu.limit_period(event, &start);
4188 
4189 	return start;
4190 }
4191 
intel_pmu_has_acr(struct pmu * pmu)4192 static inline bool intel_pmu_has_acr(struct pmu *pmu)
4193 {
4194 	return !!hybrid(pmu, acr_cause_mask64);
4195 }
4196 
intel_pmu_is_acr_group(struct perf_event * event)4197 static bool intel_pmu_is_acr_group(struct perf_event *event)
4198 {
4199 	/* The group leader has the ACR flag set */
4200 	if (is_acr_event_group(event))
4201 		return true;
4202 
4203 	/* The acr_mask is set */
4204 	if (event->attr.config2)
4205 		return true;
4206 
4207 	return false;
4208 }
4209 
intel_pmu_set_acr_cntr_constr(struct perf_event * event,u64 * cause_mask,int * num)4210 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
4211 						 u64 *cause_mask, int *num)
4212 {
4213 	event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
4214 	*cause_mask |= event->attr.config2;
4215 	*num += 1;
4216 }
4217 
intel_pmu_set_acr_caused_constr(struct perf_event * event,int idx,u64 cause_mask)4218 static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
4219 						   int idx, u64 cause_mask)
4220 {
4221 	if (test_bit(idx, (unsigned long *)&cause_mask))
4222 		event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
4223 }
4224 
intel_pmu_hw_config(struct perf_event * event)4225 static int intel_pmu_hw_config(struct perf_event *event)
4226 {
4227 	int ret = x86_pmu_hw_config(event);
4228 
4229 	if (ret)
4230 		return ret;
4231 
4232 	ret = intel_pmu_bts_config(event);
4233 	if (ret)
4234 		return ret;
4235 
4236 	if (event->attr.freq && event->attr.sample_freq) {
4237 		event->hw.sample_period = intel_pmu_freq_start_period(event);
4238 		event->hw.last_period = event->hw.sample_period;
4239 		local64_set(&event->hw.period_left, event->hw.sample_period);
4240 	}
4241 
4242 	if (event->attr.precise_ip) {
4243 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
4244 			return -EINVAL;
4245 
4246 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
4247 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
4248 			if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) &&
4249 			    !has_aux_action(event)) {
4250 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
4251 				event->attach_state |= PERF_ATTACH_SCHED_CB;
4252 			}
4253 		}
4254 		if (x86_pmu.pebs_aliases)
4255 			x86_pmu.pebs_aliases(event);
4256 	}
4257 
4258 	if (needs_branch_stack(event)) {
4259 		/* Avoid branch stack setup for counting events in SAMPLE READ */
4260 		if (is_sampling_event(event) ||
4261 		    !(event->attr.sample_type & PERF_SAMPLE_READ))
4262 			event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4263 	}
4264 
4265 	if (branch_sample_counters(event)) {
4266 		struct perf_event *leader, *sibling;
4267 		int num = 0;
4268 
4269 		if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
4270 		    (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
4271 			return -EINVAL;
4272 
4273 		/*
4274 		 * The branch counter logging is not supported in the call stack
4275 		 * mode yet, since we cannot simply flush the LBR during e.g.,
4276 		 * multiplexing. Also, there is no obvious usage with the call
4277 		 * stack mode. Simply forbids it for now.
4278 		 *
4279 		 * If any events in the group enable the branch counter logging
4280 		 * feature, the group is treated as a branch counter logging
4281 		 * group, which requires the extra space to store the counters.
4282 		 */
4283 		leader = event->group_leader;
4284 		if (branch_sample_call_stack(leader))
4285 			return -EINVAL;
4286 		if (branch_sample_counters(leader)) {
4287 			num++;
4288 			leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
4289 		}
4290 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4291 
4292 		for_each_sibling_event(sibling, leader) {
4293 			if (branch_sample_call_stack(sibling))
4294 				return -EINVAL;
4295 			if (branch_sample_counters(sibling)) {
4296 				num++;
4297 				sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
4298 			}
4299 		}
4300 
4301 		if (num > fls(x86_pmu.lbr_counters))
4302 			return -EINVAL;
4303 		/*
4304 		 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't
4305 		 * require any branch stack setup.
4306 		 * Clear the bit to avoid unnecessary branch stack setup.
4307 		 */
4308 		if (0 == (event->attr.branch_sample_type &
4309 			  ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4310 			    PERF_SAMPLE_BRANCH_COUNTERS)))
4311 			event->hw.flags  &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4312 
4313 		/*
4314 		 * Force the leader to be a LBR event. So LBRs can be reset
4315 		 * with the leader event. See intel_pmu_lbr_del() for details.
4316 		 */
4317 		if (!intel_pmu_needs_branch_stack(leader))
4318 			return -EINVAL;
4319 	}
4320 
4321 	if (intel_pmu_needs_branch_stack(event)) {
4322 		ret = intel_pmu_setup_lbr_filter(event);
4323 		if (ret)
4324 			return ret;
4325 		event->attach_state |= PERF_ATTACH_SCHED_CB;
4326 
4327 		/*
4328 		 * BTS is set up earlier in this path, so don't account twice
4329 		 */
4330 		if (!unlikely(intel_pmu_has_bts(event))) {
4331 			/* disallow lbr if conflicting events are present */
4332 			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4333 				return -EBUSY;
4334 
4335 			event->destroy = hw_perf_lbr_event_destroy;
4336 		}
4337 	}
4338 
4339 	if (event->attr.aux_output) {
4340 		if (!event->attr.precise_ip)
4341 			return -EINVAL;
4342 
4343 		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4344 	}
4345 
4346 	if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
4347 	    (x86_pmu.intel_cap.pebs_format >= 6) &&
4348 	    x86_pmu.intel_cap.pebs_baseline &&
4349 	    is_sampling_event(event) &&
4350 	    event->attr.precise_ip)
4351 		event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
4352 
4353 	if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) {
4354 		struct perf_event *sibling, *leader = event->group_leader;
4355 		struct pmu *pmu = event->pmu;
4356 		bool has_sw_event = false;
4357 		int num = 0, idx = 0;
4358 		u64 cause_mask = 0;
4359 
4360 		/* Not support perf metrics */
4361 		if (is_metric_event(event))
4362 			return -EINVAL;
4363 
4364 		/* Not support freq mode */
4365 		if (event->attr.freq)
4366 			return -EINVAL;
4367 
4368 		/* PDist is not supported */
4369 		if (event->attr.config2 && event->attr.precise_ip > 2)
4370 			return -EINVAL;
4371 
4372 		/* The reload value cannot exceeds the max period */
4373 		if (event->attr.sample_period > x86_pmu.max_period)
4374 			return -EINVAL;
4375 		/*
4376 		 * The counter-constraints of each event cannot be finalized
4377 		 * unless the whole group is scanned. However, it's hard
4378 		 * to know whether the event is the last one of the group.
4379 		 * Recalculate the counter-constraints for each event when
4380 		 * adding a new event.
4381 		 *
4382 		 * The group is traversed twice, which may be optimized later.
4383 		 * In the first round,
4384 		 * - Find all events which do reload when other events
4385 		 *   overflow and set the corresponding counter-constraints
4386 		 * - Add all events, which can cause other events reload,
4387 		 *   in the cause_mask
4388 		 * - Error out if the number of events exceeds the HW limit
4389 		 * - The ACR events must be contiguous.
4390 		 *   Error out if there are non-X86 events between ACR events.
4391 		 *   This is not a HW limit, but a SW limit.
4392 		 *   With the assumption, the intel_pmu_acr_late_setup() can
4393 		 *   easily convert the event idx to counter idx without
4394 		 *   traversing the whole event list.
4395 		 */
4396 		if (!is_x86_event(leader))
4397 			return -EINVAL;
4398 
4399 		if (leader->attr.config2)
4400 			intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num);
4401 
4402 		if (leader->nr_siblings) {
4403 			for_each_sibling_event(sibling, leader) {
4404 				if (!is_x86_event(sibling)) {
4405 					has_sw_event = true;
4406 					continue;
4407 				}
4408 				if (!sibling->attr.config2)
4409 					continue;
4410 				if (has_sw_event)
4411 					return -EINVAL;
4412 				intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num);
4413 			}
4414 		}
4415 		if (leader != event && event->attr.config2) {
4416 			if (has_sw_event)
4417 				return -EINVAL;
4418 			intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num);
4419 		}
4420 
4421 		if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) ||
4422 		    num > hweight64(hybrid(event->pmu, acr_cntr_mask64)))
4423 			return -EINVAL;
4424 		/*
4425 		 * In the second round, apply the counter-constraints for
4426 		 * the events which can cause other events reload.
4427 		 */
4428 		intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask);
4429 
4430 		if (leader->nr_siblings) {
4431 			for_each_sibling_event(sibling, leader)
4432 				intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
4433 		}
4434 
4435 		if (leader != event)
4436 			intel_pmu_set_acr_caused_constr(event, idx, cause_mask);
4437 
4438 		leader->hw.flags |= PERF_X86_EVENT_ACR;
4439 	}
4440 
4441 	if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4442 	    (event->attr.type == PERF_TYPE_HW_CACHE))
4443 		return 0;
4444 
4445 	/*
4446 	 * Config Topdown slots and metric events
4447 	 *
4448 	 * The slots event on Fixed Counter 3 can support sampling,
4449 	 * which will be handled normally in x86_perf_event_update().
4450 	 *
4451 	 * Metric events don't support sampling and require being paired
4452 	 * with a slots event as group leader. When the slots event
4453 	 * is used in a metrics group, it too cannot support sampling.
4454 	 */
4455 	if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4456 		/* The metrics_clear can only be set for the slots event */
4457 		if (event->attr.config1 &&
4458 		    (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR)))
4459 			return -EINVAL;
4460 
4461 		if (event->attr.config2)
4462 			return -EINVAL;
4463 
4464 		/*
4465 		 * The TopDown metrics events and slots event don't
4466 		 * support any filters.
4467 		 */
4468 		if (event->attr.config & X86_ALL_EVENT_FLAGS)
4469 			return -EINVAL;
4470 
4471 		if (is_available_metric_event(event)) {
4472 			struct perf_event *leader = event->group_leader;
4473 
4474 			/* The metric events don't support sampling. */
4475 			if (is_sampling_event(event))
4476 				return -EINVAL;
4477 
4478 			/* The metric events require a slots group leader. */
4479 			if (!is_slots_event(leader))
4480 				return -EINVAL;
4481 
4482 			/*
4483 			 * The leader/SLOTS must not be a sampling event for
4484 			 * metric use; hardware requires it starts at 0 when used
4485 			 * in conjunction with MSR_PERF_METRICS.
4486 			 */
4487 			if (is_sampling_event(leader))
4488 				return -EINVAL;
4489 
4490 			event->event_caps |= PERF_EV_CAP_SIBLING;
4491 			/*
4492 			 * Only once we have a METRICs sibling do we
4493 			 * need TopDown magic.
4494 			 */
4495 			leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4496 			event->hw.flags  |= PERF_X86_EVENT_TOPDOWN;
4497 		}
4498 	}
4499 
4500 	/*
4501 	 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4502 	 * doesn't function quite right. As a work-around it needs to always be
4503 	 * co-scheduled with a auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4504 	 * The actual count of this second event is irrelevant it just needs
4505 	 * to be active to make the first event function correctly.
4506 	 *
4507 	 * In a group, the auxiliary event must be in front of the load latency
4508 	 * event. The rule is to simplify the implementation of the check.
4509 	 * That's because perf cannot have a complete group at the moment.
4510 	 */
4511 	if (require_mem_loads_aux_event(event) &&
4512 	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4513 	    is_mem_loads_event(event)) {
4514 		struct perf_event *leader = event->group_leader;
4515 		struct perf_event *sibling = NULL;
4516 
4517 		/*
4518 		 * When this memload event is also the first event (no group
4519 		 * exists yet), then there is no aux event before it.
4520 		 */
4521 		if (leader == event)
4522 			return -ENODATA;
4523 
4524 		if (!is_mem_loads_aux_event(leader)) {
4525 			for_each_sibling_event(sibling, leader) {
4526 				if (is_mem_loads_aux_event(sibling))
4527 					break;
4528 			}
4529 			if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4530 				return -ENODATA;
4531 		}
4532 	}
4533 
4534 	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4535 		return 0;
4536 
4537 	if (x86_pmu.version < 3)
4538 		return -EINVAL;
4539 
4540 	ret = perf_allow_cpu();
4541 	if (ret)
4542 		return ret;
4543 
4544 	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4545 
4546 	return 0;
4547 }
4548 
4549 /*
4550  * Currently, the only caller of this function is the atomic_switch_perf_msrs().
4551  * The host perf context helps to prepare the values of the real hardware for
4552  * a set of msrs that need to be switched atomically in a vmx transaction.
4553  *
4554  * For example, the pseudocode needed to add a new msr should look like:
4555  *
4556  * arr[(*nr)++] = (struct perf_guest_switch_msr){
4557  *	.msr = the hardware msr address,
4558  *	.host = the value the hardware has when it doesn't run a guest,
4559  *	.guest = the value the hardware has when it runs a guest,
4560  * };
4561  *
4562  * These values have nothing to do with the emulated values the guest sees
4563  * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4564  * specifically in the intel_pmu_{get,set}_msr().
4565  */
intel_guest_get_msrs(int * nr,void * data)4566 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4567 {
4568 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4569 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4570 	struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4571 	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4572 	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4573 	int global_ctrl, pebs_enable;
4574 
4575 	/*
4576 	 * In addition to obeying exclude_guest/exclude_host, remove bits being
4577 	 * used for PEBS when running a guest, because PEBS writes to virtual
4578 	 * addresses (not physical addresses).
4579 	 */
4580 	*nr = 0;
4581 	global_ctrl = (*nr)++;
4582 	arr[global_ctrl] = (struct perf_guest_switch_msr){
4583 		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
4584 		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4585 		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
4586 	};
4587 
4588 	if (!x86_pmu.ds_pebs)
4589 		return arr;
4590 
4591 	/*
4592 	 * If PMU counter has PEBS enabled it is not enough to
4593 	 * disable counter on a guest entry since PEBS memory
4594 	 * write can overshoot guest entry and corrupt guest
4595 	 * memory. Disabling PEBS solves the problem.
4596 	 *
4597 	 * Don't do this if the CPU already enforces it.
4598 	 */
4599 	if (x86_pmu.pebs_no_isolation) {
4600 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4601 			.msr = MSR_IA32_PEBS_ENABLE,
4602 			.host = cpuc->pebs_enabled,
4603 			.guest = 0,
4604 		};
4605 		return arr;
4606 	}
4607 
4608 	if (!kvm_pmu || !x86_pmu.pebs_ept)
4609 		return arr;
4610 
4611 	arr[(*nr)++] = (struct perf_guest_switch_msr){
4612 		.msr = MSR_IA32_DS_AREA,
4613 		.host = (unsigned long)cpuc->ds,
4614 		.guest = kvm_pmu->ds_area,
4615 	};
4616 
4617 	if (x86_pmu.intel_cap.pebs_baseline) {
4618 		arr[(*nr)++] = (struct perf_guest_switch_msr){
4619 			.msr = MSR_PEBS_DATA_CFG,
4620 			.host = cpuc->active_pebs_data_cfg,
4621 			.guest = kvm_pmu->pebs_data_cfg,
4622 		};
4623 	}
4624 
4625 	pebs_enable = (*nr)++;
4626 	arr[pebs_enable] = (struct perf_guest_switch_msr){
4627 		.msr = MSR_IA32_PEBS_ENABLE,
4628 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4629 		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
4630 	};
4631 
4632 	if (arr[pebs_enable].host) {
4633 		/* Disable guest PEBS if host PEBS is enabled. */
4634 		arr[pebs_enable].guest = 0;
4635 	} else {
4636 		/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4637 		arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4638 		arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4639 		/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4640 		arr[global_ctrl].guest |= arr[pebs_enable].guest;
4641 	}
4642 
4643 	return arr;
4644 }
4645 
core_guest_get_msrs(int * nr,void * data)4646 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4647 {
4648 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4649 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4650 	int idx;
4651 
4652 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4653 		struct perf_event *event = cpuc->events[idx];
4654 
4655 		arr[idx].msr = x86_pmu_config_addr(idx);
4656 		arr[idx].host = arr[idx].guest = 0;
4657 
4658 		if (!test_bit(idx, cpuc->active_mask))
4659 			continue;
4660 
4661 		arr[idx].host = arr[idx].guest =
4662 			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4663 
4664 		if (event->attr.exclude_host)
4665 			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4666 		else if (event->attr.exclude_guest)
4667 			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4668 	}
4669 
4670 	*nr = x86_pmu_max_num_counters(cpuc->pmu);
4671 	return arr;
4672 }
4673 
core_pmu_enable_event(struct perf_event * event)4674 static void core_pmu_enable_event(struct perf_event *event)
4675 {
4676 	if (!event->attr.exclude_host)
4677 		x86_pmu_enable_event(event);
4678 }
4679 
core_pmu_enable_all(int added)4680 static void core_pmu_enable_all(int added)
4681 {
4682 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4683 	int idx;
4684 
4685 	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4686 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4687 
4688 		if (!test_bit(idx, cpuc->active_mask) ||
4689 				cpuc->events[idx]->attr.exclude_host)
4690 			continue;
4691 
4692 		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4693 	}
4694 }
4695 
hsw_hw_config(struct perf_event * event)4696 static int hsw_hw_config(struct perf_event *event)
4697 {
4698 	int ret = intel_pmu_hw_config(event);
4699 
4700 	if (ret)
4701 		return ret;
4702 	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4703 		return 0;
4704 	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4705 
4706 	/*
4707 	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4708 	 * PEBS or in ANY thread mode. Since the results are non-sensical forbid
4709 	 * this combination.
4710 	 */
4711 	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4712 	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4713 	      event->attr.precise_ip > 0))
4714 		return -EOPNOTSUPP;
4715 
4716 	if (event_is_checkpointed(event)) {
4717 		/*
4718 		 * Sampling of checkpointed events can cause situations where
4719 		 * the CPU constantly aborts because of a overflow, which is
4720 		 * then checkpointed back and ignored. Forbid checkpointing
4721 		 * for sampling.
4722 		 *
4723 		 * But still allow a long sampling period, so that perf stat
4724 		 * from KVM works.
4725 		 */
4726 		if (event->attr.sample_period > 0 &&
4727 		    event->attr.sample_period < 0x7fffffff)
4728 			return -EOPNOTSUPP;
4729 	}
4730 	return 0;
4731 }
4732 
4733 static struct event_constraint counter0_constraint =
4734 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4735 
4736 static struct event_constraint counter1_constraint =
4737 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4738 
4739 static struct event_constraint counter0_1_constraint =
4740 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4741 
4742 static struct event_constraint counter2_constraint =
4743 			EVENT_CONSTRAINT(0, 0x4, 0);
4744 
4745 static struct event_constraint fixed0_constraint =
4746 			FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4747 
4748 static struct event_constraint fixed0_counter0_constraint =
4749 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4750 
4751 static struct event_constraint fixed0_counter0_1_constraint =
4752 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4753 
4754 static struct event_constraint counters_1_7_constraint =
4755 			INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
4756 
4757 static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4758 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4759 			  struct perf_event *event)
4760 {
4761 	struct event_constraint *c;
4762 
4763 	c = intel_get_event_constraints(cpuc, idx, event);
4764 
4765 	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
4766 	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4767 		if (c->idxmsk64 & (1U << 2))
4768 			return &counter2_constraint;
4769 		return &emptyconstraint;
4770 	}
4771 
4772 	return c;
4773 }
4774 
4775 static struct event_constraint *
icl_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4776 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4777 			  struct perf_event *event)
4778 {
4779 	/*
4780 	 * Fixed counter 0 has less skid.
4781 	 * Force instruction:ppp in Fixed counter 0
4782 	 */
4783 	if ((event->attr.precise_ip == 3) &&
4784 	    constraint_match(&fixed0_constraint, event->hw.config))
4785 		return &fixed0_constraint;
4786 
4787 	return hsw_get_event_constraints(cpuc, idx, event);
4788 }
4789 
4790 static struct event_constraint *
glc_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4791 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4792 			  struct perf_event *event)
4793 {
4794 	struct event_constraint *c;
4795 
4796 	c = icl_get_event_constraints(cpuc, idx, event);
4797 
4798 	/*
4799 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
4800 	 * is only supported on the GP counter 0. If a :ppp event which is not
4801 	 * available on the GP counter 0, error out.
4802 	 * Exception: Instruction PDIR is only available on the fixed counter 0.
4803 	 */
4804 	if ((event->attr.precise_ip == 3) &&
4805 	    !constraint_match(&fixed0_constraint, event->hw.config)) {
4806 		if (c->idxmsk64 & BIT_ULL(0))
4807 			return &counter0_constraint;
4808 
4809 		return &emptyconstraint;
4810 	}
4811 
4812 	return c;
4813 }
4814 
4815 static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4816 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4817 			  struct perf_event *event)
4818 {
4819 	struct event_constraint *c;
4820 
4821 	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
4822 	if (event->attr.precise_ip == 3)
4823 		return &counter0_constraint;
4824 
4825 	c = intel_get_event_constraints(cpuc, idx, event);
4826 
4827 	return c;
4828 }
4829 
4830 static struct event_constraint *
tnt_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4831 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4832 			  struct perf_event *event)
4833 {
4834 	struct event_constraint *c;
4835 
4836 	c = intel_get_event_constraints(cpuc, idx, event);
4837 
4838 	/*
4839 	 * :ppp means to do reduced skid PEBS,
4840 	 * which is available on PMC0 and fixed counter 0.
4841 	 */
4842 	if (event->attr.precise_ip == 3) {
4843 		/* Force instruction:ppp on PMC0 and Fixed counter 0 */
4844 		if (constraint_match(&fixed0_constraint, event->hw.config))
4845 			return &fixed0_counter0_constraint;
4846 
4847 		return &counter0_constraint;
4848 	}
4849 
4850 	return c;
4851 }
4852 
4853 static bool allow_tsx_force_abort = true;
4854 
4855 static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4856 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4857 			  struct perf_event *event)
4858 {
4859 	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4860 
4861 	/*
4862 	 * Without TFA we must not use PMC3.
4863 	 */
4864 	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4865 		c = dyn_constraint(cpuc, c, idx);
4866 		c->idxmsk64 &= ~(1ULL << 3);
4867 		c->weight--;
4868 	}
4869 
4870 	return c;
4871 }
4872 
4873 static struct event_constraint *
adl_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4874 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4875 			  struct perf_event *event)
4876 {
4877 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4878 
4879 	if (pmu->pmu_type == hybrid_big)
4880 		return glc_get_event_constraints(cpuc, idx, event);
4881 	else if (pmu->pmu_type == hybrid_small)
4882 		return tnt_get_event_constraints(cpuc, idx, event);
4883 
4884 	WARN_ON(1);
4885 	return &emptyconstraint;
4886 }
4887 
4888 static struct event_constraint *
cmt_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4889 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4890 			  struct perf_event *event)
4891 {
4892 	struct event_constraint *c;
4893 
4894 	c = intel_get_event_constraints(cpuc, idx, event);
4895 
4896 	/*
4897 	 * The :ppp indicates the Precise Distribution (PDist) facility, which
4898 	 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4899 	 * If a :ppp event which is not available on the above eligible counters,
4900 	 * error out.
4901 	 */
4902 	if (event->attr.precise_ip == 3) {
4903 		/* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4904 		if (constraint_match(&fixed0_constraint, event->hw.config)) {
4905 			/* The fixed counter 0 doesn't support LBR event logging. */
4906 			if (branch_sample_counters(event))
4907 				return &counter0_1_constraint;
4908 			else
4909 				return &fixed0_counter0_1_constraint;
4910 		}
4911 
4912 		switch (c->idxmsk64 & 0x3ull) {
4913 		case 0x1:
4914 			return &counter0_constraint;
4915 		case 0x2:
4916 			return &counter1_constraint;
4917 		case 0x3:
4918 			return &counter0_1_constraint;
4919 		}
4920 		return &emptyconstraint;
4921 	}
4922 
4923 	return c;
4924 }
4925 
4926 static struct event_constraint *
rwc_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4927 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4928 			  struct perf_event *event)
4929 {
4930 	struct event_constraint *c;
4931 
4932 	c = glc_get_event_constraints(cpuc, idx, event);
4933 
4934 	/* The Retire Latency is not supported by the fixed counter 0. */
4935 	if (event->attr.precise_ip &&
4936 	    (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4937 	    constraint_match(&fixed0_constraint, event->hw.config)) {
4938 		/*
4939 		 * The Instruction PDIR is only available
4940 		 * on the fixed counter 0. Error out for this case.
4941 		 */
4942 		if (event->attr.precise_ip == 3)
4943 			return &emptyconstraint;
4944 		return &counters_1_7_constraint;
4945 	}
4946 
4947 	return c;
4948 }
4949 
4950 static struct event_constraint *
mtl_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4951 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4952 			  struct perf_event *event)
4953 {
4954 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4955 
4956 	if (pmu->pmu_type == hybrid_big)
4957 		return rwc_get_event_constraints(cpuc, idx, event);
4958 	if (pmu->pmu_type == hybrid_small)
4959 		return cmt_get_event_constraints(cpuc, idx, event);
4960 
4961 	WARN_ON(1);
4962 	return &emptyconstraint;
4963 }
4964 
adl_hw_config(struct perf_event * event)4965 static int adl_hw_config(struct perf_event *event)
4966 {
4967 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4968 
4969 	if (pmu->pmu_type == hybrid_big)
4970 		return hsw_hw_config(event);
4971 	else if (pmu->pmu_type == hybrid_small)
4972 		return intel_pmu_hw_config(event);
4973 
4974 	WARN_ON(1);
4975 	return -EOPNOTSUPP;
4976 }
4977 
adl_get_hybrid_cpu_type(void)4978 static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
4979 {
4980 	return INTEL_CPU_TYPE_CORE;
4981 }
4982 
erratum_hsw11(struct perf_event * event)4983 static inline bool erratum_hsw11(struct perf_event *event)
4984 {
4985 	return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4986 		X86_CONFIG(.event=0xc0, .umask=0x01);
4987 }
4988 
4989 static struct event_constraint *
arl_h_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)4990 arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4991 			  struct perf_event *event)
4992 {
4993 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4994 
4995 	if (pmu->pmu_type == hybrid_tiny)
4996 		return cmt_get_event_constraints(cpuc, idx, event);
4997 
4998 	return mtl_get_event_constraints(cpuc, idx, event);
4999 }
5000 
arl_h_hw_config(struct perf_event * event)5001 static int arl_h_hw_config(struct perf_event *event)
5002 {
5003 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
5004 
5005 	if (pmu->pmu_type == hybrid_tiny)
5006 		return intel_pmu_hw_config(event);
5007 
5008 	return adl_hw_config(event);
5009 }
5010 
5011 /*
5012  * The HSW11 requires a period larger than 100 which is the same as the BDM11.
5013  * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.
5014  *
5015  * The message 'interrupt took too long' can be observed on any counter which
5016  * was armed with a period < 32 and two events expired in the same NMI.
5017  * A minimum period of 32 is enforced for the rest of the events.
5018  */
hsw_limit_period(struct perf_event * event,s64 * left)5019 static void hsw_limit_period(struct perf_event *event, s64 *left)
5020 {
5021 	*left = max(*left, erratum_hsw11(event) ? 128 : 32);
5022 }
5023 
5024 /*
5025  * Broadwell:
5026  *
5027  * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
5028  * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
5029  * the two to enforce a minimum period of 128 (the smallest value that has bits
5030  * 0-5 cleared and >= 100).
5031  *
5032  * Because of how the code in x86_perf_event_set_period() works, the truncation
5033  * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
5034  * to make up for the 'lost' events due to carrying the 'error' in period_left.
5035  *
5036  * Therefore the effective (average) period matches the requested period,
5037  * despite coarser hardware granularity.
5038  */
bdw_limit_period(struct perf_event * event,s64 * left)5039 static void bdw_limit_period(struct perf_event *event, s64 *left)
5040 {
5041 	if (erratum_hsw11(event)) {
5042 		if (*left < 128)
5043 			*left = 128;
5044 		*left &= ~0x3fULL;
5045 	}
5046 }
5047 
nhm_limit_period(struct perf_event * event,s64 * left)5048 static void nhm_limit_period(struct perf_event *event, s64 *left)
5049 {
5050 	*left = max(*left, 32LL);
5051 }
5052 
glc_limit_period(struct perf_event * event,s64 * left)5053 static void glc_limit_period(struct perf_event *event, s64 *left)
5054 {
5055 	if (event->attr.precise_ip == 3)
5056 		*left = max(*left, 128LL);
5057 }
5058 
5059 PMU_FORMAT_ATTR(event,	"config:0-7"	);
5060 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
5061 PMU_FORMAT_ATTR(edge,	"config:18"	);
5062 PMU_FORMAT_ATTR(pc,	"config:19"	);
5063 PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
5064 PMU_FORMAT_ATTR(inv,	"config:23"	);
5065 PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
5066 PMU_FORMAT_ATTR(in_tx,  "config:32"	);
5067 PMU_FORMAT_ATTR(in_tx_cp, "config:33"	);
5068 PMU_FORMAT_ATTR(eq,	"config:36"	); /* v6 + */
5069 
5070 PMU_FORMAT_ATTR(metrics_clear,	"config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5071 
umask2_show(struct device * dev,struct device_attribute * attr,char * page)5072 static ssize_t umask2_show(struct device *dev,
5073 			   struct device_attribute *attr,
5074 			   char *page)
5075 {
5076 	u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
5077 
5078 	if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
5079 		return sprintf(page, "config:8-15,40-47\n");
5080 
5081 	/* Roll back to the old format if umask2 is not supported. */
5082 	return sprintf(page, "config:8-15\n");
5083 }
5084 
5085 static struct device_attribute format_attr_umask2  =
5086 		__ATTR(umask, 0444, umask2_show, NULL);
5087 
5088 static struct attribute *format_evtsel_ext_attrs[] = {
5089 	&format_attr_umask2.attr,
5090 	&format_attr_eq.attr,
5091 	&format_attr_metrics_clear.attr,
5092 	NULL
5093 };
5094 
5095 static umode_t
evtsel_ext_is_visible(struct kobject * kobj,struct attribute * attr,int i)5096 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5097 {
5098 	struct device *dev = kobj_to_dev(kobj);
5099 	u64 mask;
5100 
5101 	/*
5102 	 * The umask and umask2 have different formats but share the
5103 	 * same attr name. In update mode, the previous value of the
5104 	 * umask is unconditionally removed before is_visible. If
5105 	 * umask2 format is not enumerated, it's impossible to roll
5106 	 * back to the old format.
5107 	 * Does the check in umask2_show rather than is_visible.
5108 	 */
5109 	if (i == 0)
5110 		return attr->mode;
5111 
5112 	mask = hybrid(dev_get_drvdata(dev), config_mask);
5113 	if (i == 1)
5114 		return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
5115 
5116 	/* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */
5117 	if (i == 2) {
5118 		union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap);
5119 
5120 		return intel_cap.rdpmc_metrics_clear ? attr->mode : 0;
5121 	}
5122 
5123 	return 0;
5124 }
5125 
5126 static struct attribute *intel_arch_formats_attr[] = {
5127 	&format_attr_event.attr,
5128 	&format_attr_umask.attr,
5129 	&format_attr_edge.attr,
5130 	&format_attr_pc.attr,
5131 	&format_attr_inv.attr,
5132 	&format_attr_cmask.attr,
5133 	NULL,
5134 };
5135 
intel_event_sysfs_show(char * page,u64 config)5136 ssize_t intel_event_sysfs_show(char *page, u64 config)
5137 {
5138 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
5139 
5140 	return x86_event_sysfs_show(page, config, event);
5141 }
5142 
allocate_shared_regs(int cpu)5143 static struct intel_shared_regs *allocate_shared_regs(int cpu)
5144 {
5145 	struct intel_shared_regs *regs;
5146 	int i;
5147 
5148 	regs = kzalloc_node(sizeof(struct intel_shared_regs),
5149 			    GFP_KERNEL, cpu_to_node(cpu));
5150 	if (regs) {
5151 		/*
5152 		 * initialize the locks to keep lockdep happy
5153 		 */
5154 		for (i = 0; i < EXTRA_REG_MAX; i++)
5155 			raw_spin_lock_init(&regs->regs[i].lock);
5156 
5157 		regs->core_id = -1;
5158 	}
5159 	return regs;
5160 }
5161 
allocate_excl_cntrs(int cpu)5162 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
5163 {
5164 	struct intel_excl_cntrs *c;
5165 
5166 	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
5167 			 GFP_KERNEL, cpu_to_node(cpu));
5168 	if (c) {
5169 		raw_spin_lock_init(&c->lock);
5170 		c->core_id = -1;
5171 	}
5172 	return c;
5173 }
5174 
5175 
intel_cpuc_prepare(struct cpu_hw_events * cpuc,int cpu)5176 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
5177 {
5178 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
5179 
5180 	if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
5181 		cpuc->shared_regs = allocate_shared_regs(cpu);
5182 		if (!cpuc->shared_regs)
5183 			goto err;
5184 	}
5185 
5186 	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
5187 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
5188 
5189 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
5190 		if (!cpuc->constraint_list)
5191 			goto err_shared_regs;
5192 	}
5193 
5194 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5195 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
5196 		if (!cpuc->excl_cntrs)
5197 			goto err_constraint_list;
5198 
5199 		cpuc->excl_thread_id = 0;
5200 	}
5201 
5202 	return 0;
5203 
5204 err_constraint_list:
5205 	kfree(cpuc->constraint_list);
5206 	cpuc->constraint_list = NULL;
5207 
5208 err_shared_regs:
5209 	kfree(cpuc->shared_regs);
5210 	cpuc->shared_regs = NULL;
5211 
5212 err:
5213 	return -ENOMEM;
5214 }
5215 
intel_pmu_cpu_prepare(int cpu)5216 static int intel_pmu_cpu_prepare(int cpu)
5217 {
5218 	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
5219 }
5220 
flip_smm_bit(void * data)5221 static void flip_smm_bit(void *data)
5222 {
5223 	unsigned long set = *(unsigned long *)data;
5224 
5225 	if (set > 0) {
5226 		msr_set_bit(MSR_IA32_DEBUGCTLMSR,
5227 			    DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5228 	} else {
5229 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
5230 			      DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
5231 	}
5232 }
5233 
intel_pmu_check_counters_mask(u64 * cntr_mask,u64 * fixed_cntr_mask,u64 * intel_ctrl)5234 static void intel_pmu_check_counters_mask(u64 *cntr_mask,
5235 					  u64 *fixed_cntr_mask,
5236 					  u64 *intel_ctrl)
5237 {
5238 	unsigned int bit;
5239 
5240 	bit = fls64(*cntr_mask);
5241 	if (bit > INTEL_PMC_MAX_GENERIC) {
5242 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5243 		     bit, INTEL_PMC_MAX_GENERIC);
5244 		*cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
5245 	}
5246 	*intel_ctrl = *cntr_mask;
5247 
5248 	bit = fls64(*fixed_cntr_mask);
5249 	if (bit > INTEL_PMC_MAX_FIXED) {
5250 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5251 		     bit, INTEL_PMC_MAX_FIXED);
5252 		*fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
5253 	}
5254 
5255 	*intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
5256 }
5257 
5258 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5259 					      u64 cntr_mask,
5260 					      u64 fixed_cntr_mask,
5261 					      u64 intel_ctrl);
5262 
5263 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
5264 
intel_pmu_broken_perf_cap(void)5265 static inline bool intel_pmu_broken_perf_cap(void)
5266 {
5267 	/* The Perf Metric (Bit 15) is always cleared */
5268 	if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
5269 	    boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
5270 		return true;
5271 
5272 	return false;
5273 }
5274 
update_pmu_cap(struct pmu * pmu)5275 static void update_pmu_cap(struct pmu *pmu)
5276 {
5277 	unsigned int cntr, fixed_cntr, ecx, edx;
5278 	union cpuid35_eax eax;
5279 	union cpuid35_ebx ebx;
5280 
5281 	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
5282 
5283 	if (ebx.split.umask2)
5284 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
5285 	if (ebx.split.eq)
5286 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
5287 
5288 	if (eax.split.cntr_subleaf) {
5289 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
5290 			    &cntr, &fixed_cntr, &ecx, &edx);
5291 		hybrid(pmu, cntr_mask64) = cntr;
5292 		hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
5293 	}
5294 
5295 	if (eax.split.acr_subleaf) {
5296 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
5297 			    &cntr, &fixed_cntr, &ecx, &edx);
5298 		/* The mask of the counters which can be reloaded */
5299 		hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
5300 
5301 		/* The mask of the counters which can cause a reload of reloadable counters */
5302 		hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
5303 	}
5304 
5305 	if (!intel_pmu_broken_perf_cap()) {
5306 		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
5307 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
5308 	}
5309 }
5310 
intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu * pmu)5311 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
5312 {
5313 	intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
5314 				      &pmu->intel_ctrl);
5315 	pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
5316 	pmu->unconstrained = (struct event_constraint)
5317 			     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
5318 						0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
5319 
5320 	if (pmu->intel_cap.perf_metrics)
5321 		pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5322 	else
5323 		pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5324 
5325 	intel_pmu_check_event_constraints(pmu->event_constraints,
5326 					  pmu->cntr_mask64,
5327 					  pmu->fixed_cntr_mask64,
5328 					  pmu->intel_ctrl);
5329 
5330 	intel_pmu_check_extra_regs(pmu->extra_regs);
5331 }
5332 
find_hybrid_pmu_for_cpu(void)5333 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
5334 {
5335 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
5336 	enum intel_cpu_type cpu_type = c->topo.intel_type;
5337 	int i;
5338 
5339 	/*
5340 	 * This is running on a CPU model that is known to have hybrid
5341 	 * configurations. But the CPU told us it is not hybrid, shame
5342 	 * on it. There should be a fixup function provided for these
5343 	 * troublesome CPUs (->get_hybrid_cpu_type).
5344 	 */
5345 	if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
5346 		if (x86_pmu.get_hybrid_cpu_type)
5347 			cpu_type = x86_pmu.get_hybrid_cpu_type();
5348 		else
5349 			return NULL;
5350 	}
5351 
5352 	/*
5353 	 * This essentially just maps between the 'hybrid_cpu_type'
5354 	 * and 'hybrid_pmu_type' enums except for ARL-H processor
5355 	 * which needs to compare atom uarch native id since ARL-H
5356 	 * contains two different atom uarchs.
5357 	 */
5358 	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5359 		enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
5360 		u32 native_id;
5361 
5362 		if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
5363 			return &x86_pmu.hybrid_pmu[i];
5364 		if (cpu_type == INTEL_CPU_TYPE_ATOM) {
5365 			if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
5366 				return &x86_pmu.hybrid_pmu[i];
5367 
5368 			native_id = c->topo.intel_native_model_id;
5369 			if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
5370 				return &x86_pmu.hybrid_pmu[i];
5371 			if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
5372 				return &x86_pmu.hybrid_pmu[i];
5373 		}
5374 	}
5375 
5376 	return NULL;
5377 }
5378 
init_hybrid_pmu(int cpu)5379 static bool init_hybrid_pmu(int cpu)
5380 {
5381 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5382 	struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
5383 
5384 	if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
5385 		cpuc->pmu = NULL;
5386 		return false;
5387 	}
5388 
5389 	/* Only check and dump the PMU information for the first CPU */
5390 	if (!cpumask_empty(&pmu->supported_cpus))
5391 		goto end;
5392 
5393 	if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
5394 		update_pmu_cap(&pmu->pmu);
5395 
5396 	intel_pmu_check_hybrid_pmus(pmu);
5397 
5398 	if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
5399 		return false;
5400 
5401 	pr_info("%s PMU driver: ", pmu->name);
5402 
5403 	pr_cont("\n");
5404 
5405 	x86_pmu_show_pmu_cap(&pmu->pmu);
5406 
5407 end:
5408 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
5409 	cpuc->pmu = &pmu->pmu;
5410 
5411 	return true;
5412 }
5413 
intel_pmu_cpu_starting(int cpu)5414 static void intel_pmu_cpu_starting(int cpu)
5415 {
5416 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5417 	int core_id = topology_core_id(cpu);
5418 	int i;
5419 
5420 	if (is_hybrid() && !init_hybrid_pmu(cpu))
5421 		return;
5422 
5423 	init_debug_store_on_cpu(cpu);
5424 	/*
5425 	 * Deal with CPUs that don't clear their LBRs on power-up, and that may
5426 	 * even boot with LBRs enabled.
5427 	 */
5428 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
5429 		msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
5430 	intel_pmu_lbr_reset();
5431 
5432 	cpuc->lbr_sel = NULL;
5433 
5434 	if (x86_pmu.flags & PMU_FL_TFA) {
5435 		WARN_ON_ONCE(cpuc->tfa_shadow);
5436 		cpuc->tfa_shadow = ~0ULL;
5437 		intel_set_tfa(cpuc, false);
5438 	}
5439 
5440 	if (x86_pmu.version > 1)
5441 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
5442 
5443 	/*
5444 	 * Disable perf metrics if any added CPU doesn't support it.
5445 	 *
5446 	 * Turn off the check for a hybrid architecture, because the
5447 	 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate
5448 	 * the architecture features. The perf metrics is a model-specific
5449 	 * feature for now. The corresponding bit should always be 0 on
5450 	 * a hybrid platform, e.g., Alder Lake.
5451 	 */
5452 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
5453 		union perf_capabilities perf_cap;
5454 
5455 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
5456 		if (!perf_cap.perf_metrics) {
5457 			x86_pmu.intel_cap.perf_metrics = 0;
5458 			x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5459 		}
5460 	}
5461 
5462 	if (!cpuc->shared_regs)
5463 		return;
5464 
5465 	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
5466 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5467 			struct intel_shared_regs *pc;
5468 
5469 			pc = per_cpu(cpu_hw_events, i).shared_regs;
5470 			if (pc && pc->core_id == core_id) {
5471 				cpuc->kfree_on_online[0] = cpuc->shared_regs;
5472 				cpuc->shared_regs = pc;
5473 				break;
5474 			}
5475 		}
5476 		cpuc->shared_regs->core_id = core_id;
5477 		cpuc->shared_regs->refcnt++;
5478 	}
5479 
5480 	if (x86_pmu.lbr_sel_map)
5481 		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
5482 
5483 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5484 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5485 			struct cpu_hw_events *sibling;
5486 			struct intel_excl_cntrs *c;
5487 
5488 			sibling = &per_cpu(cpu_hw_events, i);
5489 			c = sibling->excl_cntrs;
5490 			if (c && c->core_id == core_id) {
5491 				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
5492 				cpuc->excl_cntrs = c;
5493 				if (!sibling->excl_thread_id)
5494 					cpuc->excl_thread_id = 1;
5495 				break;
5496 			}
5497 		}
5498 		cpuc->excl_cntrs->core_id = core_id;
5499 		cpuc->excl_cntrs->refcnt++;
5500 	}
5501 }
5502 
free_excl_cntrs(struct cpu_hw_events * cpuc)5503 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
5504 {
5505 	struct intel_excl_cntrs *c;
5506 
5507 	c = cpuc->excl_cntrs;
5508 	if (c) {
5509 		if (c->core_id == -1 || --c->refcnt == 0)
5510 			kfree(c);
5511 		cpuc->excl_cntrs = NULL;
5512 	}
5513 
5514 	kfree(cpuc->constraint_list);
5515 	cpuc->constraint_list = NULL;
5516 }
5517 
intel_pmu_cpu_dying(int cpu)5518 static void intel_pmu_cpu_dying(int cpu)
5519 {
5520 	fini_debug_store_on_cpu(cpu);
5521 }
5522 
intel_cpuc_finish(struct cpu_hw_events * cpuc)5523 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
5524 {
5525 	struct intel_shared_regs *pc;
5526 
5527 	pc = cpuc->shared_regs;
5528 	if (pc) {
5529 		if (pc->core_id == -1 || --pc->refcnt == 0)
5530 			kfree(pc);
5531 		cpuc->shared_regs = NULL;
5532 	}
5533 
5534 	free_excl_cntrs(cpuc);
5535 }
5536 
intel_pmu_cpu_dead(int cpu)5537 static void intel_pmu_cpu_dead(int cpu)
5538 {
5539 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5540 
5541 	intel_cpuc_finish(cpuc);
5542 
5543 	if (is_hybrid() && cpuc->pmu)
5544 		cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
5545 }
5546 
intel_pmu_sched_task(struct perf_event_pmu_context * pmu_ctx,struct task_struct * task,bool sched_in)5547 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5548 				 struct task_struct *task, bool sched_in)
5549 {
5550 	intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5551 	intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
5552 }
5553 
intel_pmu_check_period(struct perf_event * event,u64 value)5554 static int intel_pmu_check_period(struct perf_event *event, u64 value)
5555 {
5556 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
5557 }
5558 
intel_aux_output_init(void)5559 static void intel_aux_output_init(void)
5560 {
5561 	/* Refer also intel_pmu_aux_output_match() */
5562 	if (x86_pmu.intel_cap.pebs_output_pt_available)
5563 		x86_pmu.assign = intel_pmu_assign_event;
5564 }
5565 
intel_pmu_aux_output_match(struct perf_event * event)5566 static int intel_pmu_aux_output_match(struct perf_event *event)
5567 {
5568 	/* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
5569 	if (!x86_pmu.intel_cap.pebs_output_pt_available)
5570 		return 0;
5571 
5572 	return is_intel_pt_event(event);
5573 }
5574 
intel_pmu_filter(struct pmu * pmu,int cpu,bool * ret)5575 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
5576 {
5577 	struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
5578 
5579 	*ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5580 }
5581 
5582 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5583 
5584 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5585 
5586 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5587 
5588 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5589 
5590 static struct attribute *intel_arch3_formats_attr[] = {
5591 	&format_attr_event.attr,
5592 	&format_attr_umask.attr,
5593 	&format_attr_edge.attr,
5594 	&format_attr_pc.attr,
5595 	&format_attr_any.attr,
5596 	&format_attr_inv.attr,
5597 	&format_attr_cmask.attr,
5598 	NULL,
5599 };
5600 
5601 static struct attribute *hsw_format_attr[] = {
5602 	&format_attr_in_tx.attr,
5603 	&format_attr_in_tx_cp.attr,
5604 	&format_attr_offcore_rsp.attr,
5605 	&format_attr_ldlat.attr,
5606 	NULL
5607 };
5608 
5609 static struct attribute *nhm_format_attr[] = {
5610 	&format_attr_offcore_rsp.attr,
5611 	&format_attr_ldlat.attr,
5612 	NULL
5613 };
5614 
5615 static struct attribute *slm_format_attr[] = {
5616 	&format_attr_offcore_rsp.attr,
5617 	NULL
5618 };
5619 
5620 static struct attribute *cmt_format_attr[] = {
5621 	&format_attr_offcore_rsp.attr,
5622 	&format_attr_ldlat.attr,
5623 	&format_attr_snoop_rsp.attr,
5624 	NULL
5625 };
5626 
5627 static struct attribute *skl_format_attr[] = {
5628 	&format_attr_frontend.attr,
5629 	NULL,
5630 };
5631 
5632 static __initconst const struct x86_pmu core_pmu = {
5633 	.name			= "core",
5634 	.handle_irq		= x86_pmu_handle_irq,
5635 	.disable_all		= x86_pmu_disable_all,
5636 	.enable_all		= core_pmu_enable_all,
5637 	.enable			= core_pmu_enable_event,
5638 	.disable		= x86_pmu_disable_event,
5639 	.hw_config		= core_pmu_hw_config,
5640 	.schedule_events	= x86_schedule_events,
5641 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
5642 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
5643 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
5644 	.event_map		= intel_pmu_event_map,
5645 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
5646 	.apic			= 1,
5647 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
5648 
5649 	/*
5650 	 * Intel PMCs cannot be accessed sanely above 32-bit width,
5651 	 * so we install an artificial 1<<31 period regardless of
5652 	 * the generic event period:
5653 	 */
5654 	.max_period		= (1ULL<<31) - 1,
5655 	.get_event_constraints	= intel_get_event_constraints,
5656 	.put_event_constraints	= intel_put_event_constraints,
5657 	.event_constraints	= intel_core_event_constraints,
5658 	.guest_get_msrs		= core_guest_get_msrs,
5659 	.format_attrs		= intel_arch_formats_attr,
5660 	.events_sysfs_show	= intel_event_sysfs_show,
5661 
5662 	/*
5663 	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
5664 	 * together with PMU version 1 and thus be using core_pmu with
5665 	 * shared_regs. We need following callbacks here to allocate
5666 	 * it properly.
5667 	 */
5668 	.cpu_prepare		= intel_pmu_cpu_prepare,
5669 	.cpu_starting		= intel_pmu_cpu_starting,
5670 	.cpu_dying		= intel_pmu_cpu_dying,
5671 	.cpu_dead		= intel_pmu_cpu_dead,
5672 
5673 	.check_period		= intel_pmu_check_period,
5674 
5675 	.lbr_reset		= intel_pmu_lbr_reset_64,
5676 	.lbr_read		= intel_pmu_lbr_read_64,
5677 	.lbr_save		= intel_pmu_lbr_save,
5678 	.lbr_restore		= intel_pmu_lbr_restore,
5679 };
5680 
5681 static __initconst const struct x86_pmu intel_pmu = {
5682 	.name			= "Intel",
5683 	.handle_irq		= intel_pmu_handle_irq,
5684 	.disable_all		= intel_pmu_disable_all,
5685 	.enable_all		= intel_pmu_enable_all,
5686 	.enable			= intel_pmu_enable_event,
5687 	.disable		= intel_pmu_disable_event,
5688 	.add			= intel_pmu_add_event,
5689 	.del			= intel_pmu_del_event,
5690 	.read			= intel_pmu_read_event,
5691 	.set_period		= intel_pmu_set_period,
5692 	.update			= intel_pmu_update,
5693 	.hw_config		= intel_pmu_hw_config,
5694 	.schedule_events	= x86_schedule_events,
5695 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
5696 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
5697 	.fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0,
5698 	.event_map		= intel_pmu_event_map,
5699 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
5700 	.apic			= 1,
5701 	.large_pebs_flags	= LARGE_PEBS_FLAGS,
5702 	/*
5703 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
5704 	 * so we install an artificial 1<<31 period regardless of
5705 	 * the generic event period:
5706 	 */
5707 	.max_period		= (1ULL << 31) - 1,
5708 	.get_event_constraints	= intel_get_event_constraints,
5709 	.put_event_constraints	= intel_put_event_constraints,
5710 	.pebs_aliases		= intel_pebs_aliases_core2,
5711 
5712 	.format_attrs		= intel_arch3_formats_attr,
5713 	.events_sysfs_show	= intel_event_sysfs_show,
5714 
5715 	.cpu_prepare		= intel_pmu_cpu_prepare,
5716 	.cpu_starting		= intel_pmu_cpu_starting,
5717 	.cpu_dying		= intel_pmu_cpu_dying,
5718 	.cpu_dead		= intel_pmu_cpu_dead,
5719 
5720 	.guest_get_msrs		= intel_guest_get_msrs,
5721 	.sched_task		= intel_pmu_sched_task,
5722 
5723 	.check_period		= intel_pmu_check_period,
5724 
5725 	.aux_output_match	= intel_pmu_aux_output_match,
5726 
5727 	.lbr_reset		= intel_pmu_lbr_reset_64,
5728 	.lbr_read		= intel_pmu_lbr_read_64,
5729 	.lbr_save		= intel_pmu_lbr_save,
5730 	.lbr_restore		= intel_pmu_lbr_restore,
5731 
5732 	/*
5733 	 * SMM has access to all 4 rings and while traditionally SMM code only
5734 	 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5735 	 *
5736 	 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5737 	 * between SMM or not, this results in what should be pure userspace
5738 	 * counters including SMM data.
5739 	 *
5740 	 * This is a clear privilege issue, therefore globally disable
5741 	 * counting SMM by default.
5742 	 */
5743 	.attr_freeze_on_smi	= 1,
5744 };
5745 
intel_clovertown_quirk(void)5746 static __init void intel_clovertown_quirk(void)
5747 {
5748 	/*
5749 	 * PEBS is unreliable due to:
5750 	 *
5751 	 *   AJ67  - PEBS may experience CPL leaks
5752 	 *   AJ68  - PEBS PMI may be delayed by one event
5753 	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5754 	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5755 	 *
5756 	 * AJ67 could be worked around by restricting the OS/USR flags.
5757 	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5758 	 *
5759 	 * AJ106 could possibly be worked around by not allowing LBR
5760 	 *       usage from PEBS, including the fixup.
5761 	 * AJ68  could possibly be worked around by always programming
5762 	 *	 a pebs_event_reset[0] value and coping with the lost events.
5763 	 *
5764 	 * But taken together it might just make sense to not enable PEBS on
5765 	 * these chips.
5766 	 */
5767 	pr_warn("PEBS disabled due to CPU errata\n");
5768 	x86_pmu.ds_pebs = 0;
5769 	x86_pmu.pebs_constraints = NULL;
5770 }
5771 
5772 static const struct x86_cpu_id isolation_ucodes[] = {
5773 	X86_MATCH_VFM_STEPS(INTEL_HASWELL,	 3,  3, 0x0000001f),
5774 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_L,	 1,  1, 0x0000001e),
5775 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_G,	 1,  1, 0x00000015),
5776 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 2,  2, 0x00000037),
5777 	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 4,  4, 0x0000000a),
5778 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL,	 4,  4, 0x00000023),
5779 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G,	 1,  1, 0x00000014),
5780 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 2,  2, 0x00000010),
5781 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 3,  3, 0x07000009),
5782 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 4,  4, 0x0f000009),
5783 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 5,  5, 0x0e000002),
5784 	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X,	 1,  1, 0x0b000014),
5785 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 3,  3, 0x00000021),
5786 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 4,  7, 0x00000000),
5787 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	11, 11, 0x00000000),
5788 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L,	 3,  3, 0x0000007c),
5789 	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE,	 3,  3, 0x0000007c),
5790 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE,	 9, 13, 0x0000004e),
5791 	X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L,	 9, 12, 0x0000004e),
5792 	{}
5793 };
5794 
intel_check_pebs_isolation(void)5795 static void intel_check_pebs_isolation(void)
5796 {
5797 	x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
5798 }
5799 
intel_pebs_isolation_quirk(void)5800 static __init void intel_pebs_isolation_quirk(void)
5801 {
5802 	WARN_ON_ONCE(x86_pmu.check_microcode);
5803 	x86_pmu.check_microcode = intel_check_pebs_isolation;
5804 	intel_check_pebs_isolation();
5805 }
5806 
5807 static const struct x86_cpu_id pebs_ucodes[] = {
5808 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE,	7, 7, 0x00000028),
5809 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	6, 6, 0x00000618),
5810 	X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	7, 7, 0x0000070c),
5811 	{}
5812 };
5813 
intel_snb_pebs_broken(void)5814 static bool intel_snb_pebs_broken(void)
5815 {
5816 	return !x86_match_min_microcode_rev(pebs_ucodes);
5817 }
5818 
intel_snb_check_microcode(void)5819 static void intel_snb_check_microcode(void)
5820 {
5821 	if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5822 		return;
5823 
5824 	/*
5825 	 * Serialized by the microcode lock..
5826 	 */
5827 	if (x86_pmu.pebs_broken) {
5828 		pr_info("PEBS enabled due to microcode update\n");
5829 		x86_pmu.pebs_broken = 0;
5830 	} else {
5831 		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5832 		x86_pmu.pebs_broken = 1;
5833 	}
5834 }
5835 
is_lbr_from(unsigned long msr)5836 static bool is_lbr_from(unsigned long msr)
5837 {
5838 	unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5839 
5840 	return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5841 }
5842 
5843 /*
5844  * Under certain circumstances, access certain MSR may cause #GP.
5845  * The function tests if the input MSR can be safely accessed.
5846  */
check_msr(unsigned long msr,u64 mask)5847 static bool check_msr(unsigned long msr, u64 mask)
5848 {
5849 	u64 val_old, val_new, val_tmp;
5850 
5851 	/*
5852 	 * Disable the check for real HW, so we don't
5853 	 * mess with potentially enabled registers:
5854 	 */
5855 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5856 		return true;
5857 
5858 	/*
5859 	 * Read the current value, change it and read it back to see if it
5860 	 * matches, this is needed to detect certain hardware emulators
5861 	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5862 	 */
5863 	if (rdmsrq_safe(msr, &val_old))
5864 		return false;
5865 
5866 	/*
5867 	 * Only change the bits which can be updated by wrmsrq.
5868 	 */
5869 	val_tmp = val_old ^ mask;
5870 
5871 	if (is_lbr_from(msr))
5872 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5873 
5874 	if (wrmsrq_safe(msr, val_tmp) ||
5875 	    rdmsrq_safe(msr, &val_new))
5876 		return false;
5877 
5878 	/*
5879 	 * Quirk only affects validation in wrmsr(), so wrmsrq()'s value
5880 	 * should equal rdmsrq()'s even with the quirk.
5881 	 */
5882 	if (val_new != val_tmp)
5883 		return false;
5884 
5885 	if (is_lbr_from(msr))
5886 		val_old = lbr_from_signext_quirk_wr(val_old);
5887 
5888 	/* Here it's sure that the MSR can be safely accessed.
5889 	 * Restore the old value and return.
5890 	 */
5891 	wrmsrq(msr, val_old);
5892 
5893 	return true;
5894 }
5895 
intel_sandybridge_quirk(void)5896 static __init void intel_sandybridge_quirk(void)
5897 {
5898 	x86_pmu.check_microcode = intel_snb_check_microcode;
5899 	cpus_read_lock();
5900 	intel_snb_check_microcode();
5901 	cpus_read_unlock();
5902 }
5903 
5904 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5905 	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5906 	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5907 	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5908 	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5909 	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5910 	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5911 	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5912 };
5913 
intel_arch_events_quirk(void)5914 static __init void intel_arch_events_quirk(void)
5915 {
5916 	int bit;
5917 
5918 	/* disable event that reported as not present by cpuid */
5919 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5920 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5921 		pr_warn("CPUID marked event: \'%s\' unavailable\n",
5922 			intel_arch_events_map[bit].name);
5923 	}
5924 }
5925 
intel_nehalem_quirk(void)5926 static __init void intel_nehalem_quirk(void)
5927 {
5928 	union cpuid10_ebx ebx;
5929 
5930 	ebx.full = x86_pmu.events_maskl;
5931 	if (ebx.split.no_branch_misses_retired) {
5932 		/*
5933 		 * Erratum AAJ80 detected, we work it around by using
5934 		 * the BR_MISP_EXEC.ANY event. This will over-count
5935 		 * branch-misses, but it's still much better than the
5936 		 * architectural event which is often completely bogus:
5937 		 */
5938 		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5939 		ebx.split.no_branch_misses_retired = 0;
5940 		x86_pmu.events_maskl = ebx.full;
5941 		pr_info("CPU erratum AAJ80 worked around\n");
5942 	}
5943 }
5944 
5945 /*
5946  * enable software workaround for errata:
5947  * SNB: BJ122
5948  * IVB: BV98
5949  * HSW: HSD29
5950  *
5951  * Only needed when HT is enabled. However detecting
5952  * if HT is enabled is difficult (model specific). So instead,
5953  * we enable the workaround in the early boot, and verify if
5954  * it is needed in a later initcall phase once we have valid
5955  * topology information to check if HT is actually enabled
5956  */
intel_ht_bug(void)5957 static __init void intel_ht_bug(void)
5958 {
5959 	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5960 
5961 	x86_pmu.start_scheduling = intel_start_scheduling;
5962 	x86_pmu.commit_scheduling = intel_commit_scheduling;
5963 	x86_pmu.stop_scheduling = intel_stop_scheduling;
5964 }
5965 
5966 EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
5967 EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82")
5968 
5969 /* Haswell special events */
5970 EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
5971 EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
5972 EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
5973 EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
5974 EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
5975 EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
5976 EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
5977 EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
5978 EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
5979 EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
5980 EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
5981 EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");
5982 
5983 static struct attribute *hsw_events_attrs[] = {
5984 	EVENT_PTR(td_slots_issued),
5985 	EVENT_PTR(td_slots_retired),
5986 	EVENT_PTR(td_fetch_bubbles),
5987 	EVENT_PTR(td_total_slots),
5988 	EVENT_PTR(td_total_slots_scale),
5989 	EVENT_PTR(td_recovery_bubbles),
5990 	EVENT_PTR(td_recovery_bubbles_scale),
5991 	NULL
5992 };
5993 
5994 static struct attribute *hsw_mem_events_attrs[] = {
5995 	EVENT_PTR(mem_ld_hsw),
5996 	EVENT_PTR(mem_st_hsw),
5997 	NULL,
5998 };
5999 
6000 static struct attribute *hsw_tsx_events_attrs[] = {
6001 	EVENT_PTR(tx_start),
6002 	EVENT_PTR(tx_commit),
6003 	EVENT_PTR(tx_abort),
6004 	EVENT_PTR(tx_capacity),
6005 	EVENT_PTR(tx_conflict),
6006 	EVENT_PTR(el_start),
6007 	EVENT_PTR(el_commit),
6008 	EVENT_PTR(el_abort),
6009 	EVENT_PTR(el_capacity),
6010 	EVENT_PTR(el_conflict),
6011 	EVENT_PTR(cycles_t),
6012 	EVENT_PTR(cycles_ct),
6013 	NULL
6014 };
6015 
6016 EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
6017 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
6018 EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
6019 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
6020 
6021 static struct attribute *icl_events_attrs[] = {
6022 	EVENT_PTR(mem_ld_hsw),
6023 	EVENT_PTR(mem_st_hsw),
6024 	NULL,
6025 };
6026 
6027 static struct attribute *icl_td_events_attrs[] = {
6028 	EVENT_PTR(slots),
6029 	EVENT_PTR(td_retiring),
6030 	EVENT_PTR(td_bad_spec),
6031 	EVENT_PTR(td_fe_bound),
6032 	EVENT_PTR(td_be_bound),
6033 	NULL,
6034 };
6035 
6036 static struct attribute *icl_tsx_events_attrs[] = {
6037 	EVENT_PTR(tx_start),
6038 	EVENT_PTR(tx_abort),
6039 	EVENT_PTR(tx_commit),
6040 	EVENT_PTR(tx_capacity_read),
6041 	EVENT_PTR(tx_capacity_write),
6042 	EVENT_PTR(tx_conflict),
6043 	EVENT_PTR(el_start),
6044 	EVENT_PTR(el_abort),
6045 	EVENT_PTR(el_commit),
6046 	EVENT_PTR(el_capacity_read),
6047 	EVENT_PTR(el_capacity_write),
6048 	EVENT_PTR(el_conflict),
6049 	EVENT_PTR(cycles_t),
6050 	EVENT_PTR(cycles_ct),
6051 	NULL,
6052 };
6053 
6054 
6055 EVENT_ATTR_STR(mem-stores,	mem_st_spr,	"event=0xcd,umask=0x2");
6056 EVENT_ATTR_STR(mem-loads-aux,	mem_ld_aux,	"event=0x03,umask=0x82");
6057 
6058 static struct attribute *glc_events_attrs[] = {
6059 	EVENT_PTR(mem_ld_hsw),
6060 	EVENT_PTR(mem_st_spr),
6061 	EVENT_PTR(mem_ld_aux),
6062 	NULL,
6063 };
6064 
6065 static struct attribute *glc_td_events_attrs[] = {
6066 	EVENT_PTR(slots),
6067 	EVENT_PTR(td_retiring),
6068 	EVENT_PTR(td_bad_spec),
6069 	EVENT_PTR(td_fe_bound),
6070 	EVENT_PTR(td_be_bound),
6071 	EVENT_PTR(td_heavy_ops),
6072 	EVENT_PTR(td_br_mispredict),
6073 	EVENT_PTR(td_fetch_lat),
6074 	EVENT_PTR(td_mem_bound),
6075 	NULL,
6076 };
6077 
6078 static struct attribute *glc_tsx_events_attrs[] = {
6079 	EVENT_PTR(tx_start),
6080 	EVENT_PTR(tx_abort),
6081 	EVENT_PTR(tx_commit),
6082 	EVENT_PTR(tx_capacity_read),
6083 	EVENT_PTR(tx_capacity_write),
6084 	EVENT_PTR(tx_conflict),
6085 	EVENT_PTR(cycles_t),
6086 	EVENT_PTR(cycles_ct),
6087 	NULL,
6088 };
6089 
freeze_on_smi_show(struct device * cdev,struct device_attribute * attr,char * buf)6090 static ssize_t freeze_on_smi_show(struct device *cdev,
6091 				  struct device_attribute *attr,
6092 				  char *buf)
6093 {
6094 	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
6095 }
6096 
6097 static DEFINE_MUTEX(freeze_on_smi_mutex);
6098 
freeze_on_smi_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)6099 static ssize_t freeze_on_smi_store(struct device *cdev,
6100 				   struct device_attribute *attr,
6101 				   const char *buf, size_t count)
6102 {
6103 	unsigned long val;
6104 	ssize_t ret;
6105 
6106 	ret = kstrtoul(buf, 0, &val);
6107 	if (ret)
6108 		return ret;
6109 
6110 	if (val > 1)
6111 		return -EINVAL;
6112 
6113 	mutex_lock(&freeze_on_smi_mutex);
6114 
6115 	if (x86_pmu.attr_freeze_on_smi == val)
6116 		goto done;
6117 
6118 	x86_pmu.attr_freeze_on_smi = val;
6119 
6120 	cpus_read_lock();
6121 	on_each_cpu(flip_smm_bit, &val, 1);
6122 	cpus_read_unlock();
6123 done:
6124 	mutex_unlock(&freeze_on_smi_mutex);
6125 
6126 	return count;
6127 }
6128 
update_tfa_sched(void * ignored)6129 static void update_tfa_sched(void *ignored)
6130 {
6131 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
6132 
6133 	/*
6134 	 * check if PMC3 is used
6135 	 * and if so force schedule out for all event types all contexts
6136 	 */
6137 	if (test_bit(3, cpuc->active_mask))
6138 		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
6139 }
6140 
show_sysctl_tfa(struct device * cdev,struct device_attribute * attr,char * buf)6141 static ssize_t show_sysctl_tfa(struct device *cdev,
6142 			      struct device_attribute *attr,
6143 			      char *buf)
6144 {
6145 	return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
6146 }
6147 
set_sysctl_tfa(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)6148 static ssize_t set_sysctl_tfa(struct device *cdev,
6149 			      struct device_attribute *attr,
6150 			      const char *buf, size_t count)
6151 {
6152 	bool val;
6153 	ssize_t ret;
6154 
6155 	ret = kstrtobool(buf, &val);
6156 	if (ret)
6157 		return ret;
6158 
6159 	/* no change */
6160 	if (val == allow_tsx_force_abort)
6161 		return count;
6162 
6163 	allow_tsx_force_abort = val;
6164 
6165 	cpus_read_lock();
6166 	on_each_cpu(update_tfa_sched, NULL, 1);
6167 	cpus_read_unlock();
6168 
6169 	return count;
6170 }
6171 
6172 
6173 static DEVICE_ATTR_RW(freeze_on_smi);
6174 
branches_show(struct device * cdev,struct device_attribute * attr,char * buf)6175 static ssize_t branches_show(struct device *cdev,
6176 			     struct device_attribute *attr,
6177 			     char *buf)
6178 {
6179 	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
6180 }
6181 
6182 static DEVICE_ATTR_RO(branches);
6183 
branch_counter_nr_show(struct device * cdev,struct device_attribute * attr,char * buf)6184 static ssize_t branch_counter_nr_show(struct device *cdev,
6185 				      struct device_attribute *attr,
6186 				      char *buf)
6187 {
6188 	return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
6189 }
6190 
6191 static DEVICE_ATTR_RO(branch_counter_nr);
6192 
branch_counter_width_show(struct device * cdev,struct device_attribute * attr,char * buf)6193 static ssize_t branch_counter_width_show(struct device *cdev,
6194 					 struct device_attribute *attr,
6195 					 char *buf)
6196 {
6197 	return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
6198 }
6199 
6200 static DEVICE_ATTR_RO(branch_counter_width);
6201 
6202 static struct attribute *lbr_attrs[] = {
6203 	&dev_attr_branches.attr,
6204 	&dev_attr_branch_counter_nr.attr,
6205 	&dev_attr_branch_counter_width.attr,
6206 	NULL
6207 };
6208 
6209 static umode_t
lbr_is_visible(struct kobject * kobj,struct attribute * attr,int i)6210 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6211 {
6212 	/* branches */
6213 	if (i == 0)
6214 		return x86_pmu.lbr_nr ? attr->mode : 0;
6215 
6216 	return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
6217 }
6218 
6219 static char pmu_name_str[30];
6220 
6221 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
6222 
6223 static struct attribute *intel_pmu_caps_attrs[] = {
6224 	&dev_attr_pmu_name.attr.attr,
6225 	NULL
6226 };
6227 
6228 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
6229 		   show_sysctl_tfa,
6230 		   set_sysctl_tfa);
6231 
6232 static struct attribute *intel_pmu_attrs[] = {
6233 	&dev_attr_freeze_on_smi.attr,
6234 	&dev_attr_allow_tsx_force_abort.attr,
6235 	NULL,
6236 };
6237 
6238 static umode_t
default_is_visible(struct kobject * kobj,struct attribute * attr,int i)6239 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6240 {
6241 	if (attr == &dev_attr_allow_tsx_force_abort.attr)
6242 		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
6243 
6244 	return attr->mode;
6245 }
6246 
6247 static umode_t
tsx_is_visible(struct kobject * kobj,struct attribute * attr,int i)6248 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6249 {
6250 	return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
6251 }
6252 
6253 static umode_t
pebs_is_visible(struct kobject * kobj,struct attribute * attr,int i)6254 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6255 {
6256 	return x86_pmu.ds_pebs ? attr->mode : 0;
6257 }
6258 
6259 static umode_t
mem_is_visible(struct kobject * kobj,struct attribute * attr,int i)6260 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6261 {
6262 	if (attr == &event_attr_mem_ld_aux.attr.attr)
6263 		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
6264 
6265 	return pebs_is_visible(kobj, attr, i);
6266 }
6267 
6268 static umode_t
exra_is_visible(struct kobject * kobj,struct attribute * attr,int i)6269 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6270 {
6271 	return x86_pmu.version >= 2 ? attr->mode : 0;
6272 }
6273 
6274 static umode_t
td_is_visible(struct kobject * kobj,struct attribute * attr,int i)6275 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6276 {
6277 	/*
6278 	 * Hide the perf metrics topdown events
6279 	 * if the feature is not enumerated.
6280 	 */
6281 	if (x86_pmu.num_topdown_events)
6282 		return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
6283 
6284 	return attr->mode;
6285 }
6286 
6287 PMU_FORMAT_ATTR(acr_mask,	"config2:0-63");
6288 
6289 static struct attribute *format_acr_attrs[] = {
6290 	&format_attr_acr_mask.attr,
6291 	NULL
6292 };
6293 
6294 static umode_t
acr_is_visible(struct kobject * kobj,struct attribute * attr,int i)6295 acr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
6296 {
6297 	struct device *dev = kobj_to_dev(kobj);
6298 
6299 	return intel_pmu_has_acr(dev_get_drvdata(dev)) ? attr->mode : 0;
6300 }
6301 
6302 static struct attribute_group group_events_td  = {
6303 	.name = "events",
6304 	.is_visible = td_is_visible,
6305 };
6306 
6307 static struct attribute_group group_events_mem = {
6308 	.name       = "events",
6309 	.is_visible = mem_is_visible,
6310 };
6311 
6312 static struct attribute_group group_events_tsx = {
6313 	.name       = "events",
6314 	.is_visible = tsx_is_visible,
6315 };
6316 
6317 static struct attribute_group group_caps_gen = {
6318 	.name  = "caps",
6319 	.attrs = intel_pmu_caps_attrs,
6320 };
6321 
6322 static struct attribute_group group_caps_lbr = {
6323 	.name       = "caps",
6324 	.attrs	    = lbr_attrs,
6325 	.is_visible = lbr_is_visible,
6326 };
6327 
6328 static struct attribute_group group_format_extra = {
6329 	.name       = "format",
6330 	.is_visible = exra_is_visible,
6331 };
6332 
6333 static struct attribute_group group_format_extra_skl = {
6334 	.name       = "format",
6335 	.is_visible = exra_is_visible,
6336 };
6337 
6338 static struct attribute_group group_format_evtsel_ext = {
6339 	.name       = "format",
6340 	.attrs      = format_evtsel_ext_attrs,
6341 	.is_visible = evtsel_ext_is_visible,
6342 };
6343 
6344 static struct attribute_group group_format_acr = {
6345 	.name       = "format",
6346 	.attrs      = format_acr_attrs,
6347 	.is_visible = acr_is_visible,
6348 };
6349 
6350 static struct attribute_group group_default = {
6351 	.attrs      = intel_pmu_attrs,
6352 	.is_visible = default_is_visible,
6353 };
6354 
6355 static const struct attribute_group *attr_update[] = {
6356 	&group_events_td,
6357 	&group_events_mem,
6358 	&group_events_tsx,
6359 	&group_caps_gen,
6360 	&group_caps_lbr,
6361 	&group_format_extra,
6362 	&group_format_extra_skl,
6363 	&group_format_evtsel_ext,
6364 	&group_format_acr,
6365 	&group_default,
6366 	NULL,
6367 };
6368 
6369 EVENT_ATTR_STR_HYBRID(slots,                 slots_adl,        "event=0x00,umask=0x4",                       hybrid_big);
6370 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_adl,  "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
6371 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,      td_bad_spec_adl,  "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
6372 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_adl,  "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
6373 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_adl,  "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
6374 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops,     td_heavy_ops_adl, "event=0x00,umask=0x84",                      hybrid_big);
6375 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl,    "event=0x00,umask=0x85",                      hybrid_big);
6376 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat,     td_fetch_lat_adl, "event=0x00,umask=0x86",                      hybrid_big);
6377 EVENT_ATTR_STR_HYBRID(topdown-mem-bound,     td_mem_bound_adl, "event=0x00,umask=0x87",                      hybrid_big);
6378 
6379 static struct attribute *adl_hybrid_events_attrs[] = {
6380 	EVENT_PTR(slots_adl),
6381 	EVENT_PTR(td_retiring_adl),
6382 	EVENT_PTR(td_bad_spec_adl),
6383 	EVENT_PTR(td_fe_bound_adl),
6384 	EVENT_PTR(td_be_bound_adl),
6385 	EVENT_PTR(td_heavy_ops_adl),
6386 	EVENT_PTR(td_br_mis_adl),
6387 	EVENT_PTR(td_fetch_lat_adl),
6388 	EVENT_PTR(td_mem_bound_adl),
6389 	NULL,
6390 };
6391 
6392 EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_lnl,  "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
6393 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_lnl,  "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
6394 EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_lnl,  "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
6395 
6396 static struct attribute *lnl_hybrid_events_attrs[] = {
6397 	EVENT_PTR(slots_adl),
6398 	EVENT_PTR(td_retiring_lnl),
6399 	EVENT_PTR(td_bad_spec_adl),
6400 	EVENT_PTR(td_fe_bound_lnl),
6401 	EVENT_PTR(td_be_bound_lnl),
6402 	EVENT_PTR(td_heavy_ops_adl),
6403 	EVENT_PTR(td_br_mis_adl),
6404 	EVENT_PTR(td_fetch_lat_adl),
6405 	EVENT_PTR(td_mem_bound_adl),
6406 	NULL
6407 };
6408 
6409 /* The event string must be in PMU IDX order. */
6410 EVENT_ATTR_STR_HYBRID(topdown-retiring,
6411 		      td_retiring_arl_h,
6412 		      "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
6413 		      hybrid_big_small_tiny);
6414 EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
6415 		      td_bad_spec_arl_h,
6416 		      "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
6417 		      hybrid_big_small_tiny);
6418 EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
6419 		      td_fe_bound_arl_h,
6420 		      "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
6421 		      hybrid_big_small_tiny);
6422 EVENT_ATTR_STR_HYBRID(topdown-be-bound,
6423 		      td_be_bound_arl_h,
6424 		      "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
6425 		      hybrid_big_small_tiny);
6426 
6427 static struct attribute *arl_h_hybrid_events_attrs[] = {
6428 	EVENT_PTR(slots_adl),
6429 	EVENT_PTR(td_retiring_arl_h),
6430 	EVENT_PTR(td_bad_spec_arl_h),
6431 	EVENT_PTR(td_fe_bound_arl_h),
6432 	EVENT_PTR(td_be_bound_arl_h),
6433 	EVENT_PTR(td_heavy_ops_adl),
6434 	EVENT_PTR(td_br_mis_adl),
6435 	EVENT_PTR(td_fetch_lat_adl),
6436 	EVENT_PTR(td_mem_bound_adl),
6437 	NULL,
6438 };
6439 
6440 /* Must be in IDX order */
6441 EVENT_ATTR_STR_HYBRID(mem-loads,     mem_ld_adl,     "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
6442 EVENT_ATTR_STR_HYBRID(mem-stores,    mem_st_adl,     "event=0xd0,umask=0x6;event=0xcd,umask=0x2",                 hybrid_big_small);
6443 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82",                                     hybrid_big);
6444 
6445 static struct attribute *adl_hybrid_mem_attrs[] = {
6446 	EVENT_PTR(mem_ld_adl),
6447 	EVENT_PTR(mem_st_adl),
6448 	EVENT_PTR(mem_ld_aux_adl),
6449 	NULL,
6450 };
6451 
6452 static struct attribute *mtl_hybrid_mem_attrs[] = {
6453 	EVENT_PTR(mem_ld_adl),
6454 	EVENT_PTR(mem_st_adl),
6455 	NULL
6456 };
6457 
6458 EVENT_ATTR_STR_HYBRID(mem-loads,
6459 		      mem_ld_arl_h,
6460 		      "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
6461 		      hybrid_big_small_tiny);
6462 EVENT_ATTR_STR_HYBRID(mem-stores,
6463 		      mem_st_arl_h,
6464 		      "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
6465 		      hybrid_big_small_tiny);
6466 
6467 static struct attribute *arl_h_hybrid_mem_attrs[] = {
6468 	EVENT_PTR(mem_ld_arl_h),
6469 	EVENT_PTR(mem_st_arl_h),
6470 	NULL,
6471 };
6472 
6473 EVENT_ATTR_STR_HYBRID(tx-start,          tx_start_adl,          "event=0xc9,umask=0x1",          hybrid_big);
6474 EVENT_ATTR_STR_HYBRID(tx-commit,         tx_commit_adl,         "event=0xc9,umask=0x2",          hybrid_big);
6475 EVENT_ATTR_STR_HYBRID(tx-abort,          tx_abort_adl,          "event=0xc9,umask=0x4",          hybrid_big);
6476 EVENT_ATTR_STR_HYBRID(tx-conflict,       tx_conflict_adl,       "event=0x54,umask=0x1",          hybrid_big);
6477 EVENT_ATTR_STR_HYBRID(cycles-t,          cycles_t_adl,          "event=0x3c,in_tx=1",            hybrid_big);
6478 EVENT_ATTR_STR_HYBRID(cycles-ct,         cycles_ct_adl,         "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
6479 EVENT_ATTR_STR_HYBRID(tx-capacity-read,  tx_capacity_read_adl,  "event=0x54,umask=0x80",         hybrid_big);
6480 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2",          hybrid_big);
6481 
6482 static struct attribute *adl_hybrid_tsx_attrs[] = {
6483 	EVENT_PTR(tx_start_adl),
6484 	EVENT_PTR(tx_abort_adl),
6485 	EVENT_PTR(tx_commit_adl),
6486 	EVENT_PTR(tx_capacity_read_adl),
6487 	EVENT_PTR(tx_capacity_write_adl),
6488 	EVENT_PTR(tx_conflict_adl),
6489 	EVENT_PTR(cycles_t_adl),
6490 	EVENT_PTR(cycles_ct_adl),
6491 	NULL,
6492 };
6493 
6494 FORMAT_ATTR_HYBRID(in_tx,       hybrid_big);
6495 FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big);
6496 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
6497 FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small_tiny);
6498 FORMAT_ATTR_HYBRID(frontend,    hybrid_big);
6499 
6500 #define ADL_HYBRID_RTM_FORMAT_ATTR	\
6501 	FORMAT_HYBRID_PTR(in_tx),	\
6502 	FORMAT_HYBRID_PTR(in_tx_cp)
6503 
6504 #define ADL_HYBRID_FORMAT_ATTR		\
6505 	FORMAT_HYBRID_PTR(offcore_rsp),	\
6506 	FORMAT_HYBRID_PTR(ldlat),	\
6507 	FORMAT_HYBRID_PTR(frontend)
6508 
6509 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
6510 	ADL_HYBRID_RTM_FORMAT_ATTR,
6511 	ADL_HYBRID_FORMAT_ATTR,
6512 	NULL
6513 };
6514 
6515 static struct attribute *adl_hybrid_extra_attr[] = {
6516 	ADL_HYBRID_FORMAT_ATTR,
6517 	NULL
6518 };
6519 
6520 FORMAT_ATTR_HYBRID(snoop_rsp,	hybrid_small_tiny);
6521 
6522 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
6523 	ADL_HYBRID_RTM_FORMAT_ATTR,
6524 	ADL_HYBRID_FORMAT_ATTR,
6525 	FORMAT_HYBRID_PTR(snoop_rsp),
6526 	NULL
6527 };
6528 
6529 static struct attribute *mtl_hybrid_extra_attr[] = {
6530 	ADL_HYBRID_FORMAT_ATTR,
6531 	FORMAT_HYBRID_PTR(snoop_rsp),
6532 	NULL
6533 };
6534 
is_attr_for_this_pmu(struct kobject * kobj,struct attribute * attr)6535 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
6536 {
6537 	struct device *dev = kobj_to_dev(kobj);
6538 	struct x86_hybrid_pmu *pmu =
6539 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6540 	struct perf_pmu_events_hybrid_attr *pmu_attr =
6541 		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
6542 
6543 	return pmu->pmu_type & pmu_attr->pmu_type;
6544 }
6545 
hybrid_events_is_visible(struct kobject * kobj,struct attribute * attr,int i)6546 static umode_t hybrid_events_is_visible(struct kobject *kobj,
6547 					struct attribute *attr, int i)
6548 {
6549 	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
6550 }
6551 
hybrid_find_supported_cpu(struct x86_hybrid_pmu * pmu)6552 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
6553 {
6554 	int cpu = cpumask_first(&pmu->supported_cpus);
6555 
6556 	return (cpu >= nr_cpu_ids) ? -1 : cpu;
6557 }
6558 
hybrid_tsx_is_visible(struct kobject * kobj,struct attribute * attr,int i)6559 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
6560 				     struct attribute *attr, int i)
6561 {
6562 	struct device *dev = kobj_to_dev(kobj);
6563 	struct x86_hybrid_pmu *pmu =
6564 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6565 	int cpu = hybrid_find_supported_cpu(pmu);
6566 
6567 	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
6568 }
6569 
hybrid_format_is_visible(struct kobject * kobj,struct attribute * attr,int i)6570 static umode_t hybrid_format_is_visible(struct kobject *kobj,
6571 					struct attribute *attr, int i)
6572 {
6573 	struct device *dev = kobj_to_dev(kobj);
6574 	struct x86_hybrid_pmu *pmu =
6575 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6576 	struct perf_pmu_format_hybrid_attr *pmu_attr =
6577 		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
6578 	int cpu = hybrid_find_supported_cpu(pmu);
6579 
6580 	return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
6581 }
6582 
hybrid_td_is_visible(struct kobject * kobj,struct attribute * attr,int i)6583 static umode_t hybrid_td_is_visible(struct kobject *kobj,
6584 				    struct attribute *attr, int i)
6585 {
6586 	struct device *dev = kobj_to_dev(kobj);
6587 	struct x86_hybrid_pmu *pmu =
6588 		 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6589 
6590 	if (!is_attr_for_this_pmu(kobj, attr))
6591 		return 0;
6592 
6593 
6594 	/* Only the big core supports perf metrics */
6595 	if (pmu->pmu_type == hybrid_big)
6596 		return pmu->intel_cap.perf_metrics ? attr->mode : 0;
6597 
6598 	return attr->mode;
6599 }
6600 
6601 static struct attribute_group hybrid_group_events_td  = {
6602 	.name		= "events",
6603 	.is_visible	= hybrid_td_is_visible,
6604 };
6605 
6606 static struct attribute_group hybrid_group_events_mem = {
6607 	.name		= "events",
6608 	.is_visible	= hybrid_events_is_visible,
6609 };
6610 
6611 static struct attribute_group hybrid_group_events_tsx = {
6612 	.name		= "events",
6613 	.is_visible	= hybrid_tsx_is_visible,
6614 };
6615 
6616 static struct attribute_group hybrid_group_format_extra = {
6617 	.name		= "format",
6618 	.is_visible	= hybrid_format_is_visible,
6619 };
6620 
intel_hybrid_get_attr_cpus(struct device * dev,struct device_attribute * attr,char * buf)6621 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
6622 					  struct device_attribute *attr,
6623 					  char *buf)
6624 {
6625 	struct x86_hybrid_pmu *pmu =
6626 		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6627 
6628 	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
6629 }
6630 
6631 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
6632 static struct attribute *intel_hybrid_cpus_attrs[] = {
6633 	&dev_attr_cpus.attr,
6634 	NULL,
6635 };
6636 
6637 static struct attribute_group hybrid_group_cpus = {
6638 	.attrs		= intel_hybrid_cpus_attrs,
6639 };
6640 
6641 static const struct attribute_group *hybrid_attr_update[] = {
6642 	&hybrid_group_events_td,
6643 	&hybrid_group_events_mem,
6644 	&hybrid_group_events_tsx,
6645 	&group_caps_gen,
6646 	&group_caps_lbr,
6647 	&hybrid_group_format_extra,
6648 	&group_format_evtsel_ext,
6649 	&group_format_acr,
6650 	&group_default,
6651 	&hybrid_group_cpus,
6652 	NULL,
6653 };
6654 
6655 static struct attribute *empty_attrs;
6656 
intel_pmu_check_event_constraints(struct event_constraint * event_constraints,u64 cntr_mask,u64 fixed_cntr_mask,u64 intel_ctrl)6657 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
6658 					      u64 cntr_mask,
6659 					      u64 fixed_cntr_mask,
6660 					      u64 intel_ctrl)
6661 {
6662 	struct event_constraint *c;
6663 
6664 	if (!event_constraints)
6665 		return;
6666 
6667 	/*
6668 	 * event on fixed counter2 (REF_CYCLES) only works on this
6669 	 * counter, so do not extend mask to generic counters
6670 	 */
6671 	for_each_event_constraint(c, event_constraints) {
6672 		/*
6673 		 * Don't extend the topdown slots and metrics
6674 		 * events to the generic counters.
6675 		 */
6676 		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
6677 			/*
6678 			 * Disable topdown slots and metrics events,
6679 			 * if slots event is not in CPUID.
6680 			 */
6681 			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
6682 				c->idxmsk64 = 0;
6683 			c->weight = hweight64(c->idxmsk64);
6684 			continue;
6685 		}
6686 
6687 		if (c->cmask == FIXED_EVENT_FLAGS) {
6688 			/* Disabled fixed counters which are not in CPUID */
6689 			c->idxmsk64 &= intel_ctrl;
6690 
6691 			/*
6692 			 * Don't extend the pseudo-encoding to the
6693 			 * generic counters
6694 			 */
6695 			if (!use_fixed_pseudo_encoding(c->code))
6696 				c->idxmsk64 |= cntr_mask;
6697 		}
6698 		c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
6699 		c->weight = hweight64(c->idxmsk64);
6700 	}
6701 }
6702 
intel_pmu_check_extra_regs(struct extra_reg * extra_regs)6703 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
6704 {
6705 	struct extra_reg *er;
6706 
6707 	/*
6708 	 * Access extra MSR may cause #GP under certain circumstances.
6709 	 * E.g. KVM doesn't support offcore event
6710 	 * Check all extra_regs here.
6711 	 */
6712 	if (!extra_regs)
6713 		return;
6714 
6715 	for (er = extra_regs; er->msr; er++) {
6716 		er->extra_msr_access = check_msr(er->msr, 0x11UL);
6717 		/* Disable LBR select mapping */
6718 		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
6719 			x86_pmu.lbr_sel_map = NULL;
6720 	}
6721 }
6722 
intel_pmu_v6_addr_offset(int index,bool eventsel)6723 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
6724 {
6725 	return MSR_IA32_PMC_V6_STEP * index;
6726 }
6727 
6728 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
6729 	{ hybrid_small,	"cpu_atom" },
6730 	{ hybrid_big,	"cpu_core" },
6731 	{ hybrid_tiny,	"cpu_lowpower" },
6732 };
6733 
intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)6734 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
6735 {
6736 	unsigned long pmus_mask = pmus;
6737 	struct x86_hybrid_pmu *pmu;
6738 	int idx = 0, bit;
6739 
6740 	x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
6741 	x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
6742 				     sizeof(struct x86_hybrid_pmu),
6743 				     GFP_KERNEL);
6744 	if (!x86_pmu.hybrid_pmu)
6745 		return -ENOMEM;
6746 
6747 	static_branch_enable(&perf_is_hybrid);
6748 	x86_pmu.filter = intel_pmu_filter;
6749 
6750 	for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
6751 		pmu = &x86_pmu.hybrid_pmu[idx++];
6752 		pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
6753 		pmu->name = intel_hybrid_pmu_type_map[bit].name;
6754 
6755 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
6756 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
6757 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
6758 		pmu->config_mask = X86_RAW_EVENT_MASK;
6759 		pmu->unconstrained = (struct event_constraint)
6760 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
6761 							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
6762 
6763 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6764 		if (pmu->pmu_type & hybrid_small_tiny) {
6765 			pmu->intel_cap.perf_metrics = 0;
6766 			pmu->mid_ack = true;
6767 		} else if (pmu->pmu_type & hybrid_big) {
6768 			pmu->intel_cap.perf_metrics = 1;
6769 			pmu->late_ack = true;
6770 		}
6771 	}
6772 
6773 	return 0;
6774 }
6775 
intel_pmu_ref_cycles_ext(void)6776 static __always_inline void intel_pmu_ref_cycles_ext(void)
6777 {
6778 	if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
6779 		intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
6780 }
6781 
intel_pmu_init_glc(struct pmu * pmu)6782 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
6783 {
6784 	x86_pmu.late_ack = true;
6785 	x86_pmu.limit_period = glc_limit_period;
6786 	x86_pmu.pebs_aliases = NULL;
6787 	x86_pmu.pebs_prec_dist = true;
6788 	x86_pmu.pebs_block = true;
6789 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6790 	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6791 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6792 	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6793 	x86_pmu.lbr_pt_coexist = true;
6794 	x86_pmu.num_topdown_events = 8;
6795 	static_call_update(intel_pmu_update_topdown_event,
6796 			   &icl_update_topdown_event);
6797 	static_call_update(intel_pmu_set_topdown_event_period,
6798 			   &icl_set_topdown_event_period);
6799 
6800 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6801 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6802 	hybrid(pmu, event_constraints) = intel_glc_event_constraints;
6803 	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
6804 
6805 	intel_pmu_ref_cycles_ext();
6806 }
6807 
intel_pmu_init_grt(struct pmu * pmu)6808 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
6809 {
6810 	x86_pmu.mid_ack = true;
6811 	x86_pmu.limit_period = glc_limit_period;
6812 	x86_pmu.pebs_aliases = NULL;
6813 	x86_pmu.pebs_prec_dist = true;
6814 	x86_pmu.pebs_block = true;
6815 	x86_pmu.lbr_pt_coexist = true;
6816 	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6817 	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6818 
6819 	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6820 	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6821 	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6822 	hybrid(pmu, event_constraints) = intel_grt_event_constraints;
6823 	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
6824 	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
6825 
6826 	intel_pmu_ref_cycles_ext();
6827 }
6828 
intel_pmu_init_lnc(struct pmu * pmu)6829 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
6830 {
6831 	intel_pmu_init_glc(pmu);
6832 	hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
6833 	hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
6834 	hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
6835 }
6836 
intel_pmu_init_skt(struct pmu * pmu)6837 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
6838 {
6839 	intel_pmu_init_grt(pmu);
6840 	hybrid(pmu, event_constraints) = intel_skt_event_constraints;
6841 	hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
6842 	static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
6843 }
6844 
intel_pmu_init(void)6845 __init int intel_pmu_init(void)
6846 {
6847 	struct attribute **extra_skl_attr = &empty_attrs;
6848 	struct attribute **extra_attr = &empty_attrs;
6849 	struct attribute **td_attr    = &empty_attrs;
6850 	struct attribute **mem_attr   = &empty_attrs;
6851 	struct attribute **tsx_attr   = &empty_attrs;
6852 	union cpuid10_edx edx;
6853 	union cpuid10_eax eax;
6854 	union cpuid10_ebx ebx;
6855 	unsigned int fixed_mask;
6856 	bool pmem = false;
6857 	int version, i;
6858 	char *name;
6859 	struct x86_hybrid_pmu *pmu;
6860 
6861 	/* Architectural Perfmon was introduced starting with Core "Yonah" */
6862 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
6863 		switch (boot_cpu_data.x86) {
6864 		case  6:
6865 			if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH)
6866 				return p6_pmu_init();
6867 			break;
6868 		case 11:
6869 			return knc_pmu_init();
6870 		case 15:
6871 			return p4_pmu_init();
6872 		}
6873 
6874 		pr_cont("unsupported CPU family %d model %d ",
6875 			boot_cpu_data.x86, boot_cpu_data.x86_model);
6876 		return -ENODEV;
6877 	}
6878 
6879 	/*
6880 	 * Check whether the Architectural PerfMon supports
6881 	 * Branch Misses Retired hw_event or not.
6882 	 */
6883 	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
6884 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
6885 		return -ENODEV;
6886 
6887 	version = eax.split.version_id;
6888 	if (version < 2)
6889 		x86_pmu = core_pmu;
6890 	else
6891 		x86_pmu = intel_pmu;
6892 
6893 	x86_pmu.version			= version;
6894 	x86_pmu.cntr_mask64		= GENMASK_ULL(eax.split.num_counters - 1, 0);
6895 	x86_pmu.cntval_bits		= eax.split.bit_width;
6896 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
6897 
6898 	x86_pmu.events_maskl		= ebx.full;
6899 	x86_pmu.events_mask_len		= eax.split.mask_length;
6900 
6901 	x86_pmu.pebs_events_mask	= intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
6902 	x86_pmu.pebs_capable		= PEBS_COUNTER_MASK;
6903 	x86_pmu.config_mask		= X86_RAW_EVENT_MASK;
6904 
6905 	/*
6906 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
6907 	 * assume at least 3 events, when not running in a hypervisor:
6908 	 */
6909 	if (version > 1 && version < 5) {
6910 		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
6911 
6912 		x86_pmu.fixed_cntr_mask64 =
6913 			GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
6914 	} else if (version >= 5)
6915 		x86_pmu.fixed_cntr_mask64 = fixed_mask;
6916 
6917 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
6918 		u64 capabilities;
6919 
6920 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities);
6921 		x86_pmu.intel_cap.capabilities = capabilities;
6922 	}
6923 
6924 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
6925 		x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
6926 		x86_pmu.lbr_read = intel_pmu_lbr_read_32;
6927 	}
6928 
6929 	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
6930 		intel_pmu_arch_lbr_init();
6931 
6932 	intel_pebs_init();
6933 
6934 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
6935 
6936 	if (version >= 5) {
6937 		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
6938 		if (x86_pmu.intel_cap.anythread_deprecated)
6939 			pr_cont(" AnyThread deprecated, ");
6940 	}
6941 
6942 	/*
6943 	 * Many features on and after V6 require dynamic constraint,
6944 	 * e.g., Arch PEBS, ACR.
6945 	 */
6946 	if (version >= 6)
6947 		x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
6948 	/*
6949 	 * Install the hw-cache-events table:
6950 	 */
6951 	switch (boot_cpu_data.x86_vfm) {
6952 	case INTEL_CORE_YONAH:
6953 		pr_cont("Core events, ");
6954 		name = "core";
6955 		break;
6956 
6957 	case INTEL_CORE2_MEROM:
6958 		x86_add_quirk(intel_clovertown_quirk);
6959 		fallthrough;
6960 
6961 	case INTEL_CORE2_MEROM_L:
6962 	case INTEL_CORE2_PENRYN:
6963 	case INTEL_CORE2_DUNNINGTON:
6964 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
6965 		       sizeof(hw_cache_event_ids));
6966 
6967 		intel_pmu_lbr_init_core();
6968 
6969 		x86_pmu.event_constraints = intel_core2_event_constraints;
6970 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
6971 		pr_cont("Core2 events, ");
6972 		name = "core2";
6973 		break;
6974 
6975 	case INTEL_NEHALEM:
6976 	case INTEL_NEHALEM_EP:
6977 	case INTEL_NEHALEM_EX:
6978 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
6979 		       sizeof(hw_cache_event_ids));
6980 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6981 		       sizeof(hw_cache_extra_regs));
6982 
6983 		intel_pmu_lbr_init_nhm();
6984 
6985 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
6986 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
6987 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6988 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
6989 		x86_pmu.limit_period = nhm_limit_period;
6990 
6991 		mem_attr = nhm_mem_events_attrs;
6992 
6993 		/* UOPS_ISSUED.STALLED_CYCLES */
6994 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6995 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6996 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6997 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6998 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6999 
7000 		intel_pmu_pebs_data_source_nhm();
7001 		x86_add_quirk(intel_nehalem_quirk);
7002 		x86_pmu.pebs_no_tlb = 1;
7003 		extra_attr = nhm_format_attr;
7004 
7005 		pr_cont("Nehalem events, ");
7006 		name = "nehalem";
7007 		break;
7008 
7009 	case INTEL_ATOM_BONNELL:
7010 	case INTEL_ATOM_BONNELL_MID:
7011 	case INTEL_ATOM_SALTWELL:
7012 	case INTEL_ATOM_SALTWELL_MID:
7013 	case INTEL_ATOM_SALTWELL_TABLET:
7014 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
7015 		       sizeof(hw_cache_event_ids));
7016 
7017 		intel_pmu_lbr_init_atom();
7018 
7019 		x86_pmu.event_constraints = intel_gen_event_constraints;
7020 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
7021 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
7022 		pr_cont("Atom events, ");
7023 		name = "bonnell";
7024 		break;
7025 
7026 	case INTEL_ATOM_SILVERMONT:
7027 	case INTEL_ATOM_SILVERMONT_D:
7028 	case INTEL_ATOM_SILVERMONT_MID:
7029 	case INTEL_ATOM_AIRMONT:
7030 	case INTEL_ATOM_SILVERMONT_MID2:
7031 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
7032 			sizeof(hw_cache_event_ids));
7033 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
7034 		       sizeof(hw_cache_extra_regs));
7035 
7036 		intel_pmu_lbr_init_slm();
7037 
7038 		x86_pmu.event_constraints = intel_slm_event_constraints;
7039 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7040 		x86_pmu.extra_regs = intel_slm_extra_regs;
7041 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7042 		td_attr = slm_events_attrs;
7043 		extra_attr = slm_format_attr;
7044 		pr_cont("Silvermont events, ");
7045 		name = "silvermont";
7046 		break;
7047 
7048 	case INTEL_ATOM_GOLDMONT:
7049 	case INTEL_ATOM_GOLDMONT_D:
7050 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
7051 		       sizeof(hw_cache_event_ids));
7052 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
7053 		       sizeof(hw_cache_extra_regs));
7054 
7055 		intel_pmu_lbr_init_skl();
7056 
7057 		x86_pmu.event_constraints = intel_slm_event_constraints;
7058 		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
7059 		x86_pmu.extra_regs = intel_glm_extra_regs;
7060 		/*
7061 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7062 		 * for precise cycles.
7063 		 * :pp is identical to :ppp
7064 		 */
7065 		x86_pmu.pebs_aliases = NULL;
7066 		x86_pmu.pebs_prec_dist = true;
7067 		x86_pmu.lbr_pt_coexist = true;
7068 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7069 		td_attr = glm_events_attrs;
7070 		extra_attr = slm_format_attr;
7071 		pr_cont("Goldmont events, ");
7072 		name = "goldmont";
7073 		break;
7074 
7075 	case INTEL_ATOM_GOLDMONT_PLUS:
7076 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7077 		       sizeof(hw_cache_event_ids));
7078 		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
7079 		       sizeof(hw_cache_extra_regs));
7080 
7081 		intel_pmu_lbr_init_skl();
7082 
7083 		x86_pmu.event_constraints = intel_slm_event_constraints;
7084 		x86_pmu.extra_regs = intel_glm_extra_regs;
7085 		/*
7086 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7087 		 * for precise cycles.
7088 		 */
7089 		x86_pmu.pebs_aliases = NULL;
7090 		x86_pmu.pebs_prec_dist = true;
7091 		x86_pmu.lbr_pt_coexist = true;
7092 		x86_pmu.pebs_capable = ~0ULL;
7093 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7094 		x86_pmu.flags |= PMU_FL_PEBS_ALL;
7095 		x86_pmu.get_event_constraints = glp_get_event_constraints;
7096 		td_attr = glm_events_attrs;
7097 		/* Goldmont Plus has 4-wide pipeline */
7098 		event_attr_td_total_slots_scale_glm.event_str = "4";
7099 		extra_attr = slm_format_attr;
7100 		pr_cont("Goldmont plus events, ");
7101 		name = "goldmont_plus";
7102 		break;
7103 
7104 	case INTEL_ATOM_TREMONT_D:
7105 	case INTEL_ATOM_TREMONT:
7106 	case INTEL_ATOM_TREMONT_L:
7107 		x86_pmu.late_ack = true;
7108 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
7109 		       sizeof(hw_cache_event_ids));
7110 		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
7111 		       sizeof(hw_cache_extra_regs));
7112 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7113 
7114 		intel_pmu_lbr_init_skl();
7115 
7116 		x86_pmu.event_constraints = intel_slm_event_constraints;
7117 		x86_pmu.extra_regs = intel_tnt_extra_regs;
7118 		/*
7119 		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
7120 		 * for precise cycles.
7121 		 */
7122 		x86_pmu.pebs_aliases = NULL;
7123 		x86_pmu.pebs_prec_dist = true;
7124 		x86_pmu.lbr_pt_coexist = true;
7125 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7126 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
7127 		td_attr = tnt_events_attrs;
7128 		extra_attr = slm_format_attr;
7129 		pr_cont("Tremont events, ");
7130 		name = "Tremont";
7131 		break;
7132 
7133 	case INTEL_ATOM_GRACEMONT:
7134 		intel_pmu_init_grt(NULL);
7135 		intel_pmu_pebs_data_source_grt();
7136 		x86_pmu.pebs_latency_data = grt_latency_data;
7137 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
7138 		td_attr = tnt_events_attrs;
7139 		mem_attr = grt_mem_attrs;
7140 		extra_attr = nhm_format_attr;
7141 		pr_cont("Gracemont events, ");
7142 		name = "gracemont";
7143 		break;
7144 
7145 	case INTEL_ATOM_CRESTMONT:
7146 	case INTEL_ATOM_CRESTMONT_X:
7147 		intel_pmu_init_grt(NULL);
7148 		x86_pmu.extra_regs = intel_cmt_extra_regs;
7149 		intel_pmu_pebs_data_source_cmt();
7150 		x86_pmu.pebs_latency_data = cmt_latency_data;
7151 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
7152 		td_attr = cmt_events_attrs;
7153 		mem_attr = grt_mem_attrs;
7154 		extra_attr = cmt_format_attr;
7155 		pr_cont("Crestmont events, ");
7156 		name = "crestmont";
7157 		break;
7158 
7159 	case INTEL_ATOM_DARKMONT_X:
7160 		intel_pmu_init_skt(NULL);
7161 		intel_pmu_pebs_data_source_cmt();
7162 		x86_pmu.pebs_latency_data = cmt_latency_data;
7163 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
7164 		td_attr = skt_events_attrs;
7165 		mem_attr = grt_mem_attrs;
7166 		extra_attr = cmt_format_attr;
7167 		pr_cont("Darkmont events, ");
7168 		name = "darkmont";
7169 		break;
7170 
7171 	case INTEL_WESTMERE:
7172 	case INTEL_WESTMERE_EP:
7173 	case INTEL_WESTMERE_EX:
7174 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
7175 		       sizeof(hw_cache_event_ids));
7176 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
7177 		       sizeof(hw_cache_extra_regs));
7178 
7179 		intel_pmu_lbr_init_nhm();
7180 
7181 		x86_pmu.event_constraints = intel_westmere_event_constraints;
7182 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
7183 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
7184 		x86_pmu.extra_regs = intel_westmere_extra_regs;
7185 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7186 
7187 		mem_attr = nhm_mem_events_attrs;
7188 
7189 		/* UOPS_ISSUED.STALLED_CYCLES */
7190 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7191 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7192 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
7193 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7194 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
7195 
7196 		intel_pmu_pebs_data_source_nhm();
7197 		extra_attr = nhm_format_attr;
7198 		pr_cont("Westmere events, ");
7199 		name = "westmere";
7200 		break;
7201 
7202 	case INTEL_SANDYBRIDGE:
7203 	case INTEL_SANDYBRIDGE_X:
7204 		x86_add_quirk(intel_sandybridge_quirk);
7205 		x86_add_quirk(intel_ht_bug);
7206 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7207 		       sizeof(hw_cache_event_ids));
7208 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7209 		       sizeof(hw_cache_extra_regs));
7210 
7211 		intel_pmu_lbr_init_snb();
7212 
7213 		x86_pmu.event_constraints = intel_snb_event_constraints;
7214 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
7215 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
7216 		if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
7217 			x86_pmu.extra_regs = intel_snbep_extra_regs;
7218 		else
7219 			x86_pmu.extra_regs = intel_snb_extra_regs;
7220 
7221 
7222 		/* all extra regs are per-cpu when HT is on */
7223 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7224 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7225 
7226 		td_attr  = snb_events_attrs;
7227 		mem_attr = snb_mem_events_attrs;
7228 
7229 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7230 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7231 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7232 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
7233 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
7234 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
7235 
7236 		extra_attr = nhm_format_attr;
7237 
7238 		pr_cont("SandyBridge events, ");
7239 		name = "sandybridge";
7240 		break;
7241 
7242 	case INTEL_IVYBRIDGE:
7243 	case INTEL_IVYBRIDGE_X:
7244 		x86_add_quirk(intel_ht_bug);
7245 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
7246 		       sizeof(hw_cache_event_ids));
7247 		/* dTLB-load-misses on IVB is different than SNB */
7248 		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
7249 
7250 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
7251 		       sizeof(hw_cache_extra_regs));
7252 
7253 		intel_pmu_lbr_init_snb();
7254 
7255 		x86_pmu.event_constraints = intel_ivb_event_constraints;
7256 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
7257 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7258 		x86_pmu.pebs_prec_dist = true;
7259 		if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
7260 			x86_pmu.extra_regs = intel_snbep_extra_regs;
7261 		else
7262 			x86_pmu.extra_regs = intel_snb_extra_regs;
7263 		/* all extra regs are per-cpu when HT is on */
7264 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7265 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7266 
7267 		td_attr  = snb_events_attrs;
7268 		mem_attr = snb_mem_events_attrs;
7269 
7270 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
7271 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
7272 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
7273 
7274 		extra_attr = nhm_format_attr;
7275 
7276 		pr_cont("IvyBridge events, ");
7277 		name = "ivybridge";
7278 		break;
7279 
7280 
7281 	case INTEL_HASWELL:
7282 	case INTEL_HASWELL_X:
7283 	case INTEL_HASWELL_L:
7284 	case INTEL_HASWELL_G:
7285 		x86_add_quirk(intel_ht_bug);
7286 		x86_add_quirk(intel_pebs_isolation_quirk);
7287 		x86_pmu.late_ack = true;
7288 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7289 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7290 
7291 		intel_pmu_lbr_init_hsw();
7292 
7293 		x86_pmu.event_constraints = intel_hsw_event_constraints;
7294 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
7295 		x86_pmu.extra_regs = intel_snbep_extra_regs;
7296 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7297 		x86_pmu.pebs_prec_dist = true;
7298 		/* all extra regs are per-cpu when HT is on */
7299 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7300 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7301 
7302 		x86_pmu.hw_config = hsw_hw_config;
7303 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7304 		x86_pmu.limit_period = hsw_limit_period;
7305 		x86_pmu.lbr_double_abort = true;
7306 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7307 			hsw_format_attr : nhm_format_attr;
7308 		td_attr  = hsw_events_attrs;
7309 		mem_attr = hsw_mem_events_attrs;
7310 		tsx_attr = hsw_tsx_events_attrs;
7311 		pr_cont("Haswell events, ");
7312 		name = "haswell";
7313 		break;
7314 
7315 	case INTEL_BROADWELL:
7316 	case INTEL_BROADWELL_D:
7317 	case INTEL_BROADWELL_G:
7318 	case INTEL_BROADWELL_X:
7319 		x86_add_quirk(intel_pebs_isolation_quirk);
7320 		x86_pmu.late_ack = true;
7321 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7322 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7323 
7324 		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
7325 		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
7326 									 BDW_L3_MISS|HSW_SNOOP_DRAM;
7327 		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
7328 									  HSW_SNOOP_DRAM;
7329 		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
7330 									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
7331 		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
7332 									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
7333 
7334 		intel_pmu_lbr_init_hsw();
7335 
7336 		x86_pmu.event_constraints = intel_bdw_event_constraints;
7337 		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
7338 		x86_pmu.extra_regs = intel_snbep_extra_regs;
7339 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
7340 		x86_pmu.pebs_prec_dist = true;
7341 		/* all extra regs are per-cpu when HT is on */
7342 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7343 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7344 
7345 		x86_pmu.hw_config = hsw_hw_config;
7346 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7347 		x86_pmu.limit_period = bdw_limit_period;
7348 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7349 			hsw_format_attr : nhm_format_attr;
7350 		td_attr  = hsw_events_attrs;
7351 		mem_attr = hsw_mem_events_attrs;
7352 		tsx_attr = hsw_tsx_events_attrs;
7353 		pr_cont("Broadwell events, ");
7354 		name = "broadwell";
7355 		break;
7356 
7357 	case INTEL_XEON_PHI_KNL:
7358 	case INTEL_XEON_PHI_KNM:
7359 		memcpy(hw_cache_event_ids,
7360 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7361 		memcpy(hw_cache_extra_regs,
7362 		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7363 		intel_pmu_lbr_init_knl();
7364 
7365 		x86_pmu.event_constraints = intel_slm_event_constraints;
7366 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
7367 		x86_pmu.extra_regs = intel_knl_extra_regs;
7368 
7369 		/* all extra regs are per-cpu when HT is on */
7370 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7371 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7372 		extra_attr = slm_format_attr;
7373 		pr_cont("Knights Landing/Mill events, ");
7374 		name = "knights-landing";
7375 		break;
7376 
7377 	case INTEL_SKYLAKE_X:
7378 		pmem = true;
7379 		fallthrough;
7380 	case INTEL_SKYLAKE_L:
7381 	case INTEL_SKYLAKE:
7382 	case INTEL_KABYLAKE_L:
7383 	case INTEL_KABYLAKE:
7384 	case INTEL_COMETLAKE_L:
7385 	case INTEL_COMETLAKE:
7386 		x86_add_quirk(intel_pebs_isolation_quirk);
7387 		x86_pmu.late_ack = true;
7388 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7389 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7390 		intel_pmu_lbr_init_skl();
7391 
7392 		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
7393 		event_attr_td_recovery_bubbles.event_str_noht =
7394 			"event=0xd,umask=0x1,cmask=1";
7395 		event_attr_td_recovery_bubbles.event_str_ht =
7396 			"event=0xd,umask=0x1,cmask=1,any=1";
7397 
7398 		x86_pmu.event_constraints = intel_skl_event_constraints;
7399 		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
7400 		x86_pmu.extra_regs = intel_skl_extra_regs;
7401 		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
7402 		x86_pmu.pebs_prec_dist = true;
7403 		/* all extra regs are per-cpu when HT is on */
7404 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7405 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7406 
7407 		x86_pmu.hw_config = hsw_hw_config;
7408 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
7409 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7410 			hsw_format_attr : nhm_format_attr;
7411 		extra_skl_attr = skl_format_attr;
7412 		td_attr  = hsw_events_attrs;
7413 		mem_attr = hsw_mem_events_attrs;
7414 		tsx_attr = hsw_tsx_events_attrs;
7415 		intel_pmu_pebs_data_source_skl(pmem);
7416 
7417 		/*
7418 		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
7419 		 * TSX force abort hooks are not required on these systems. Only deploy
7420 		 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
7421 		 */
7422 		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
7423 		   !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
7424 			x86_pmu.flags |= PMU_FL_TFA;
7425 			x86_pmu.get_event_constraints = tfa_get_event_constraints;
7426 			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
7427 			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
7428 		}
7429 
7430 		pr_cont("Skylake events, ");
7431 		name = "skylake";
7432 		break;
7433 
7434 	case INTEL_ICELAKE_X:
7435 	case INTEL_ICELAKE_D:
7436 		x86_pmu.pebs_ept = 1;
7437 		pmem = true;
7438 		fallthrough;
7439 	case INTEL_ICELAKE_L:
7440 	case INTEL_ICELAKE:
7441 	case INTEL_TIGERLAKE_L:
7442 	case INTEL_TIGERLAKE:
7443 	case INTEL_ROCKETLAKE:
7444 		x86_pmu.late_ack = true;
7445 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
7446 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
7447 		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
7448 		intel_pmu_lbr_init_skl();
7449 
7450 		x86_pmu.event_constraints = intel_icl_event_constraints;
7451 		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
7452 		x86_pmu.extra_regs = intel_icl_extra_regs;
7453 		x86_pmu.pebs_aliases = NULL;
7454 		x86_pmu.pebs_prec_dist = true;
7455 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
7456 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
7457 
7458 		x86_pmu.hw_config = hsw_hw_config;
7459 		x86_pmu.get_event_constraints = icl_get_event_constraints;
7460 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7461 			hsw_format_attr : nhm_format_attr;
7462 		extra_skl_attr = skl_format_attr;
7463 		mem_attr = icl_events_attrs;
7464 		td_attr = icl_td_events_attrs;
7465 		tsx_attr = icl_tsx_events_attrs;
7466 		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
7467 		x86_pmu.lbr_pt_coexist = true;
7468 		intel_pmu_pebs_data_source_skl(pmem);
7469 		x86_pmu.num_topdown_events = 4;
7470 		static_call_update(intel_pmu_update_topdown_event,
7471 				   &icl_update_topdown_event);
7472 		static_call_update(intel_pmu_set_topdown_event_period,
7473 				   &icl_set_topdown_event_period);
7474 		pr_cont("Icelake events, ");
7475 		name = "icelake";
7476 		break;
7477 
7478 	case INTEL_SAPPHIRERAPIDS_X:
7479 	case INTEL_EMERALDRAPIDS_X:
7480 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7481 		x86_pmu.extra_regs = intel_glc_extra_regs;
7482 		pr_cont("Sapphire Rapids events, ");
7483 		name = "sapphire_rapids";
7484 		goto glc_common;
7485 
7486 	case INTEL_GRANITERAPIDS_X:
7487 	case INTEL_GRANITERAPIDS_D:
7488 		x86_pmu.extra_regs = intel_rwc_extra_regs;
7489 		pr_cont("Granite Rapids events, ");
7490 		name = "granite_rapids";
7491 
7492 	glc_common:
7493 		intel_pmu_init_glc(NULL);
7494 		x86_pmu.pebs_ept = 1;
7495 		x86_pmu.hw_config = hsw_hw_config;
7496 		x86_pmu.get_event_constraints = glc_get_event_constraints;
7497 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7498 			hsw_format_attr : nhm_format_attr;
7499 		extra_skl_attr = skl_format_attr;
7500 		mem_attr = glc_events_attrs;
7501 		td_attr = glc_td_events_attrs;
7502 		tsx_attr = glc_tsx_events_attrs;
7503 		intel_pmu_pebs_data_source_skl(true);
7504 		break;
7505 
7506 	case INTEL_ALDERLAKE:
7507 	case INTEL_ALDERLAKE_L:
7508 	case INTEL_RAPTORLAKE:
7509 	case INTEL_RAPTORLAKE_P:
7510 	case INTEL_RAPTORLAKE_S:
7511 		/*
7512 		 * Alder Lake has 2 types of CPU, core and atom.
7513 		 *
7514 		 * Initialize the common PerfMon capabilities here.
7515 		 */
7516 		intel_pmu_init_hybrid(hybrid_big_small);
7517 
7518 		x86_pmu.pebs_latency_data = grt_latency_data;
7519 		x86_pmu.get_event_constraints = adl_get_event_constraints;
7520 		x86_pmu.hw_config = adl_hw_config;
7521 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
7522 
7523 		td_attr = adl_hybrid_events_attrs;
7524 		mem_attr = adl_hybrid_mem_attrs;
7525 		tsx_attr = adl_hybrid_tsx_attrs;
7526 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7527 			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
7528 
7529 		/* Initialize big core specific PerfMon capabilities.*/
7530 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7531 		intel_pmu_init_glc(&pmu->pmu);
7532 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
7533 			pmu->cntr_mask64 <<= 2;
7534 			pmu->cntr_mask64 |= 0x3;
7535 			pmu->fixed_cntr_mask64 <<= 1;
7536 			pmu->fixed_cntr_mask64 |= 0x1;
7537 		} else {
7538 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7539 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7540 		}
7541 
7542 		/*
7543 		 * Quirk: For some Alder Lake machine, when all E-cores are disabled in
7544 		 * a BIOS, the leaf 0xA will enumerate all counters of P-cores. However,
7545 		 * the X86_FEATURE_HYBRID_CPU is still set. The above codes will
7546 		 * mistakenly add extra counters for P-cores. Correct the number of
7547 		 * counters here.
7548 		 */
7549 		if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
7550 			pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7551 			pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7552 		}
7553 
7554 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7555 		pmu->unconstrained = (struct event_constraint)
7556 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7557 				     0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7558 
7559 		pmu->extra_regs = intel_glc_extra_regs;
7560 
7561 		/* Initialize Atom core specific PerfMon capabilities.*/
7562 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7563 		intel_pmu_init_grt(&pmu->pmu);
7564 
7565 		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7566 		intel_pmu_pebs_data_source_adl();
7567 		pr_cont("Alderlake Hybrid events, ");
7568 		name = "alderlake_hybrid";
7569 		break;
7570 
7571 	case INTEL_METEORLAKE:
7572 	case INTEL_METEORLAKE_L:
7573 	case INTEL_ARROWLAKE_U:
7574 		intel_pmu_init_hybrid(hybrid_big_small);
7575 
7576 		x86_pmu.pebs_latency_data = cmt_latency_data;
7577 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7578 		x86_pmu.hw_config = adl_hw_config;
7579 
7580 		td_attr = adl_hybrid_events_attrs;
7581 		mem_attr = mtl_hybrid_mem_attrs;
7582 		tsx_attr = adl_hybrid_tsx_attrs;
7583 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7584 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7585 
7586 		/* Initialize big core specific PerfMon capabilities.*/
7587 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7588 		intel_pmu_init_glc(&pmu->pmu);
7589 		pmu->extra_regs = intel_rwc_extra_regs;
7590 
7591 		/* Initialize Atom core specific PerfMon capabilities.*/
7592 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7593 		intel_pmu_init_grt(&pmu->pmu);
7594 		pmu->extra_regs = intel_cmt_extra_regs;
7595 
7596 		intel_pmu_pebs_data_source_mtl();
7597 		pr_cont("Meteorlake Hybrid events, ");
7598 		name = "meteorlake_hybrid";
7599 		break;
7600 
7601 	case INTEL_PANTHERLAKE_L:
7602 		pr_cont("Pantherlake Hybrid events, ");
7603 		name = "pantherlake_hybrid";
7604 		goto lnl_common;
7605 
7606 	case INTEL_LUNARLAKE_M:
7607 	case INTEL_ARROWLAKE:
7608 		pr_cont("Lunarlake Hybrid events, ");
7609 		name = "lunarlake_hybrid";
7610 
7611 	lnl_common:
7612 		intel_pmu_init_hybrid(hybrid_big_small);
7613 
7614 		x86_pmu.pebs_latency_data = lnl_latency_data;
7615 		x86_pmu.get_event_constraints = mtl_get_event_constraints;
7616 		x86_pmu.hw_config = adl_hw_config;
7617 
7618 		td_attr = lnl_hybrid_events_attrs;
7619 		mem_attr = mtl_hybrid_mem_attrs;
7620 		tsx_attr = adl_hybrid_tsx_attrs;
7621 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7622 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7623 
7624 		/* Initialize big core specific PerfMon capabilities.*/
7625 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7626 		intel_pmu_init_lnc(&pmu->pmu);
7627 
7628 		/* Initialize Atom core specific PerfMon capabilities.*/
7629 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7630 		intel_pmu_init_skt(&pmu->pmu);
7631 
7632 		intel_pmu_pebs_data_source_lnl();
7633 		break;
7634 
7635 	case INTEL_ARROWLAKE_H:
7636 		intel_pmu_init_hybrid(hybrid_big_small_tiny);
7637 
7638 		x86_pmu.pebs_latency_data = arl_h_latency_data;
7639 		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
7640 		x86_pmu.hw_config = arl_h_hw_config;
7641 
7642 		td_attr = arl_h_hybrid_events_attrs;
7643 		mem_attr = arl_h_hybrid_mem_attrs;
7644 		tsx_attr = adl_hybrid_tsx_attrs;
7645 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7646 			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7647 
7648 		/* Initialize big core specific PerfMon capabilities. */
7649 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7650 		intel_pmu_init_lnc(&pmu->pmu);
7651 
7652 		/* Initialize Atom core specific PerfMon capabilities. */
7653 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7654 		intel_pmu_init_skt(&pmu->pmu);
7655 
7656 		/* Initialize Lower Power Atom specific PerfMon capabilities. */
7657 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
7658 		intel_pmu_init_grt(&pmu->pmu);
7659 		pmu->extra_regs = intel_cmt_extra_regs;
7660 
7661 		intel_pmu_pebs_data_source_arl_h();
7662 		pr_cont("ArrowLake-H Hybrid events, ");
7663 		name = "arrowlake_h_hybrid";
7664 		break;
7665 
7666 	default:
7667 		switch (x86_pmu.version) {
7668 		case 1:
7669 			x86_pmu.event_constraints = intel_v1_event_constraints;
7670 			pr_cont("generic architected perfmon v1, ");
7671 			name = "generic_arch_v1";
7672 			break;
7673 		case 2:
7674 		case 3:
7675 		case 4:
7676 			/*
7677 			 * default constraints for v2 and up
7678 			 */
7679 			x86_pmu.event_constraints = intel_gen_event_constraints;
7680 			pr_cont("generic architected perfmon, ");
7681 			name = "generic_arch_v2+";
7682 			break;
7683 		default:
7684 			/*
7685 			 * The default constraints for v5 and up can support up to
7686 			 * 16 fixed counters. For the fixed counters 4 and later,
7687 			 * the pseudo-encoding is applied.
7688 			 * The constraints may be cut according to the CPUID enumeration
7689 			 * by inserting the EVENT_CONSTRAINT_END.
7690 			 */
7691 			if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
7692 				x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
7693 			intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
7694 			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
7695 			pr_cont("generic architected perfmon, ");
7696 			name = "generic_arch_v5+";
7697 			break;
7698 		}
7699 	}
7700 
7701 	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
7702 
7703 	if (!is_hybrid()) {
7704 		group_events_td.attrs  = td_attr;
7705 		group_events_mem.attrs = mem_attr;
7706 		group_events_tsx.attrs = tsx_attr;
7707 		group_format_extra.attrs = extra_attr;
7708 		group_format_extra_skl.attrs = extra_skl_attr;
7709 
7710 		x86_pmu.attr_update = attr_update;
7711 	} else {
7712 		hybrid_group_events_td.attrs  = td_attr;
7713 		hybrid_group_events_mem.attrs = mem_attr;
7714 		hybrid_group_events_tsx.attrs = tsx_attr;
7715 		hybrid_group_format_extra.attrs = extra_attr;
7716 
7717 		x86_pmu.attr_update = hybrid_attr_update;
7718 	}
7719 
7720 	/*
7721 	 * The archPerfmonExt (0x23) includes an enhanced enumeration of
7722 	 * PMU architectural features with a per-core view. For non-hybrid,
7723 	 * each core has the same PMU capabilities. It's good enough to
7724 	 * update the x86_pmu from the booting CPU. For hybrid, the x86_pmu
7725 	 * is used to keep the common capabilities. Still keep the values
7726 	 * from the leaf 0xa. The core specific update will be done later
7727 	 * when a new type is online.
7728 	 */
7729 	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
7730 		update_pmu_cap(NULL);
7731 
7732 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
7733 				      &x86_pmu.fixed_cntr_mask64,
7734 				      &x86_pmu.intel_ctrl);
7735 
7736 	/* AnyThread may be deprecated on arch perfmon v5 or later */
7737 	if (x86_pmu.intel_cap.anythread_deprecated)
7738 		x86_pmu.format_attrs = intel_arch_formats_attr;
7739 
7740 	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
7741 					  x86_pmu.cntr_mask64,
7742 					  x86_pmu.fixed_cntr_mask64,
7743 					  x86_pmu.intel_ctrl);
7744 	/*
7745 	 * Access LBR MSR may cause #GP under certain circumstances.
7746 	 * Check all LBR MSR here.
7747 	 * Disable LBR access if any LBR MSRs can not be accessed.
7748 	 */
7749 	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
7750 		x86_pmu.lbr_nr = 0;
7751 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
7752 		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
7753 		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
7754 			x86_pmu.lbr_nr = 0;
7755 	}
7756 
7757 	if (x86_pmu.lbr_nr) {
7758 		intel_pmu_lbr_init();
7759 
7760 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
7761 
7762 		/* only support branch_stack snapshot for perfmon >= v2 */
7763 		if (x86_pmu.disable_all == intel_pmu_disable_all) {
7764 			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
7765 				static_call_update(perf_snapshot_branch_stack,
7766 						   intel_pmu_snapshot_arch_branch_stack);
7767 			} else {
7768 				static_call_update(perf_snapshot_branch_stack,
7769 						   intel_pmu_snapshot_branch_stack);
7770 			}
7771 		}
7772 	}
7773 
7774 	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
7775 
7776 	/* Support full width counters using alternative MSR range */
7777 	if (x86_pmu.intel_cap.full_width_write) {
7778 		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
7779 		x86_pmu.perfctr = MSR_IA32_PMC0;
7780 		pr_cont("full-width counters, ");
7781 	}
7782 
7783 	/* Support V6+ MSR Aliasing */
7784 	if (x86_pmu.version >= 6) {
7785 		x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
7786 		x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
7787 		x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
7788 		x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
7789 	}
7790 
7791 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
7792 		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
7793 
7794 	if (x86_pmu.intel_cap.pebs_timing_info)
7795 		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
7796 
7797 	intel_aux_output_init();
7798 
7799 	return 0;
7800 }
7801 
7802 /*
7803  * HT bug: phase 2 init
7804  * Called once we have valid topology information to check
7805  * whether or not HT is enabled
7806  * If HT is off, then we disable the workaround
7807  */
fixup_ht_bug(void)7808 static __init int fixup_ht_bug(void)
7809 {
7810 	int c;
7811 	/*
7812 	 * problem not present on this CPU model, nothing to do
7813 	 */
7814 	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
7815 		return 0;
7816 
7817 	if (topology_max_smt_threads() > 1) {
7818 		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
7819 		return 0;
7820 	}
7821 
7822 	cpus_read_lock();
7823 
7824 	hardlockup_detector_perf_stop();
7825 
7826 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
7827 
7828 	x86_pmu.start_scheduling = NULL;
7829 	x86_pmu.commit_scheduling = NULL;
7830 	x86_pmu.stop_scheduling = NULL;
7831 
7832 	hardlockup_detector_perf_restart();
7833 
7834 	for_each_online_cpu(c)
7835 		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
7836 
7837 	cpus_read_unlock();
7838 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
7839 	return 0;
7840 }
7841 subsys_initcall(fixup_ht_bug)
7842