/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED				       16
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR			(1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
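
/*
 * Illustrative sketch only (hypothetical helper, not kernel API): a raw
 * event-select value is composed by OR-ing the fields above. This encodes
 * the architectural "unhalted core cycles" event (event 0x3c, umask 0x00;
 * see the ARCH_PERFMON_UNHALTED_CORE_CYCLES_* definitions further down),
 * counting in both ring 0 and ring 3.
 */
static inline u64 example_eventsel_core_cycles(void)
{
	return (0x3cULL & ARCH_PERFMON_EVENTSEL_EVENT)		|
	       ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK)	|
	       ARCH_PERFMON_EVENTSEL_USR			|
	       ARCH_PERFMON_EVENTSEL_OS				|
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}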

#define INTEL_FIXED_BITS_STRIDE			4
#define INTEL_FIXED_0_KERNEL				(1ULL << 0)
#define INTEL_FIXED_0_USER				(1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD			(1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI			(1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR			(1ULL << 2)

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define INTEL_FIXED_BITS_MASK					\
	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
	 ICL_FIXED_0_ADAPTIVE)

#define intel_fixed_bits_by_idx(_idx, _bits)			\
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
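
/*
 * Illustrative sketch only (hypothetical helper, not kernel API): each
 * fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (defined further down), at offset
 * idx * INTEL_FIXED_BITS_STRIDE. E.g. enabling ring-0 and ring-3
 * counting with PMI delivery on fixed counter 1:
 */
static inline u64 example_fixed1_ctrl_bits(void)
{
	return intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
					  INTEL_FIXED_0_USER |
					  INTEL_FIXED_0_ENABLE_PMI);
}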

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK				\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK			\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK				\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
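
/*
 * Illustrative sketch only (hypothetical helper, not kernel API): a
 * user-supplied raw config for a generic Intel counter is acceptable
 * only if it sets no bits outside X86_RAW_EVENT_MASK.
 */
static inline bool example_raw_config_valid(u64 config)
{
	return !(config & ~X86_RAW_EVENT_MASK);
}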

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT	|		\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK	|	\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB		\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	|	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC			BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC		GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC		GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC		\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	|	\
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP	BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24
#define PEBS_DATACFG_CNTR	BIT_ULL(4)
#define PEBS_DATACFG_CNTR_SHIFT	32
#define PEBS_DATACFG_CNTR_MASK	GENMASK_ULL(15, 0)
#define PEBS_DATACFG_FIX_SHIFT	48
#define PEBS_DATACFG_FIX_MASK	GENMASK_ULL(7, 0)
#define PEBS_DATACFG_METRICS	BIT_ULL(5)

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW	BIT_ULL(63)
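
/*
 * Illustrative sketch only (hypothetical helper and values, not kernel
 * API): an adaptive PEBS data configuration requesting the memory-info
 * and GPR groups plus LBR entries; the count placed in the field at
 * PEBS_DATACFG_LBR_SHIFT is a hypothetical example value.
 */
static inline u64 example_pebs_data_cfg(void)
{
	return PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP | PEBS_DATACFG_LBRS |
	       (7ULL << PEBS_DATACFG_LBR_SHIFT);
}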

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};
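
/*
 * Illustrative sketch only (not kernel API), assuming the cpuid()
 * helper from <asm/processor.h>: decoding CPUID leaf 0xa EAX through
 * the union above.
 */
static inline unsigned int example_arch_perfmon_version(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, ecx, edx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx);
	return eax.split.version_id;
}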

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
#define ARCH_PERFMON_ACR_LEAF			0x2

union cpuid35_eax {
	struct {
		unsigned int	leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int	cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int	acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int	events_subleaf:1;
		unsigned int	reserved:28;
	} split;
	unsigned int		full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int	umask2:1;
		/* EQ-bit Supported */
		unsigned int	eq:1;
		unsigned int	reserved:30;
	} split;
	unsigned int		full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
		unsigned int	reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int	lbr_counters:4;
	} split;
	unsigned int		full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int	lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int	num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int	num_umc_pmc:6;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept	:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)
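
/*
 * Illustrative sketch only (not kernel API): RDPMC reads a fixed
 * counter when bit 30 of ECX is set. Reading fixed counter 0
 * (instructions retired), assuming RDPMC is permitted in the current
 * context:
 */
static inline u64 example_rdpmc_fixed0(void)
{
	u32 lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi)
			     : "c" (INTEL_PMC_FIXED_RDPMC_BASE | 0));
	return lo | ((u64)hi << 32);
}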

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3	0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS	(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS	(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
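
/*
 * Illustrative sketch only (hypothetical helper, not kernel API):
 * building the pseudo-encoding for fixed counter @idx as described
 * above, e.g. 0x0300 for fixed counter 2. use_fixed_pseudo_encoding()
 * is the matching test for such encodings.
 */
static inline u64 example_fixed_ctr_pseudo_encoding(int idx)
{
	return (u64)(idx + 1) << 8;
}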

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE		(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING		(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC		(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS		(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT		(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT		(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END		INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN			((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
						INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS				0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING		0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC		0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND		0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND		0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS		0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT		0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT		0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND		0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX			INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM			8

#define INTEL_TD_CFG_METRIC_CLEAR_BIT		0
#define INTEL_TD_CFG_METRIC_CLEAR		BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}
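
/*
 * Illustrative sketch only (hypothetical helper, not kernel API):
 * is_metric_idx() relies on the unsigned comparison folding both range
 * checks into one test; written out long-hand it is equivalent to:
 */
static inline bool example_is_metric_idx_expanded(int idx)
{
	return idx >= INTEL_PMC_IDX_METRIC_BASE &&
	       idx < INTEL_PMC_IDX_METRIC_BASE + INTEL_TD_METRIC_NUM;
}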

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
			(~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		BIT_ULL(48)
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};
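
/*
 * Illustrative sketch only (hypothetical helper, not kernel API): the
 * basic group leads every adaptive PEBS record, and its format_size
 * field gives the total record size used when stepping through the
 * PEBS buffer.
 */
static inline u64 example_pebs_record_size(const struct pebs_basic *basic)
{
	return basic->format_size;
}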

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 metrics;
	u32 reserved;
};

#define INTEL_CNTR_METRICS		0x3

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES		0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)
#define IBS_CAPS_OPLDLAT		(1U<<12)
#define IBS_CAPS_OPDTLBPGSIZE		(1U<<19)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
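
/*
 * Illustrative sketch only (hypothetical helper, not kernel API):
 * testing a decoded IBS capability word (as returned by get_ibs_caps()
 * below) for op sampling support.
 */
static inline bool example_ibs_op_sampling_supported(u32 ibs_caps)
{
	return (ibs_caps & IBS_CAPS_AVAIL) && (ibs_caps & IBS_CAPS_OPSAM);
}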

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_LDLAT_EN		(1ULL<<63)
#define IBS_OP_LDLAT_THRSH	(0xFULL<<59)
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK	(0x7FULL<<52)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs)	perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs)	perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
	bool		has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif
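
/*
 * Illustrative sketch only (hypothetical helper, not kernel API): a
 * consumer such as KVM can query the LBR geometry through
 * x86_perf_get_lbr(); the stub above zeroes the structure, so nr == 0
 * when no LBR support is compiled in.
 */
static inline unsigned int example_lbr_depth(void)
{
	struct x86_pmu_lbr lbr;

	x86_perf_get_lbr(&lbr);
	return lbr.nr;
}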

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */