xref: /linux/arch/x86/include/asm/perf_event.h (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

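/*
 * Illustrative sketch (not part of this header's API; the helper name is
 * made up for this example): composing an event-select value from the
 * bits above. This builds the architectural "unhalted core cycles" event
 * (event 0x3c, umask 0x00), counted in both user and kernel mode, with
 * the counter enabled. The result is the kind of value the kernel would
 * write to MSR_ARCH_PERFMON_EVENTSEL0:
 */
static inline u64 example_eventsel_core_cycles(void)
{
	return ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	|
	       ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	|
	       ARCH_PERFMON_EVENTSEL_USR		|
	       ARCH_PERFMON_EVENTSEL_OS			|
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}
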
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

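/*
 * Illustrative sketch of how the unions above decode CPUID leaf 0xa into
 * a struct x86_pmu_capability. This is an example, not necessarily how
 * the kernel's perf_get_x86_pmu_capability() is implemented; cpuid() is
 * assumed to come from <asm/processor.h>. Note that the ebx "no_*" bits
 * are set when an architectural event is *not* available:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *
 *	cap->version		= eax.split.version_id;
 *	cap->num_counters_gp	= eax.split.num_counters;
 *	cap->bit_width_gp	= eax.split.bit_width;
 *	cap->events_mask	= ebx.full;
 *	cap->events_mask_len	= eax.split.mask_length;
 *	cap->num_counters_fixed	= edx.split.num_counters_fixed;
 *	cap->bit_width_fixed	= edx.split.bit_width_fixed;
 */
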
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

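/*
 * Each fixed counter gets a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL: bit 0 enables ring-0 counting, bit 1
 * enables ring-3 counting, bit 2 selects any-thread mode and bit 3 arms
 * the PMI on overflow. A minimal sketch (assuming wrmsrl() from
 * <asm/msr.h>) that would start fixed counter 1, the core-cycle counter,
 * for both user and kernel mode:
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3ULL << (4 * 1));
 */
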
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * The caps bits below use the same positions as the IBS CPUID feature
 * flags (Fn8000_001B_EAX), except that bit 0 is reused to indicate the
 * existence of IBS itself.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)

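/*
 * Illustrative sketch (the helper name is made up for this example):
 * composing an IBS op control word that enables sampling with a given
 * period. The hardware stores the maximum count divided by 16, hence the
 * shift. IBS_OP_CNT_CTL (count dispatched ops rather than cycles) could
 * be OR'ed in as well, but only when get_ibs_caps() reports
 * IBS_CAPS_OPCNT:
 */
static inline u64 example_ibs_op_config(u64 period)
{
	return ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
}
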
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

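/*
 * Illustrative sketch (the helper name is made up for this example):
 * before touching the IBS op MSRs, a driver would check the caps word
 * for both the existence of IBS and op sampling support:
 */
static inline int example_ibs_op_supported(void)
{
	u32 caps = get_ibs_caps();

	return (caps & (IBS_CAPS_AVAIL | IBS_CAPS_OPSAM))
		== (IBS_CAPS_AVAIL | IBS_CAPS_OPSAM);
}
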
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the CPU eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	/* store the current stack pointer into regs->sp: */	\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
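
/*
 * Illustrative sketch: a hypervisor consumes perf_guest_get_msrs() by
 * loading the .guest value of each returned MSR before VM-entry and
 * restoring the .host value on VM-exit. The loop below is an example,
 * not a kernel interface, and switch_msr_on_entry_exit() is a made-up
 * stand-in for the hypervisor's MSR-switching mechanism:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		switch_msr_on_entry_exit(msrs[i].msr,
 *					 msrs[i].guest, msrs[i].host);
 */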
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */