#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
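/*
 * The EVENTSEL bits above combine into a single 64-bit event-select
 * value. A minimal sketch (not code the kernel itself runs), assuming
 * <asm/msr.h> is available for wrmsrl(): program general-purpose
 * counter 0 to count the architectural unhalted-core-cycles event
 * (event 0x3c, umask 0x00; also #defined further down) in both user
 * and kernel mode:
 *
 *	u64 config = (0x3cULL & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS  |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */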

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
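/*
 * The *_RAW_EVENT_MASK macros name the bits a raw (PERF_TYPE_RAW)
 * config is allowed to set. A minimal validation sketch, along the
 * lines of what the x86 perf code does when accepting a user-supplied
 * raw config (the function name is hypothetical):
 *
 *	static int example_validate_raw(u64 config)
 *	{
 *		if (config & ~X86_RAW_EVENT_MASK)
 *			return -EINVAL;
 *		return 0;
 *	}
 */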
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
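/*
 * A minimal enumeration sketch, assuming <asm/processor.h> for cpuid():
 * leaf 0xA fills the unions above. Note the EBX bits are inverted
 * ("no_*"): a set bit means the architectural event is NOT available.
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	if (eax.split.version_id && !ebx.split.no_unhalted_core_cycles)
 *		... the unhalted-core-cycles event is supported ...
 */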

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
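/*
 * Each fixed counter owns a 4-bit field in the control MSR, starting
 * at bit 4*index (bit 0: count in ring 0, bit 1: count in ring 3,
 * bit 3: PMI on overflow). A minimal sketch, assuming <asm/msr.h> for
 * the MSR accessors: enable fixed counter 0 (Instr_Retired.Any) for
 * both user and kernel mode:
 *
 *	u64 ctrl;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3ULL << (4 * (INTEL_PMC_IDX_FIXED_INSTRUCTIONS -
 *				INTEL_PMC_IDX_FIXED));
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */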

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
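/*
 * A minimal IBS fetch-sampling sketch (MSR_AMD64_IBSFETCHCTL comes
 * from <asm/msr-index.h>; the max-count field counts in units of 16,
 * and "period" is a hypothetical sample period):
 *
 *	wrmsrl(MSR_AMD64_IBSFETCHCTL,
 *	       IBS_FETCH_ENABLE | ((period >> 4) & IBS_FETCH_MAX_CNT));
 */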

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
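/*
 * A minimal IBS op-sampling sketch, analogous to the fetch case above
 * (MSR_AMD64_IBSOPCTL comes from <asm/msr-index.h>; "period" is again
 * a hypothetical sample period, stored in units of 16 ops):
 *
 *	wrmsrl(MSR_AMD64_IBSOPCTL,
 *	       IBS_OP_ENABLE | ((period >> 4) & IBS_OP_MAX_CNT));
 */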

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
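/*
 * A typical capability check before touching any IBS MSRs:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_OPSAM))
 *		... no IBS op sampling on this CPU ...
 */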

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
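/*
 * Sketch of how the consumer side reads this back; this is roughly
 * what the x86 perf_misc_flags() implementation does:
 *
 *	if (regs->flags & PERF_EFLAGS_EXACT)
 *		misc |= PERF_RECORD_MISC_EXACT_IP;
 */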

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}
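/*
 * The generic perf core invokes this through perf_fetch_caller_regs()
 * to snapshot a minimal pt_regs at a software-event callsite, e.g.:
 *
 *	struct pt_regs regs;
 *
 *	perf_fetch_caller_regs(&regs);
 */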

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
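/*
 * Sketch of the intended perf_guest_get_msrs() usage: a hypervisor
 * (KVM does roughly this around VM-entry/VM-exit) walks the returned
 * array, loading .guest on entry and restoring .host on exit:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		... switch msrs[i].msr between host and guest values ...
 */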
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */