#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				       32
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
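
/*
 * A minimal sketch (illustrative only, not an API defined here) of how
 * the bits above combine into an event select value.  Event 0x3c with
 * umask 0x00 is the architectural unhalted-core-cycles event; wrmsrl()
 * is assumed from <asm/msr.h>:
 *
 *	u64 config = (0x3c & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS  |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */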

#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
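
/*
 * Illustrative sketch of the intended use of these masks (the real
 * logic lives in arch/x86/kernel/cpu/perf_event.c): a PERF_TYPE_RAW
 * config supplied by userspace is sanitized by masking it against the
 * architecturally valid fields before it reaches an event select MSR:
 *
 *	attr->config &= X86_RAW_EVENT_MASK;
 *
 * (or AMD64_RAW_EVENT_MASK on AMD CPUs, which also accept the extended
 * event select bits 35:32).
 */
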
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_F15H				6
#define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7
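
/*
 * Illustrative detection check: in CPUID 0xa EBX a *set* bit means the
 * event is NOT available, so (assuming the cpuid() helper from
 * <asm/processor.h>) presence of the unhalted-core-cycles event can be
 * tested roughly like this:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(0xa, &eax, &ebx, &ecx, &edx);
 *	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 *		the event is supported
 *
 * (a full check also verifies the bit index is below the mask_length
 * reported in EAX, see cpuid10_eax below).
 */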

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
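
/*
 * A minimal enumeration sketch using these unions (illustrative;
 * cpuid() is assumed from <asm/processor.h>):
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *
 * A non-zero eax.split.version_id then indicates architectural perfmon
 * support, with eax.split.num_counters generic counters of
 * eax.split.bit_width bits each, plus edx.split.num_counters_fixed
 * fixed counters.
 */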

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
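
/*
 * Sketch of fixed-counter programming per the Intel SDM (illustrative,
 * not a helper provided by this header): MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * holds one 4-bit control field per fixed counter - bit 0 enables
 * ring-0 counting, bit 1 ring-3, bit 3 the PMI on overflow.  Counting
 * Instr_Retired.Any in both rings and reading the result:
 *
 *	u64 ctrl, count;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3;		(field [3:0] controls fixed counter 0)
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, count);
 */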

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */

extern u32 get_ibs_caps(void);
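
/*
 * Illustrative IBS usage (a sketch, not the in-kernel driver): check
 * the capabilities first, then start op sampling with a given period.
 * The IbsOpMaxCnt field stores bits 19:4 of the period, hence the
 * shift; MSR_AMD64_IBSOPCTL is assumed from <asm/msr-index.h>:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (caps & IBS_CAPS_OPSAM) {
 *		u64 period = 0x10000;
 *
 *		wrmsrl(MSR_AMD64_IBSOPCTL,
 *		       ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE);
 *	}
 */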

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
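
/*
 * A minimal sketch of the consumer side (KVM-style, illustrative): the
 * array returned by perf_guest_get_msrs() lists the MSRs that must be
 * switched between their host and guest values around guest entry/exit:
 *
 *	struct perf_guest_switch_msr *msrs;
 *	int i, nr;
 *
 *	msrs = perf_guest_get_msrs(&nr);
 *	for (i = 0; i < nr; i++)
 *		switch msrs[i].msr between msrs[i].host and msrs[i].guest
 */
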
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */