xref: /linux/arch/x86/include/asm/perf_event.h (revision 8b1935e6a36b0967efc593d67ed3aebbfbc1f5b1)
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC					8
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
#define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)

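/*
 * Illustrative sketch, not part of this header: a raw event select
 * value is the event code (bits 0-7) and unit mask (bits 8-15) OR-ed
 * with the control bits above, then written to
 * MSR_ARCH_PERFMON_EVENTSEL0. The helper name is hypothetical.
 */
static inline unsigned long long
example_encode_eventsel(unsigned long long event, unsigned long long umask)
{
	return event | (umask << 8) |
	       ARCH_PERFMON_EVENTSEL_USR |	/* count in user mode */
	       ARCH_PERFMON_EVENTSEL_OS |	/* count in kernel mode */
	       ARCH_PERFMON_EVENTSEL_INT |	/* raise PMI on overflow */
	       ARCH_PERFMON_EVENTSEL0_ENABLE;	/* enable the counter */
}
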
/*
 * Event select field masks; INTEL_ARCH_EVENT_MASK covers both the
 * event select code and the unit mask:
 */
#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
#define INTEL_ARCH_INV_MASK		0x00800000ULL
#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)

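/*
 * Illustrative sketch, hypothetical helpers: the masks above pick
 * the architectural fields back out of a raw config value.
 */
static inline unsigned int example_evtsel(unsigned long long config)
{
	return config & INTEL_ARCH_EVTSEL_MASK;		/* bits 0-7 */
}

static inline unsigned int example_umask(unsigned long long config)
{
	return (config & INTEL_ARCH_UNIT_MASK) >> 8;	/* bits 8-15 */
}
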
/*
 * Filter mask to validate fixed counter events.
 * The following filters disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define INTEL_ARCH_FIXED_MASK \
	(INTEL_ARCH_CNT_MASK| \
	 INTEL_ARCH_INV_MASK| \
	 INTEL_ARCH_EDGE_MASK|\
	 INTEL_ARCH_UNIT_MASK|\
	 INTEL_ARCH_EVTSEL_MASK)

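/*
 * Illustrative sketch, hypothetical helper: an event can be scheduled
 * on a fixed counter only if its config, reduced by
 * INTEL_ARCH_FIXED_MASK, matches the fixed event's code exactly,
 * i.e. the disqualifying inv/edge/cnt-mask bits are all zero.
 */
static inline int example_fits_fixed_counter(unsigned long long config,
					     unsigned long long fixed_event)
{
	return (config & INTEL_ARCH_FIXED_MASK) == fixed_event;
}
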
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED			 6

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

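/*
 * Illustrative user-space sketch (example only, hence #if 0): CPUID
 * leaf 0xa fills EAX and EDX with the layouts above. This assumes the
 * GCC/clang <cpuid.h> __get_cpuid() helper, which is not available in
 * kernel code; the function name is hypothetical.
 */
#if 0
#include <cpuid.h>

static int example_arch_perfmon_version(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	if (!__get_cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full))
		return -1;		/* CPUID leaf 0xa not supported */
	if (!eax.split.version_id)
		return -1;		/* no architectural perfmon */

	/* Note: a set bit in EBX means the event is NOT available. */
	if (eax.split.mask_length <= ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return -1;		/* core cycles event not usable */

	return eax.split.version_id;	/* num_events_fixed valid if > 1 */
}
#endif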

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

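/*
 * Illustrative sketch, hypothetical helper: per the Intel SDM, each
 * fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL at bit (4 * idx): bit 0 enables
 * ring-0 counting, bit 1 ring-3 counting, bit 2 any-thread (v3 and
 * later), bit 3 the PMI on overflow. The count itself is read from
 * MSR_ARCH_PERFMON_FIXED_CTR0 + idx.
 */
static inline unsigned long long
example_fixed_ctrl_field(int idx, unsigned long long bits)
{
	return (bits & 0xfULL) << (4 * idx);	/* 4-bit field per counter */
}
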
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)


#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

#else
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */