/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "kvm_util.h"
#include "ucall_common.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern uint64_t guest_tsc_khz;

#ifndef MAX_NR_CPUID_ENTRIES
#define MAX_NR_CPUID_ENTRIES 100
#endif

#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

#define NMI_VECTOR 0x02

#define X86_EFLAGS_FIXED (1u << 1)

#define X86_CR4_VME (1ul << 0)
#define X86_CR4_PVI (1ul << 1)
#define X86_CR4_TSD (1ul << 2)
#define X86_CR4_DE (1ul << 3)
#define X86_CR4_PSE (1ul << 4)
#define X86_CR4_PAE (1ul << 5)
#define X86_CR4_MCE (1ul << 6)
#define X86_CR4_PGE (1ul << 7)
#define X86_CR4_PCE (1ul << 8)
#define X86_CR4_OSFXSR (1ul << 9)
#define X86_CR4_OSXMMEXCPT (1ul << 10)
#define X86_CR4_UMIP (1ul << 11)
#define X86_CR4_LA57 (1ul << 12)
#define X86_CR4_VMXE (1ul << 13)
#define X86_CR4_SMXE (1ul << 14)
#define X86_CR4_FSGSBASE (1ul << 16)
#define X86_CR4_PCIDE (1ul << 17)
#define X86_CR4_OSXSAVE (1ul << 18)
#define X86_CR4_SMEP (1ul << 20)
#define X86_CR4_SMAP (1ul << 21)
#define X86_CR4_PKE (1ul << 22)

struct xstate_header {
        u64 xstate_bv;
        u64 xcomp_bv;
        u64 reserved[6];
} __attribute__((packed));

struct xstate {
        u8 i387[512];
        struct xstate_header header;
        u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP BIT_ULL(0)
#define XFEATURE_MASK_SSE BIT_ULL(1)
#define XFEATURE_MASK_YMM BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR BIT_ULL(4)
#define XFEATURE_MASK_OPMASK BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256 BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM BIT_ULL(7)
#define XFEATURE_MASK_PT BIT_ULL(8)
#define XFEATURE_MASK_PKRU BIT_ULL(9)
#define XFEATURE_MASK_PASID BIT_ULL(10)
#define XFEATURE_MASK_CET_USER BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL BIT_ULL(12)
#define XFEATURE_MASK_LBR BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA BIT_ULL(18)

#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK | \
                              XFEATURE_MASK_ZMM_Hi256 | \
                              XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILE_DATA | \
                             XFEATURE_MASK_XTILE_CFG)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
enum cpuid_output_regs {
        KVM_CPUID_EAX,
        KVM_CPUID_EBX,
        KVM_CPUID_ECX,
        KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
        u32 function;
        u16 index;
        u8 reg;
        u8 bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit) \
({ \
        struct kvm_x86_cpu_feature feature = { \
                .function = fn, \
                .index = idx, \
                .reg = KVM_CPUID_##gpr, \
                .bit = __bit, \
        }; \
\
        kvm_static_assert((fn & 0xc0000000) == 0 || \
                          (fn & 0xc0000000) == 0x40000000 || \
                          (fn & 0xc0000000) == 0x80000000 || \
                          (fn & 0xc0000000) == 0xc0000000); \
        kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE)); \
        feature; \
})
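
/*
 * Usage sketch (illustrative): each X86_FEATURE_XXX below is a compile-time
 * constant that can be handed by value to the this_cpu_has()/kvm_cpu_has()
 * helpers defined later in this header, e.g.:
 *
 *      if (this_cpu_has(X86_FEATURE_XSAVE))
 *              set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 */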

/*
 * Basic Leafs, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2 KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_OSPKE KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define X86_FEATURE_XTILEDATA_XFD KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)

/*
 * Extended Leafs, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_PERFCTR_CORE KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
#define X86_FEATURE_PERFCTR_NB KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
#define X86_FEATURE_PERFCTR_LLC KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
#define X86_FEATURE_NX KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_IDLE_HLT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
#define X86_FEATURE_SEV_SNP KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
#define X86_FEATURE_PERFMON_V2 KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
#define X86_FEATURE_LBR_PMC_FREEZE KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2 KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
        u32 function;
        u8 index;
        u8 reg;
        u8 lo_bit;
        u8 hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit) \
({ \
        struct kvm_x86_cpu_property property = { \
                .function = fn, \
                .index = idx, \
                .reg = KVM_CPUID_##gpr, \
                .lo_bit = low_bit, \
                .hi_bit = high_bit, \
        }; \
\
        kvm_static_assert(low_bit < high_bit); \
        kvm_static_assert((fn & 0xc0000000) == 0 || \
                          (fn & 0xc0000000) == 0x40000000 || \
                          (fn & 0xc0000000) == 0x80000000 || \
                          (fn & 0xc0000000) == 0xc0000000); \
        kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE)); \
        property; \
})
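
/*
 * Usage sketch (illustrative): properties read a multi-bit field rather than
 * a single bit, e.g. the physical address width supported by the current CPU,
 * via this_cpu_property() (defined later in this header):
 *
 *      uint32_t maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 */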

#define X86_PROPERTY_MAX_BASIC_LEAF KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
#define X86_PROPERTY_NR_PERFCTR_CORE KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
#define X86_PROPERTY_NR_PERFCTR_NB KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)

#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.  Fixed counters also have bizarre enumeration, but inverted
 * from arch events for general purpose counters.  Fixed counters are supported
 * if a feature flag is set **OR** the total number of fixed counters is
 * greater than the index of the counter.
 *
 * Wrap the events for general purpose and fixed counters to simplify checking
 * whether or not a given architectural event is supported.
 */
struct kvm_x86_pmu_feature {
        struct kvm_x86_cpu_feature f;
};
#define KVM_X86_PMU_FEATURE(__reg, __bit) \
({ \
        struct kvm_x86_pmu_feature feature = { \
                .f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit), \
        }; \
\
        kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX || \
                          KVM_CPUID_##__reg == KVM_CPUID_ECX); \
        feature; \
})

#define X86_PMU_FEATURE_CPU_CYCLES KVM_X86_PMU_FEATURE(EBX, 0)
#define X86_PMU_FEATURE_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 1)
#define X86_PMU_FEATURE_REFERENCE_CYCLES KVM_X86_PMU_FEATURE(EBX, 2)
#define X86_PMU_FEATURE_LLC_REFERENCES KVM_X86_PMU_FEATURE(EBX, 3)
#define X86_PMU_FEATURE_LLC_MISSES KVM_X86_PMU_FEATURE(EBX, 4)
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7)

#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 2)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED KVM_X86_PMU_FEATURE(ECX, 3)
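
/*
 * Worked example of the inverted enumeration: if
 * X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH is 7, then
 * X86_PMU_FEATURE_TOPDOWN_SLOTS (EBX bit 7) is unsupported regardless of the
 * bit's value, while X86_PMU_FEATURE_LLC_MISSES (EBX bit 4) is supported if
 * and only if EBX bit 4 is *clear*.  See this_pmu_has() and kvm_pmu_has()
 * below, which encode exactly this logic.
 */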

static inline unsigned int x86_family(unsigned int eax)
{
        unsigned int x86;

        x86 = (eax >> 8) & 0xf;

        if (x86 == 0xf)
                x86 += (eax >> 20) & 0xff;

        return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
        return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
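
/*
 * Worked example: CPUID.01H:EAX = 0x000906ea (e.g. a Coffee Lake part) yields
 * x86_family() = (0x906ea >> 8) & 0xf = 0x6 (family != 0xf, so the extended
 * family bits are ignored) and x86_model() = ((0x906ea >> 12) & 0xf0) |
 * ((0x906ea >> 4) & 0x0f) = 0x90 | 0xe = 0x9e.
 */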

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK BIT_ULL(0)
#define PTE_WRITABLE_MASK BIT_ULL(1)
#define PTE_USER_MASK BIT_ULL(2)
#define PTE_ACCESSED_MASK BIT_ULL(5)
#define PTE_DIRTY_MASK BIT_ULL(6)
#define PTE_LARGE_MASK BIT_ULL(7)
#define PTE_GLOBAL_MASK BIT_ULL(8)
#define PTE_NX_MASK BIT_ULL(63)

#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x) (1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x) (~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte) ((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte) (PTE_GET_PA(pte) >> PAGE_SHIFT)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
        u64 rax;
        u64 rcx;
        u64 rdx;
        u64 rbx;
        u64 rsp;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
};

struct desc64 {
        uint16_t limit0;
        uint16_t base0;
        unsigned base1:8, type:4, s:1, dpl:2, p:1;
        unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
        uint32_t base3;
        uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
        uint16_t size;
        uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
        struct kvm_xsave *xsave;
        struct kvm_vcpu_events events;
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        struct kvm_xcrs xcrs;
        struct kvm_sregs sregs;
        struct kvm_debugregs debugregs;
        union {
                struct kvm_nested_state nested;
                char nested_[16384];
        };
        struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
        return ((uint64_t)desc->base3 << 32) |
               (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
        uint32_t eax, edx;
        uint64_t tsc_val;
        /*
         * The lfence is to wait (on Intel CPUs) until all previous
         * instructions have been executed.  If software requires RDTSC to be
         * executed prior to execution of any subsequent instruction, it can
         * execute LFENCE immediately after RDTSC.
         */
        __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
        tsc_val = ((uint64_t)edx) << 32 | eax;
        return tsc_val;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
        uint32_t eax, edx;

        __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
        return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
        uint32_t a, d;

        __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

        return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
        uint32_t a = value;
        uint32_t d = value >> 32;

        __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}


static inline uint16_t inw(uint16_t port)
{
        uint16_t tmp;

        __asm__ __volatile__("in %%dx, %%ax"
                             : /* output */ "=a" (tmp)
                             : /* input */ "d" (port));

        return tmp;
}

static inline uint16_t get_es(void)
{
        uint16_t es;

        __asm__ __volatile__("mov %%es, %[es]"
                             : /* output */ [es]"=rm"(es));
        return es;
}

static inline uint16_t get_cs(void)
{
        uint16_t cs;

        __asm__ __volatile__("mov %%cs, %[cs]"
                             : /* output */ [cs]"=rm"(cs));
        return cs;
}

static inline uint16_t get_ss(void)
{
        uint16_t ss;

        __asm__ __volatile__("mov %%ss, %[ss]"
                             : /* output */ [ss]"=rm"(ss));
        return ss;
}

static inline uint16_t get_ds(void)
{
        uint16_t ds;

        __asm__ __volatile__("mov %%ds, %[ds]"
                             : /* output */ [ds]"=rm"(ds));
        return ds;
}

static inline uint16_t get_fs(void)
{
        uint16_t fs;

        __asm__ __volatile__("mov %%fs, %[fs]"
                             : /* output */ [fs]"=rm"(fs));
        return fs;
}

static inline uint16_t get_gs(void)
{
        uint16_t gs;

        __asm__ __volatile__("mov %%gs, %[gs]"
                             : /* output */ [gs]"=rm"(gs));
        return gs;
}

static inline uint16_t get_tr(void)
{
        uint16_t tr;

        __asm__ __volatile__("str %[tr]"
                             : /* output */ [tr]"=rm"(tr));
        return tr;
}

static inline uint64_t get_cr0(void)
{
        uint64_t cr0;

        __asm__ __volatile__("mov %%cr0, %[cr0]"
                             : /* output */ [cr0]"=r"(cr0));
        return cr0;
}

static inline uint64_t get_cr3(void)
{
        uint64_t cr3;

        __asm__ __volatile__("mov %%cr3, %[cr3]"
                             : /* output */ [cr3]"=r"(cr3));
        return cr3;
}

static inline uint64_t get_cr4(void)
{
        uint64_t cr4;

        __asm__ __volatile__("mov %%cr4, %[cr4]"
                             : /* output */ [cr4]"=r"(cr4));
        return cr4;
}

static inline void set_cr4(uint64_t val)
{
        __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline void set_idt(const struct desc_ptr *idt_desc)
{
        __asm__ __volatile__("lidt %0"::"m"(*idt_desc));
}

static inline u64 xgetbv(u32 index)
{
        u32 eax, edx;

        __asm__ __volatile__("xgetbv;"
                             : "=a" (eax), "=d" (edx)
                             : "c" (index));
        return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
        u32 eax = value;
        u32 edx = value >> 32;

        __asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

static inline void wrpkru(u32 pkru)
{
        /* Note, ECX and EDX are architecturally required to be '0'. */
        asm volatile(".byte 0x0f,0x01,0xef\n\t"
                     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
        struct desc_ptr gdt;
        __asm__ __volatile__("sgdt %[gdt]"
                             : /* output */ [gdt]"=m"(gdt));
        return gdt;
}

static inline struct desc_ptr get_idt(void)
{
        struct desc_ptr idt;
        __asm__ __volatile__("sidt %[idt]"
                             : /* output */ [idt]"=m"(idt));
        return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
        __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
                           uint32_t *eax, uint32_t *ebx,
                           uint32_t *ecx, uint32_t *edx)
{
        *eax = function;
        *ecx = index;

        asm volatile("cpuid"
                     : "=a" (*eax),
                       "=b" (*ebx),
                       "=c" (*ecx),
                       "=d" (*edx)
                     : "0" (*eax), "2" (*ecx)
                     : "memory");
}

static inline void cpuid(uint32_t function,
                         uint32_t *eax, uint32_t *ebx,
                         uint32_t *ecx, uint32_t *edx)
{
        return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
        uint32_t eax, ebx, ecx, edx;

        cpuid(1, &eax, &ebx, &ecx, &edx);
        return eax;
}

static inline uint32_t this_cpu_family(void)
{
        return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
        return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
        const uint32_t *chunk = (const uint32_t *)vendor;
        uint32_t eax, ebx, ecx, edx;

        cpuid(0, &eax, &ebx, &ecx, &edx);
        return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
        return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
        return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
                                      uint8_t reg, uint8_t lo, uint8_t hi)
{
        uint32_t gprs[4];

        __cpuid(function, index,
                &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
                &gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

        return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
        return __this_cpu_has(feature.function, feature.index,
                              feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
        return __this_cpu_has(property.function, property.index,
                              property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
        uint32_t max_leaf;

        switch (property.function & 0xc0000000) {
        case 0:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
                break;
        case 0x40000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
                break;
        case 0x80000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
                break;
        case 0xc0000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
        }
        return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
        uint32_t nr_bits;

        if (feature.f.reg == KVM_CPUID_EBX) {
                nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
                return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
        }

        GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
        nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
        return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}

static __always_inline uint64_t this_cpu_supported_xcr0(void)
{
        if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
                return 0;

        return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
               ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
        switch (reg) {
        case 0:
                asm("movdqa %%xmm0, %0" : "=m"(*data));
                break;
        case 1:
                asm("movdqa %%xmm1, %0" : "=m"(*data));
                break;
        case 2:
                asm("movdqa %%xmm2, %0" : "=m"(*data));
                break;
        case 3:
                asm("movdqa %%xmm3, %0" : "=m"(*data));
                break;
        case 4:
                asm("movdqa %%xmm4, %0" : "=m"(*data));
                break;
        case 5:
                asm("movdqa %%xmm5, %0" : "=m"(*data));
                break;
        case 6:
                asm("movdqa %%xmm6, %0" : "=m"(*data));
                break;
        case 7:
                asm("movdqa %%xmm7, %0" : "=m"(*data));
                break;
        default:
                BUG();
        }
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
        switch (reg) {
        case 0:
                asm("movdqa %0, %%xmm0" : : "m"(*data));
                break;
        case 1:
                asm("movdqa %0, %%xmm1" : : "m"(*data));
                break;
        case 2:
                asm("movdqa %0, %%xmm2" : : "m"(*data));
                break;
        case 3:
                asm("movdqa %0, %%xmm3" : : "m"(*data));
                break;
        case 4:
                asm("movdqa %0, %%xmm4" : : "m"(*data));
                break;
        case 5:
                asm("movdqa %0, %%xmm5" : : "m"(*data));
                break;
        case 6:
                asm("movdqa %0, %%xmm6" : : "m"(*data));
                break;
        case 7:
                asm("movdqa %0, %%xmm7" : : "m"(*data));
                break;
        default:
                BUG();
        }
}

static inline void cpu_relax(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static inline void udelay(unsigned long usec)
{
        uint64_t start, now, cycles;

        GUEST_ASSERT(guest_tsc_khz);
        cycles = guest_tsc_khz / 1000 * usec;
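        /* e.g. guest_tsc_khz = 2000000 (a 2 GHz TSC) and usec = 50 => 100000 cycles */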

        /*
         * Deliberately don't PAUSE, a.k.a. cpu_relax(), so that the delay is
         * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits.
         */
        start = rdtsc();
        do {
                now = rdtsc();
        } while (now - start < cycles);
}

#define ud2() \
        __asm__ __volatile__( \
                "ud2\n" \
                )

#define hlt() \
        __asm__ __volatile__( \
                "hlt\n" \
                )

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
                                 struct kvm_msrs *msrs)
{
        int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

        TEST_ASSERT(r == msrs->nmsrs,
                    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
                    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
        int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

        TEST_ASSERT(r == msrs->nmsrs,
                    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
                    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
                                      struct kvm_debugregs *debugregs)
{
        vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
                                      struct kvm_debugregs *debugregs)
{
        vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
                                  struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
                                   struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
                                  struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
                                 struct kvm_xcrs *xcrs)
{
        vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
        vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
                                               uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

static inline uint32_t kvm_cpu_fms(void)
{
        return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
        return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
        return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
                   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
        return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
                            struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
        return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
        uint32_t max_leaf;

        switch (property.function & 0xc0000000) {
        case 0:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
                break;
        case 0x40000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
                break;
        case 0x80000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
                break;
        case 0xc0000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
        }
        return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
        uint32_t nr_bits;

        if (feature.f.reg == KVM_CPUID_EBX) {
                nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
                return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
        }

        TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
        nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
        return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}

static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
{
        if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
                return 0;

        return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
               ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
        return sizeof(struct kvm_cpuid2) +
               sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
        struct kvm_cpuid2 *cpuid;

        cpuid = malloc(kvm_cpuid2_size(nr_entries));
        TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

        cpuid->nent = nr_entries;

        return cpuid;
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);

static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
{
        vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
                                                              uint32_t function,
                                                              uint32_t index)
{
        TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");

        vcpu_get_cpuid(vcpu);

        return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
                                                          function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
                                                            uint32_t function)
{
        return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
        int r;

        TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
        r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
        if (r)
                return r;

        /* On success, refresh the cache to pick up adjustments made by KVM. */
        vcpu_get_cpuid(vcpu);
        return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
        TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
        vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

        /* Refresh the cache to pick up adjustments made by KVM. */
        vcpu_get_cpuid(vcpu);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
                             struct kvm_x86_cpu_property property,
                             uint32_t value);
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);

static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
                                  struct kvm_x86_cpu_feature feature)
{
        struct kvm_cpuid_entry2 *entry;

        entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
        return *((&entry->eax) + feature.reg) & BIT(feature.bit);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
                                     struct kvm_x86_cpu_feature feature,
                                     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
                                          struct kvm_x86_cpu_feature feature)
{
        vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
                                            struct kvm_x86_cpu_feature feature)
{
        vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

/*
 * Assert on MSR access(es) and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the
 * macro is printed, not the value the macro resolves to (due to macro
 * expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...) \
do { \
        if (__builtin_constant_p(msr)) { \
                TEST_ASSERT(cond, fmt, str, args); \
        } else if (!(cond)) { \
                char buf[16]; \
\
                snprintf(buf, sizeof(buf), "MSR 0x%x", msr); \
                TEST_ASSERT(cond, fmt, buf, args); \
        } \
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(uint32_t msr)
{
        return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val) \
do { \
        uint64_t r, v = val; \
\
        TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \
                        "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
        if (!is_durable_msr(msr)) \
                break; \
        r = vcpu_get_msr(vcpu, msr); \
        TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r); \
} while (0)
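
/*
 * Usage sketch (illustrative): write an MSR and, for durable MSRs, rely on
 * the macro's read-back check, e.g.:
 *
 *      vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, 0);
 */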

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);

struct ex_regs {
        uint64_t rax, rcx, rdx, rbx;
        uint64_t rbp, rsi, rdi;
        uint64_t r8, r9, r10, r11;
        uint64_t r12, r13, r14, r15;
        uint64_t vector;
        uint64_t error_code;
        uint64_t rip;
        uint64_t cs;
        uint64_t rflags;
};

struct idt_entry {
        uint16_t offset0;
        uint16_t selector;
        uint16_t ist : 3;
        uint16_t : 5;
        uint16_t type : 4;
        uint16_t : 1;
        uint16_t dpl : 2;
        uint16_t p : 1;
        uint16_t offset1;
        uint32_t offset2;
        uint32_t reserved;
};

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
                                  void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting
 * SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define __KVM_ASM_SAFE(insn, fep) \
        "mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \
        "lea 1f(%%rip), %%r10\n\t" \
        "lea 2f(%%rip), %%r11\n\t" \
        fep "1: " insn "\n\t" \
        "xor %%r9, %%r9\n\t" \
        "2:\n\t" \
        "mov %%r9b, %[vector]\n\t" \
        "mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "")
#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP)

#define KVM_ASM_SAFE_OUTPUTS(v, ec) [vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS "r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...) \
({ \
        uint64_t ign_error_code; \
        uint8_t vector; \
\
        asm volatile(KVM_ASM_SAFE(insn) \
                     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
                     : inputs \
                     : KVM_ASM_SAFE_CLOBBERS); \
        vector; \
})
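
/*
 * Usage sketch (illustrative): execute an instruction that may fault and
 * capture the vector, e.g. a WRMSR that's expected to #GP (vector 13):
 *
 *      vector = kvm_asm_safe("wrmsr", "c"(msr), "a"(lo), "d"(hi));
 *      GUEST_ASSERT(vector == 13);
 *
 * wrmsr_safe() below wraps exactly this pattern.
 */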

#define kvm_asm_safe_ec(insn, error_code, inputs...) \
({ \
        uint8_t vector; \
\
        asm volatile(KVM_ASM_SAFE(insn) \
                     : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
                     : inputs \
                     : KVM_ASM_SAFE_CLOBBERS); \
        vector; \
})

#define kvm_asm_safe_fep(insn, inputs...) \
({ \
        uint64_t ign_error_code; \
        uint8_t vector; \
\
        asm volatile(KVM_ASM_SAFE_FEP(insn) \
                     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
                     : inputs \
                     : KVM_ASM_SAFE_CLOBBERS); \
        vector; \
})

#define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \
({ \
        uint8_t vector; \
\
        asm volatile(KVM_ASM_SAFE_FEP(insn) \
                     : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
                     : inputs \
                     : KVM_ASM_SAFE_CLOBBERS); \
        vector; \
})

#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \
static inline uint8_t insn##_safe##_fep(uint32_t idx, uint64_t *val) \
{ \
        uint64_t error_code; \
        uint8_t vector; \
        uint32_t a, d; \
\
        asm volatile(KVM_ASM_SAFE##_FEP(#insn) \
                     : "=a"(a), "=d"(d), \
                       KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
                     : "c"(idx) \
                     : KVM_ASM_SAFE_CLOBBERS); \
\
        *val = (uint64_t)a | ((uint64_t)d << 32); \
        return vector; \
}

/*
 * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
 * use ECX as an input index, and EDX:EAX as a 64-bit output.
 */
#define BUILD_READ_U64_SAFE_HELPERS(insn) \
        BUILD_READ_U64_SAFE_HELPER(insn, , ) \
        BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \

BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)
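
/*
 * Usage sketch (illustrative): the generated helpers return the vector (zero
 * on success) and write the value through the pointer, e.g. to probe an MSR
 * that may not exist:
 *
 *      uint64_t val;
 *
 *      if (!rdmsr_safe(MSR_PLATFORM_INFO, &val))
 *              ... use val ...
 */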

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
        return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}

static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
{
        u32 eax = value;
        u32 edx = value >> 32;

        return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

static inline bool kvm_is_pmu_enabled(void)
{
        return get_kvm_param_bool("enable_pmu");
}

static inline bool kvm_is_forced_emulation_enabled(void)
{
        return !!get_kvm_param_integer("force_emulation_prefix");
}

static inline bool kvm_is_unrestricted_guest_enabled(void)
{
        return get_kvm_intel_param_bool("unrestricted_guest");
}

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
                                    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);

static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
                                                     uint64_t size, uint64_t flags)
{
        return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
                                               uint64_t flags)
{
        uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

        GUEST_ASSERT(!ret);
}
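
/*
 * Usage sketch (illustrative, assuming the KVM_MAP_GPA_RANGE_* flags from
 * <linux/kvm_para.h>): a guest with KVM_FEATURE_KVM_HC_MAP_GPA_RANGE
 * converting a 2MiB region to shared/decrypted, using PG_SIZE_2M (defined
 * later in this header):
 *
 *      kvm_hypercall_map_gpa_range(gpa, PG_SIZE_2M,
 *                                  KVM_MAP_GPA_RANGE_DECRYPTED |
 *                                  KVM_MAP_GPA_RANGE_PAGE_SZ_2M);
 */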

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
        asm volatile("sti; hlt");
}

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
        asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process all
 * interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
        asm volatile ("sti; nop; cli");
}

static inline void sti(void)
{
        asm volatile("sti");
}

static inline void cli(void)
{
        asm volatile ("cli");
}

void __vm_xsave_require_permission(uint64_t xfeature, const char *name);

#define vm_xsave_require_permission(xfeature) \
        __vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_512G,
        PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
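
/*
 * E.g. PG_LEVEL_2M == 2, so PG_LEVEL_SHIFT(PG_LEVEL_2M) = (2 - 1) * 9 + 12 = 21
 * and PG_SIZE_2M = 1ull << 21 = 0x200000; each level covers 9 more VA bits.
 */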

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE (1UL<<0) /* Protection Enable */
#define X86_CR0_MP (1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM (1UL<<2) /* Emulation */
#define X86_CR0_TS (1UL<<3) /* Task Switched */
#define X86_CR0_ET (1UL<<4) /* Extension Type */
#define X86_CR0_NE (1UL<<5) /* Numeric Error */
#define X86_CR0_WP (1UL<<16) /* Write Protect */
#define X86_CR0_AM (1UL<<18) /* Alignment Mask */
#define X86_CR0_NW (1UL<<29) /* Not Write-through */
#define X86_CR0_CD (1UL<<30) /* Cache Disable */
#define X86_CR0_PG (1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

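/*
 * E.g. a user-mode write to a present page yields an error code of
 * PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK == 0x7.
 */
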
bool sys_clocksource_is_based_on_tsc(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */