/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "kvm_util.h"
#include "ucall_common.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern uint64_t guest_tsc_khz;

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
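/*
 * Illustrative sketch (not part of the original header): KVM_FEP is simply
 * prepended to an instruction in inline asm so that KVM's emulator handles
 * it, e.g. to force emulation of CPUID:
 *
 *	asm volatile(KVM_FEP "cpuid"
 *		     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
 *
 * This only works when KVM's force_emulation_prefix module param is enabled;
 * see kvm_is_forced_emulation_enabled() below for the runtime check.
 */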

#define NMI_VECTOR	0x02

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

struct xstate_header {
	u64 xstate_bv;
	u64 xcomp_bv;
	u64 reserved[6];
} __attribute__((packed));

struct xstate {
	u8 i387[512];
	struct xstate_header header;
	u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP		BIT_ULL(0)
#define XFEATURE_MASK_SSE		BIT_ULL(1)
#define XFEATURE_MASK_YMM		BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
#define XFEATURE_MASK_PT		BIT_ULL(8)
#define XFEATURE_MASK_PKRU		BIT_ULL(9)
#define XFEATURE_MASK_PASID		BIT_ULL(10)
#define XFEATURE_MASK_CET_USER		BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL	BIT_ULL(12)
#define XFEATURE_MASK_LBR		BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)

#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
					 XFEATURE_MASK_ZMM_Hi256 | \
					 XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
					 XFEATURE_MASK_XTILE_CFG)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2.  Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32	function;
	u16	index;
	u8	reg;
	u8	bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})
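
/*
 * Usage sketch (illustrative, not from the original header): feature values
 * are passed by value to the query helpers defined/declared later on, e.g.
 * kvm_cpu_has() in host code and this_cpu_has() in guest code, assuming the
 * usual selftests harness (TEST_REQUIRE() comes from test_util.h):
 *
 *	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
 *	...
 *	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
 */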

/*
 * Basic Leafs, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_OSPKE		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)

/*
 * Extended Leafs, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32	function;
	u8	index;
	u8	reg;
	u8	lo_bit;
	u8	hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})
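
/*
 * Usage sketch (illustrative, not from the original header): properties
 * extract a multi-bit field, e.g. reading the host's physical address width:
 *
 *	uint32_t maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 *
 * Callers should first verify the leaf exists, e.g. via this_cpu_has_p() or
 * kvm_cpu_has_p(), both defined later in this header.
 */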

#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK		KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.  Fixed counters also have bizarre enumeration, but inverted
 * from arch events for general purpose counters.  Fixed counters are
 * supported if a feature flag is set **OR** the total number of fixed
 * counters is greater than the index of the counter.
 *
 * Wrap the events for general purpose and fixed counters to simplify checking
 * whether or not a given architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature f;
};
#define KVM_X86_PMU_FEATURE(__reg, __bit)				\
({									\
	struct kvm_x86_pmu_feature feature = {				\
		.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
	};								\
									\
	kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||		\
			  KVM_CPUID_##__reg == KVM_CPUID_ECX);		\
	feature;							\
})

#define X86_PMU_FEATURE_CPU_CYCLES			KVM_X86_PMU_FEATURE(EBX, 0)
#define X86_PMU_FEATURE_INSNS_RETIRED			KVM_X86_PMU_FEATURE(EBX, 1)
#define X86_PMU_FEATURE_REFERENCE_CYCLES		KVM_X86_PMU_FEATURE(EBX, 2)
#define X86_PMU_FEATURE_LLC_REFERENCES			KVM_X86_PMU_FEATURE(EBX, 3)
#define X86_PMU_FEATURE_LLC_MISSES			KVM_X86_PMU_FEATURE(EBX, 4)
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED		KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED		KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS			KVM_X86_PMU_FEATURE(EBX, 7)

#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED		KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED		KVM_X86_PMU_FEATURE(ECX, 1)
#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 2)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED		KVM_X86_PMU_FEATURE(ECX, 3)
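
/*
 * Usage sketch (illustrative, not from the original header): the PMU
 * wrappers hide the inverted (arch events) vs. non-inverted (fixed counters)
 * enumeration described above behind a single predicate, e.g.:
 *
 *	if (kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED))
 *		...
 *	if (this_pmu_has(X86_PMU_FEATURE_CPU_CYCLES_FIXED))
 *		...
 */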

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
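
/*
 * Worked example (illustrative): for the raw CPUID.0x1.EAX value 0x000806ec,
 * x86_family() returns 0x6 (the base family isn't 0xf, so the extended
 * family bits are ignored) and x86_model() returns 0x8e (extended model 0x8
 * from EAX[19:16] prepended to base model 0xe from EAX[7:4]).
 */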

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK	BIT_ULL(0)
#define PTE_WRITABLE_MASK	BIT_ULL(1)
#define PTE_USER_MASK		BIT_ULL(2)
#define PTE_ACCESSED_MASK	BIT_ULL(5)
#define PTE_DIRTY_MASK		BIT_ULL(6)
#define PTE_LARGE_MASK		BIT_ULL(7)
#define PTE_GLOBAL_MASK		BIT_ULL(8)
#define PTE_NX_MASK		BIT_ULL(63)

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)
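
/*
 * Worked example (illustrative): each paging level covers 9 more address
 * bits, so HUGEPAGE_SHIFT(2) = 21 and HUGEPAGE_SIZE(2) = 2MiB, while
 * HUGEPAGE_SHIFT(3) = 30 and HUGEPAGE_SIZE(3) = 1GiB.  PTE_GET_PFN() of a
 * PTE holding physical address 0x123000 plus low flag bits yields 0x123.
 */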

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}


static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
			     : /* output */ "=a" (tmp)
			     : /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	__asm__ __volatile__("xgetbv;"
			     : "=a" (eax), "=d" (edx)
			     : "c" (index));
	return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

static inline void wrpkru(u32 pkru)
{
	/* Note, ECX and EDX are architecturally required to be '0'. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
		     : "=a" (*eax),
		       "=b" (*ebx),
		       "=c" (*ecx),
		       "=d" (*edx)
		     : "0" (*eax), "2" (*ecx)
		     : "memory");
}

static inline void cpuid(uint32_t function,
			 uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline uint32_t this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
				      uint8_t reg, uint8_t lo, uint8_t hi)
{
	uint32_t gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
	}

	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}

static __always_inline uint64_t this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void udelay(unsigned long usec)
{
	uint64_t start, now, cycles;

	GUEST_ASSERT(guest_tsc_khz);
	cycles = guest_tsc_khz / 1000 * usec;

	/*
	 * Deliberately don't PAUSE, a.k.a. cpu_relax(), so that the delay is
	 * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits.
	 */
	start = rdtsc();
	do {
		now = rdtsc();
	} while (now - start < cycles);
}
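
/*
 * Usage sketch (illustrative): busy-wait in guest code for ~100us without
 * exiting, e.g. while waiting for an interrupt to be delivered:
 *
 *	udelay(100);
 *
 * Note, accuracy depends on guest_tsc_khz having been initialized by the
 * selftests harness before guest code runs.
 */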

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
		)

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);

static inline uint32_t kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
	}

	TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}

static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}
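
/*
 * Usage sketch (illustrative): allocate a single-entry array, populate it,
 * and free it when done (the helper asserts on allocation failure, so no
 * NULL check is needed):
 *
 *	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(1);
 *
 *	cpuid->entries[0].function = 0x1;
 *	...
 *	free(cpuid);
 */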

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							       uint32_t function,
							       uint32_t index)
{
	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							     uint32_t function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     uint32_t value);
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);

static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
				  struct kvm_x86_cpu_feature feature)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	return *((&entry->eax) + feature.reg) & BIT(feature.bit);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

/*
 * Assert on MSR access(es) and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the
 * macro is printed, not the value the macro resolves to (due to macro
 * expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)			\
do {									\
	if (__builtin_constant_p(msr)) {				\
		TEST_ASSERT(cond, fmt, str, args);			\
	} else if (!(cond)) {						\
		char buf[16];						\
									\
		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);		\
		TEST_ASSERT(cond, fmt, buf, args);			\
	}								\
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(uint32_t msr)
{
	return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val)							\
do {											\
	uint64_t r, v = val;								\
											\
	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
	if (!is_durable_msr(msr))							\
		break;									\
	r = vcpu_get_msr(vcpu, msr);							\
	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
} while (0)
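
/*
 * Usage sketch (illustrative): write an MSR and rely on the macro's
 * read-back assertion for durable MSRs, e.g. with an arbitrary MSR from
 * msr-index.h:
 *
 *	vcpu_set_msr(vcpu, MSR_IA32_MISC_ENABLE, 0);
 *
 * A failed write or mismatched read-back prints the MSR by name, e.g.
 * "MSR_IA32_MISC_ENABLE", thanks to the stringified argument.
 */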

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2;
	uint32_t reserved;
};

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting
 * SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define __KVM_ASM_SAFE(insn, fep)				\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	fep "1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov %%r9b, %[vector]\n\t"				\
	"mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE(insn)	__KVM_ASM_SAFE(insn, "")
#define KVM_ASM_SAFE_FEP(insn)	__KVM_ASM_SAFE(insn, KVM_FEP)

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS	"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_fep(insn, inputs...)				\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec_fep(insn, error_code, inputs...)		\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})
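
/*
 * Usage sketch (illustrative): execute an instruction that may fault and
 * assert on the resulting vector instead of crashing the guest, e.g. a
 * WRMSR of a hypothetical MSR index "msr" that is expected to #GP:
 *
 *	uint8_t vector = kvm_asm_safe("wrmsr", "a"(0u), "d"(0u), "c"(msr));
 *
 *	GUEST_ASSERT(vector == 13);	(13 == #GP)
 */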

#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\
static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val)	\
{									\
	uint64_t error_code;						\
	uint8_t vector;							\
	uint32_t a, d;							\
									\
	asm volatile(KVM_ASM_SAFE##_FEP(#insn)				\
		     : "=a"(a), "=d"(d),				\
		       KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : "c"(idx)						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
									\
	*val = (uint64_t)a | ((uint64_t)d << 32);			\
	return vector;							\
}

/*
 * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
 * use ECX as an input index, and EDX:EAX as a 64-bit output.
 */
#define BUILD_READ_U64_SAFE_HELPERS(insn)		\
	BUILD_READ_U64_SAFE_HELPER(insn, , )		\
	BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)	\

BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
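
/*
 * Usage sketch (illustrative): the generated rdmsr_safe() returns the
 * faulting vector (0 on success) and the 64-bit value via an out param,
 * e.g. with an arbitrary MSR from msr-index.h:
 *
 *	uint64_t val;
 *
 *	if (!rdmsr_safe(MSR_IA32_SPEC_CTRL, &val))
 *		...
 */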

static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

static inline bool kvm_is_pmu_enabled(void)
{
	return get_kvm_param_bool("enable_pmu");
}

static inline bool kvm_is_forced_emulation_enabled(void)
{
	return !!get_kvm_param_integer("force_emulation_prefix");
}

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);

static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
						     uint64_t size, uint64_t flags)
{
	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
					       uint64_t flags)
{
	uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

	GUEST_ASSERT(!ret);
}

void __vm_xsave_require_permission(uint64_t xfeature, const char *name);

#define vm_xsave_require_permission(xfeature)	\
	__vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level)	((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level)	(1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K	PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M	PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G	PG_LEVEL_SIZE(PG_LEVEL_1G)

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0)  /* Protection Enable */
#define X86_CR0_MP	(1UL<<1)  /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2)  /* Emulation */
#define X86_CR0_TS	(1UL<<3)  /* Task Switched */
#define X86_CR0_ET	(1UL<<4)  /* Extension Type */
#define X86_CR0_NE	(1UL<<5)  /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT	0
#define PFERR_WRITE_BIT		1
#define PFERR_USER_BIT		2
#define PFERR_RSVD_BIT		3
#define PFERR_FETCH_BIT		4
#define PFERR_PK_BIT		5
#define PFERR_SGX_BIT		15
#define PFERR_GUEST_FINAL_BIT	32
#define PFERR_GUEST_PAGE_BIT	33
#define PFERR_IMPLICIT_ACCESS_BIT	48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
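
/*
 * Worked example (illustrative): a user-mode write to a present, read-only
 * page yields the error code
 * PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK, i.e. 0x7.
 */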

bool sys_clocksource_is_based_on_tsc(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */