/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "kvm_util.h"
#include "ucall_common.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern bool host_cpu_is_hygon;
extern bool host_cpu_is_amd_compatible;
extern u64 guest_tsc_khz;

#ifndef MAX_NR_CPUID_ENTRIES
#define MAX_NR_CPUID_ENTRIES 100
#endif

#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

#define NMI_VECTOR		0x02

const char *ex_str(int vector);

#define X86_EFLAGS_FIXED	 (1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

struct xstate_header {
	u64				xstate_bv;
	u64				xcomp_bv;
	u64				reserved[6];
} __attribute__((packed));

struct xstate {
	u8				i387[512];
	struct xstate_header		header;
	u8				extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP		BIT_ULL(0)
#define XFEATURE_MASK_SSE		BIT_ULL(1)
#define XFEATURE_MASK_YMM		BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
#define XFEATURE_MASK_PT		BIT_ULL(8)
#define XFEATURE_MASK_PKRU		BIT_ULL(9)
#define XFEATURE_MASK_PASID		BIT_ULL(10)
#define XFEATURE_MASK_CET_USER		BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL	BIT_ULL(12)
#define XFEATURE_MASK_LBR		BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)

#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
					 XFEATURE_MASK_ZMM_Hi256 | \
					 XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
					 XFEATURE_MASK_XTILE_CFG)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2.  Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32	function;
	u16	index;
	u8	reg;
	u8	bit;
};
#define	KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})
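
/*
 * Example (illustrative only): X86_FEATURE_* values are passed by value to
 * the query helpers defined later in this header, e.g. to gate a test on
 * host/KVM support from userspace, or to probe CPUID from within the guest:
 *
 *	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
 *	GUEST_ASSERT(this_cpu_has(X86_FEATURE_MOVBE));
 */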

/*
 * Basic Leaves, a.k.a. Intel defined
 */
#define	X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define	X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define	X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define	X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define	X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define	X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define	X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define	X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define	X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define	X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define	X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define	X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define	X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define	X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define	X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define	X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define	X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define	X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define	X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define	X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define	X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define	X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define	X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define	X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define	X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define	X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define	X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define	X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define	X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define	X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define	X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define	X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define	X86_FEATURE_OSPKE		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define	X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define	X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define	X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define	X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define	X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define	X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define	X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define	X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define	X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define	X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define	X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define	X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define	X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define	X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)

/*
 * Extended Leaves, a.k.a. AMD defined
 */
#define	X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define	X86_FEATURE_PERFCTR_CORE	KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
#define	X86_FEATURE_PERFCTR_NB		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
#define	X86_FEATURE_PERFCTR_LLC		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
#define	X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define	X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define	X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define	X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define	X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define	X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define	X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define	X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define	X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define	X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define	X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define	X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define	X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define	X86_FEATURE_V_VMSAVE_VMLOAD	KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 15)
#define	X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define	X86_FEATURE_IDLE_HLT		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
#define	X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define	X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
#define	X86_FEATURE_SEV_SNP		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
#define	X86_FEATURE_PERFMON_V2		KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
#define	X86_FEATURE_LBR_PMC_FREEZE	KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32	function;
	u8	index;
	u8	reg;
	u8	lo_bit;
	u8	hi_bit;
};
#define	KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})
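
/*
 * Example (illustrative only): properties extract a multi-bit field, e.g.
 * the number of physical address bits from CPUID.0x80000008.EAX[7:0]:
 *
 *	u32 paddr_bits = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 */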

#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK		KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd,  0, EAX,  0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd,  0, EBX,  0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd,  0, ECX,  0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd,  0, EDX,  0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX,  0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX,  0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX,  0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX,  0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0,  15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0,  15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
#define X86_PROPERTY_NR_PERFCTR_CORE		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
#define X86_PROPERTY_NR_PERFCTR_NB		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.  Fixed counters also have bizarre enumeration, but inverted
 * from arch events for general purpose counters.  Fixed counters are
 * supported if a feature flag is set **OR** the total number of fixed
 * counters is greater than the index of the counter.
 *
 * Wrap the events for general purpose and fixed counters to simplify checking
 * whether or not a given architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature f;
};
#define	KVM_X86_PMU_FEATURE(__reg, __bit)				\
({									\
	struct kvm_x86_pmu_feature feature = {				\
		.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
	};								\
									\
	kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||		\
			  KVM_CPUID_##__reg == KVM_CPUID_ECX);		\
	feature;							\
})

#define X86_PMU_FEATURE_CPU_CYCLES			KVM_X86_PMU_FEATURE(EBX, 0)
#define X86_PMU_FEATURE_INSNS_RETIRED			KVM_X86_PMU_FEATURE(EBX, 1)
#define X86_PMU_FEATURE_REFERENCE_CYCLES		KVM_X86_PMU_FEATURE(EBX, 2)
#define X86_PMU_FEATURE_LLC_REFERENCES			KVM_X86_PMU_FEATURE(EBX, 3)
#define X86_PMU_FEATURE_LLC_MISSES			KVM_X86_PMU_FEATURE(EBX, 4)
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED		KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED		KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS			KVM_X86_PMU_FEATURE(EBX, 7)
#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND		KVM_X86_PMU_FEATURE(EBX, 8)
#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC		KVM_X86_PMU_FEATURE(EBX, 9)
#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND		KVM_X86_PMU_FEATURE(EBX, 10)
#define X86_PMU_FEATURE_TOPDOWN_RETIRING		KVM_X86_PMU_FEATURE(EBX, 11)
#define X86_PMU_FEATURE_LBR_INSERTS			KVM_X86_PMU_FEATURE(EBX, 12)

#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED		KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED		KVM_X86_PMU_FEATURE(ECX, 1)
#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 2)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED		KVM_X86_PMU_FEATURE(ECX, 3)
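
/*
 * Example (illustrative only): the wrappers hide the inverted arch event
 * enumeration, so querying an arch event or fixed counter is a single call
 * to the kvm_pmu_has()/this_pmu_has() helpers defined below:
 *
 *	TEST_REQUIRE(kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED));
 *	GUEST_ASSERT(this_pmu_has(X86_PMU_FEATURE_CPU_CYCLES_FIXED));
 */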

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
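
/*
 * Worked example (illustrative): FMS 0x000906ea from CPUID.0x1.EAX decodes
 * to family 0x6 ((eax >> 8) & 0xf; the extended family is added only when
 * the base family is 0xf) and model 0x9e (extended model 0x9 << 4, ORed with
 * base model 0xe).
 */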

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct desc_ptr {
	u16 size;
	u64 address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline u64 get_desc64_base(const struct desc64 *desc)
{
	return (u64)desc->base3 << 32 |
	       (u64)desc->base2 << 24 |
	       (u64)desc->base1 << 16 |
	       (u64)desc->base0;
}

static inline u64 rdtsc(void)
{
	u32 eax, edx;
	u64 tsc_val;
	/*
	 * The LFENCE waits (on Intel CPUs) until all previous instructions
	 * have executed.  Per the SDM, if software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((u64)edx) << 32 | eax;
	return tsc_val;
}

static inline u64 rdtscp(u32 *aux)
{
	u32 eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((u64)edx) << 32 | eax;
}

static inline u64 rdmsr(u32 msr)
{
	u32 a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 msr, u64 value)
{
	u32 a = value;
	u32 d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

static inline u16 inw(u16 port)
{
	u16 tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

static inline u16 get_es(void)
{
	u16 es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline u16 get_cs(void)
{
	u16 cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline u16 get_ss(void)
{
	u16 ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline u16 get_ds(void)
{
	u16 ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline u16 get_fs(void)
{
	u16 fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline u16 get_gs(void)
{
	u16 gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline u16 get_tr(void)
{
	u16 tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline u64 get_cr0(void)
{
	u64 cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline void set_cr0(u64 val)
{
	__asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
}

static inline u64 get_cr3(void)
{
	u64 cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline void set_cr3(u64 val)
{
	__asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
}

static inline u64 get_cr4(void)
{
	u64 cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(u64 val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline u64 get_cr8(void)
{
	u64 cr8;

	__asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
	return cr8;
}

static inline void set_cr8(u64 val)
{
	__asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
}

static inline void set_idt(const struct desc_ptr *idt_desc)
{
	__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
}

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	__asm__ __volatile__("xgetbv;"
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

static inline void wrpkru(u32 pkru)
{
	/* Note, ECX and EDX are architecturally required to be '0'. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(u16 port, u32 value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(u32 function, u32 index,
			   u32 *eax, u32 *ebx,
			   u32 *ecx, u32 *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(u32 function,
			 u32 *eax, u32 *ebx,
			 u32 *ecx, u32 *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline u32 this_cpu_fms(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline u32 this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline u32 this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
	const u32 *chunk = (const u32 *)vendor;
	u32 eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline bool this_cpu_is_hygon(void)
{
	return this_cpu_vendor_string_is("HygonGenuine");
}

static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
{
	u32 gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	u32 max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	u32 nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
	}

	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}

static __always_inline u64 this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

typedef u32		__attribute__((vector_size(16))) sse128_t;
#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void udelay(unsigned long usec)
{
	u64 start, now, cycles;

	GUEST_ASSERT(guest_tsc_khz);
	cycles = guest_tsc_khz / 1000 * usec;

	/*
	 * Deliberately don't PAUSE, a.k.a. cpu_relax(), so that the delay is
	 * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits.
	 */
	start = rdtsc();
	do {
		now = rdtsc();
	} while (now - start < cycles);
}

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
		)

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(u32 msr_index);
u64 kvm_get_feature_msr(u64 msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       u32 function, u32 index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

static inline u32 kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline u32 kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline u32 kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
		       struct kvm_x86_cpu_property property);

static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	u32 max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	u32 nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
	}

	TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}

static __always_inline u64 kvm_cpu_supported_xcr0(void)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}
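
/*
 * Example (illustrative only): allocate a worst-case sized instance, e.g.
 * to grab a snapshot of a vCPU's CPUID for local inspection:
 *
 *	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
 *
 *	vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
 *	...
 *	free(cpuid);
 */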

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);

static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
{
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							      u32 function,
							      u32 index)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");

	vcpu_get_cpuid(vcpu);

	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							    u32 function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_get_cpuid(vcpu);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_get_cpuid(vcpu);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     u32 value);
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function);

static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
				  struct kvm_x86_cpu_feature feature)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	return *((&entry->eax) + feature.reg) & BIT(feature.bit);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value);

/*
 * Assert on MSR access(es) and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the
 * macro is printed, not the value the macro resolves to (due to macro
 * expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)				\
do {										\
	if (__builtin_constant_p(msr)) {					\
		TEST_ASSERT(cond, fmt, str, args);				\
	} else if (!(cond)) {							\
		char buf[16];							\
										\
		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);			\
		TEST_ASSERT(cond, fmt, buf, args);				\
	}									\
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(u32 msr)
{
	return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val)							\
do {											\
	u64 r, v = val;									\
											\
	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
	if (!is_durable_msr(msr))							\
		break;									\
	r = vcpu_get_msr(vcpu, msr);							\
	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
} while (0)
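
/*
 * Example (illustrative only): vcpu_set_msr() asserts success and, for
 * durable MSRs, verifies the written value sticks, whereas _vcpu_set_msr()
 * lets a test expect failure, e.g. for a non-canonical segment base:
 *
 *	vcpu_set_msr(vcpu, MSR_FS_BASE, 0xdeadbeefull);
 *	TEST_ASSERT(_vcpu_set_msr(vcpu, MSR_FS_BASE, NONCANONICAL) != 1,
 *		    "Expected KVM to reject a non-canonical FS.base");
 */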

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);

struct ex_regs {
	u64 rax, rcx, rdx, rbx;
	u64 rbp, rsi, rdi;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
	u64 vector;
	u64 error_code;
	u64 rip;
	u64 cs;
	u64 rflags;
};

struct idt_entry {
	u16 offset0;
	u16 selector;
	u16 ist : 3;
	u16 : 5;
	u16 type : 4;
	u16 : 1;
	u16 dpl : 2;
	u16 p : 1;
	u16 offset1;
	u32 offset2;
	u32 reserved;
};

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			void (*handler)(struct ex_regs *));

/*
 * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be
 * used to signal "no exception".
 */
#define KVM_MAGIC_DE_VECTOR 0xff

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting
 * SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define __KVM_ASM_SAFE(insn, fep)				\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	fep "1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov  %%r9b, %[vector]\n\t"				\
	"mov  %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "")
#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP)

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS	"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	u64 ign_error_code;						\
	u8 vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	u8 vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_fep(insn, inputs...)				\
({									\
	u64 ign_error_code;						\
	u8 vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec_fep(insn, error_code, inputs...)		\
({									\
	u8 vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\
static inline u8 insn##_safe ##_fep(u32 idx, u64 *val)			\
{									\
	u64 error_code;							\
	u8 vector;							\
	u32 a, d;							\
									\
	asm volatile(KVM_ASM_SAFE##_FEP(#insn)				\
		     : "=a"(a), "=d"(d),				\
		       KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : "c"(idx)						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
									\
	*val = (u64)a | ((u64)d << 32);					\
	return vector;							\
}

/*
 * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
 * use ECX as an input index, and EDX:EAX as a 64-bit output.
 */
#define BUILD_READ_U64_SAFE_HELPERS(insn)				\
	BUILD_READ_U64_SAFE_HELPER(insn, , )				\
	BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\

BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)

static inline u8 wrmsr_safe(u32 msr, u64 val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
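
/*
 * Example (illustrative only, assuming KVM's ignore_msrs is off): the *_safe()
 * helpers return the raised vector, or '0' if the instruction executed
 * cleanly, e.g. from guest code:
 *
 *	u64 val;
 *	u8 vector = rdmsr_safe(0xdeadc0de, &val);
 *
 *	GUEST_ASSERT(vector == 13);	// #GP expected for a bogus MSR
 *	GUEST_ASSERT(!wrmsr_safe(MSR_FS_BASE, 0));
 */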

static inline u8 xsetbv_safe(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

static inline bool get_kvm_intel_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm_intel", param);
}

static inline bool get_kvm_amd_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm_amd", param);
}

static inline int get_kvm_intel_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm_intel", param);
}

static inline int get_kvm_amd_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm_amd", param);
}

static inline bool kvm_is_pmu_enabled(void)
{
	return get_kvm_param_bool("enable_pmu");
}

static inline bool kvm_is_forced_emulation_enabled(void)
{
	return !!get_kvm_param_integer("force_emulation_prefix");
}

static inline bool kvm_is_unrestricted_guest_enabled(void)
{
	return get_kvm_intel_param_bool("unrestricted_guest");
}

static inline bool kvm_is_ignore_msrs(void)
{
	return get_kvm_param_bool("ignore_msrs");
}

static inline bool kvm_is_lbrv_enabled(void)
{
	return !!get_kvm_amd_param_integer("lbrv");
}

u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);

u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
void xen_hypercall(u64 nr, u64 a0, void *a1);

static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
	u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

	GUEST_ASSERT(!ret);
}

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}
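
/*
 * Example (illustrative only): arm the wake event with IRQs disabled, then
 * rely on the STI shadow to guarantee HLT executes before the IRQ arrives:
 *
 *	cli();
 *	... arm the wake event, e.g. send a self-IPI ...
 *	safe_halt();	// the IRQ wakes the vCPU from HLT
 */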

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
	asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process all
 * interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
	asm volatile ("sti; nop; cli");
}

static inline void sti(void)
{
	asm volatile("sti");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

void __vm_xsave_require_permission(u64 xfeature, const char *name);

#define vm_xsave_require_permission(xfeature)	\
	__vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_256T
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)

#define PTE_PRESENT_MASK(mmu)		((mmu)->arch.pte_masks.present)
#define PTE_WRITABLE_MASK(mmu)		((mmu)->arch.pte_masks.writable)
#define PTE_USER_MASK(mmu)		((mmu)->arch.pte_masks.user)
#define PTE_READABLE_MASK(mmu)		((mmu)->arch.pte_masks.readable)
#define PTE_EXECUTABLE_MASK(mmu)	((mmu)->arch.pte_masks.executable)
#define PTE_ACCESSED_MASK(mmu)		((mmu)->arch.pte_masks.accessed)
#define PTE_DIRTY_MASK(mmu)		((mmu)->arch.pte_masks.dirty)
#define PTE_HUGE_MASK(mmu)		((mmu)->arch.pte_masks.huge)
#define PTE_NX_MASK(mmu)		((mmu)->arch.pte_masks.nx)
#define PTE_C_BIT_MASK(mmu)		((mmu)->arch.pte_masks.c)
#define PTE_S_BIT_MASK(mmu)		((mmu)->arch.pte_masks.s)
#define PTE_ALWAYS_SET_MASK(mmu)	((mmu)->arch.pte_masks.always_set)

/*
 * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
 * if it's executable or readable, as EPT supports execute-only PTEs, but not
 * write-only PTEs.
 */
#define is_present_pte(mmu, pte)		\
	(PTE_PRESENT_MASK(mmu) ?		\
	 !!(*(pte) & PTE_PRESENT_MASK(mmu)) :	\
	 !!(*(pte) & (PTE_READABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu))))
#define is_executable_pte(mmu, pte)	\
	((*(pte) & (PTE_EXECUTABLE_MASK(mmu) | PTE_NX_MASK(mmu))) == PTE_EXECUTABLE_MASK(mmu))
#define is_writable_pte(mmu, pte)	(!!(*(pte) & PTE_WRITABLE_MASK(mmu)))
#define is_user_pte(mmu, pte)		(!!(*(pte) & PTE_USER_MASK(mmu)))
#define is_accessed_pte(mmu, pte)	(!!(*(pte) & PTE_ACCESSED_MASK(mmu)))
#define is_dirty_pte(mmu, pte)		(!!(*(pte) & PTE_DIRTY_MASK(mmu)))
#define is_huge_pte(mmu, pte)		(!!(*(pte) & PTE_HUGE_MASK(mmu)))
#define is_nx_pte(mmu, pte)		(!is_executable_pte(mmu, pte))

void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
		  struct pte_masks *pte_masks);

void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
		   gpa_t gpa, int level);
void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
		    u64 nr_bytes, int level);

void vm_enable_tdp(struct kvm_vm *vm);
bool kvm_cpu_has_tdp(void);
void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
void tdp_identity_map_default_memslots(struct kvm_vm *vm);
void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE          (1UL<<0) /* Protection Enable */
#define X86_CR0_MP          (1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM          (1UL<<2) /* Emulation */
#define X86_CR0_TS          (1UL<<3) /* Task Switched */
#define X86_CR0_ET          (1UL<<4) /* Extension Type */
#define X86_CR0_NE          (1UL<<5) /* Numeric Error */
#define X86_CR0_WP          (1UL<<16) /* Write Protect */
#define X86_CR0_AM          (1UL<<18) /* Alignment Mask */
#define X86_CR0_NW          (1UL<<29) /* Not Write-through */
#define X86_CR0_CD          (1UL<<30) /* Cache Disable */
#define X86_CR0_PG          (1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

bool sys_clocksource_is_based_on_tsc(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */