/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * A few random additions are:
 * Copyright (C) 2006 Qumranet
 *    Avi Kivity <avi@qumranet.com>
 *    Yaniv Kamay <yaniv@qumranet.com>
 */
#ifndef VMX_H
#define VMX_H


#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

#include <uapi/asm/vmx.h>
#include <asm/trapnr.h>
#include <asm/vmxfeatures.h>

#define VMCS_CONTROL_BIT(x) BIT(VMX_FEATURE_##x & 0x1f)
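/*
 * VMX_FEATURE_* constants from <asm/vmxfeatures.h> encode each feature as
 * (word * 32 + bit); masking with 0x1f keeps only the bit position within
 * the feature's 32-bit VM-execution control field. Illustrative example
 * (the exact values live in vmxfeatures.h): a feature defined as bit 7 of
 * its word expands to BIT(7) here.
 */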

/*
 * Definitions of Primary Processor-Based VM-Execution Controls.
 */
#define CPU_BASED_INTR_WINDOW_EXITING VMCS_CONTROL_BIT(INTR_WINDOW_EXITING)
#define CPU_BASED_USE_TSC_OFFSETTING VMCS_CONTROL_BIT(USE_TSC_OFFSETTING)
#define CPU_BASED_HLT_EXITING VMCS_CONTROL_BIT(HLT_EXITING)
#define CPU_BASED_INVLPG_EXITING VMCS_CONTROL_BIT(INVLPG_EXITING)
#define CPU_BASED_MWAIT_EXITING VMCS_CONTROL_BIT(MWAIT_EXITING)
#define CPU_BASED_RDPMC_EXITING VMCS_CONTROL_BIT(RDPMC_EXITING)
#define CPU_BASED_RDTSC_EXITING VMCS_CONTROL_BIT(RDTSC_EXITING)
#define CPU_BASED_CR3_LOAD_EXITING VMCS_CONTROL_BIT(CR3_LOAD_EXITING)
#define CPU_BASED_CR3_STORE_EXITING VMCS_CONTROL_BIT(CR3_STORE_EXITING)
#define CPU_BASED_ACTIVATE_TERTIARY_CONTROLS VMCS_CONTROL_BIT(TERTIARY_CONTROLS)
#define CPU_BASED_CR8_LOAD_EXITING VMCS_CONTROL_BIT(CR8_LOAD_EXITING)
#define CPU_BASED_CR8_STORE_EXITING VMCS_CONTROL_BIT(CR8_STORE_EXITING)
#define CPU_BASED_TPR_SHADOW VMCS_CONTROL_BIT(VIRTUAL_TPR)
#define CPU_BASED_NMI_WINDOW_EXITING VMCS_CONTROL_BIT(NMI_WINDOW_EXITING)
#define CPU_BASED_MOV_DR_EXITING VMCS_CONTROL_BIT(MOV_DR_EXITING)
#define CPU_BASED_UNCOND_IO_EXITING VMCS_CONTROL_BIT(UNCOND_IO_EXITING)
#define CPU_BASED_USE_IO_BITMAPS VMCS_CONTROL_BIT(USE_IO_BITMAPS)
#define CPU_BASED_MONITOR_TRAP_FLAG VMCS_CONTROL_BIT(MONITOR_TRAP_FLAG)
#define CPU_BASED_USE_MSR_BITMAPS VMCS_CONTROL_BIT(USE_MSR_BITMAPS)
#define CPU_BASED_MONITOR_EXITING VMCS_CONTROL_BIT(MONITOR_EXITING)
#define CPU_BASED_PAUSE_EXITING VMCS_CONTROL_BIT(PAUSE_EXITING)
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS VMCS_CONTROL_BIT(SEC_CONTROLS)

#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
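/*
 * This and the other *_ALWAYSON_WITHOUT_TRUE_MSR values below are the
 * control bits that default to 1 on processors without the "true"
 * capability MSRs (see VMX_BASIC_TRUE_CTLS).
 */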

/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES VMCS_CONTROL_BIT(VIRT_APIC_ACCESSES)
#define SECONDARY_EXEC_ENABLE_EPT VMCS_CONTROL_BIT(EPT)
#define SECONDARY_EXEC_DESC VMCS_CONTROL_BIT(DESC_EXITING)
#define SECONDARY_EXEC_ENABLE_RDTSCP VMCS_CONTROL_BIT(RDTSCP)
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE VMCS_CONTROL_BIT(VIRTUAL_X2APIC)
#define SECONDARY_EXEC_ENABLE_VPID VMCS_CONTROL_BIT(VPID)
#define SECONDARY_EXEC_WBINVD_EXITING VMCS_CONTROL_BIT(WBINVD_EXITING)
#define SECONDARY_EXEC_UNRESTRICTED_GUEST VMCS_CONTROL_BIT(UNRESTRICTED_GUEST)
#define SECONDARY_EXEC_APIC_REGISTER_VIRT VMCS_CONTROL_BIT(APIC_REGISTER_VIRT)
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY VMCS_CONTROL_BIT(VIRT_INTR_DELIVERY)
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING VMCS_CONTROL_BIT(PAUSE_LOOP_EXITING)
#define SECONDARY_EXEC_RDRAND_EXITING VMCS_CONTROL_BIT(RDRAND_EXITING)
#define SECONDARY_EXEC_ENABLE_INVPCID VMCS_CONTROL_BIT(INVPCID)
#define SECONDARY_EXEC_ENABLE_VMFUNC VMCS_CONTROL_BIT(VMFUNC)
#define SECONDARY_EXEC_SHADOW_VMCS VMCS_CONTROL_BIT(SHADOW_VMCS)
#define SECONDARY_EXEC_ENCLS_EXITING VMCS_CONTROL_BIT(ENCLS_EXITING)
#define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
#define SECONDARY_EXEC_EPT_VIOLATION_VE VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
#define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
#define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
#define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION)
#define SECONDARY_EXEC_NOTIFY_VM_EXITING VMCS_CONTROL_BIT(NOTIFY_VM_EXITING)

/*
 * Definitions of Tertiary Processor-Based VM-Execution Controls.
 */
#define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT)

#define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
#define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
#define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS)
#define PIN_BASED_VMX_PREEMPTION_TIMER VMCS_CONTROL_BIT(PREEMPTION_TIMER)
#define PIN_BASED_POSTED_INTR VMCS_CONTROL_BIT(POSTED_INTR)

#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016

#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
#define VM_EXIT_SAVE_IA32_PAT 0x00040000
#define VM_EXIT_LOAD_IA32_PAT 0x00080000
#define VM_EXIT_SAVE_IA32_EFER 0x00100000
#define VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
#define VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
#define VM_EXIT_LOAD_CET_STATE 0x10000000

#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff

#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VM_ENTRY_IA32E_MODE 0x00000200
#define VM_ENTRY_SMM 0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
#define VM_ENTRY_PT_CONCEAL_PIP 0x00020000
#define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
#define VM_ENTRY_LOAD_CET_STATE 0x00100000

#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff

/* VMFUNC functions */
#define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28)
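/*
 * VM-function feature flags are stashed in the upper bits (28 and up) of
 * their vmxfeatures.h word, so subtracting 28 from the in-word bit position
 * yields the function's bit within the VM_FUNCTION_CONTROL field; EPTP
 * switching is VM function 0.
 */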

#define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
#define VMFUNC_EPTP_ENTRIES 512

#define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48)
#define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49)
#define VMX_BASIC_INOUT BIT_ULL(54)
#define VMX_BASIC_TRUE_CTLS BIT_ULL(55)
#define VMX_BASIC_NO_HW_ERROR_CODE_CC BIT_ULL(56)

static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
        return vmx_basic & GENMASK_ULL(30, 0);
}

static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
{
        return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
{
        return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
}

static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype)
{
        return revision | ((u64)size << 32) | ((u64)memtype << 50);
}
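/*
 * Illustrative use of the helpers above (not a new API, just an example):
 * after reading MSR_IA32_VMX_BASIC into a u64, bits 30:0 give the VMCS
 * revision identifier, bits 44:32 the VMCS region size and bits 53:50 the
 * required memory type, e.g.
 *
 *      u32 rev  = vmx_basic_vmcs_revision_id(basic);
 *      u32 size = vmx_basic_vmcs_size(basic);
 *      u32 mt   = vmx_basic_vmcs_mem_type(basic);
 *
 * vmx_basic_encode_vmcs_info() builds the same layout, which is handy when
 * synthesizing a VMX_BASIC value for a nested guest.
 */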

#define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5)
#define VMX_MISC_ACTIVITY_HLT BIT_ULL(6)
#define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7)
#define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8)
#define VMX_MISC_INTEL_PT BIT_ULL(14)
#define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15)
#define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28)
#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29)
#define VMX_MISC_ZERO_LEN_INS BIT_ULL(30)
#define VMX_MISC_MSR_LIST_MULTIPLIER 512

static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
        return vmx_misc & GENMASK_ULL(4, 0);
}

static inline int vmx_misc_cr3_count(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
}

static inline int vmx_misc_max_msr(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
}
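/*
 * Per the SDM, bits 27:25 of IA32_VMX_MISC encode the recommended maximum
 * number of entries in the VM-exit/VM-entry MSR lists as 512 * (N + 1);
 * vmx_misc_max_msr() returns the raw N, and VMX_MISC_MSR_LIST_MULTIPLIER
 * above supplies the 512.
 */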

static inline int vmx_misc_mseg_revid(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
}

/* VMCS Encodings */
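/*
 * The encodings below follow the SDM scheme: bit 0 selects the high half of
 * a 64-bit field (the *_HIGH values), bits 11:10 give the field type
 * (control, read-only data, guest state, host state) and bits 14:13 the
 * width (16-bit, 64-bit, 32-bit, natural width); e.g. 0x2xxx encodings are
 * 64-bit fields and 0x6cxx encodings are natural-width host-state fields.
 */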
enum vmcs_field {
        VIRTUAL_PROCESSOR_ID = 0x00000000,
        POSTED_INTR_NV = 0x00000002,
        LAST_PID_POINTER_INDEX = 0x00000008,
        GUEST_ES_SELECTOR = 0x00000800,
        GUEST_CS_SELECTOR = 0x00000802,
        GUEST_SS_SELECTOR = 0x00000804,
        GUEST_DS_SELECTOR = 0x00000806,
        GUEST_FS_SELECTOR = 0x00000808,
        GUEST_GS_SELECTOR = 0x0000080a,
        GUEST_LDTR_SELECTOR = 0x0000080c,
        GUEST_TR_SELECTOR = 0x0000080e,
        GUEST_INTR_STATUS = 0x00000810,
        GUEST_PML_INDEX = 0x00000812,
        HOST_ES_SELECTOR = 0x00000c00,
        HOST_CS_SELECTOR = 0x00000c02,
        HOST_SS_SELECTOR = 0x00000c04,
        HOST_DS_SELECTOR = 0x00000c06,
        HOST_FS_SELECTOR = 0x00000c08,
        HOST_GS_SELECTOR = 0x00000c0a,
        HOST_TR_SELECTOR = 0x00000c0c,
        IO_BITMAP_A = 0x00002000,
        IO_BITMAP_A_HIGH = 0x00002001,
        IO_BITMAP_B = 0x00002002,
        IO_BITMAP_B_HIGH = 0x00002003,
        MSR_BITMAP = 0x00002004,
        MSR_BITMAP_HIGH = 0x00002005,
        VM_EXIT_MSR_STORE_ADDR = 0x00002006,
        VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
        VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
        VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
        VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
        VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
        PML_ADDRESS = 0x0000200e,
        PML_ADDRESS_HIGH = 0x0000200f,
        TSC_OFFSET = 0x00002010,
        TSC_OFFSET_HIGH = 0x00002011,
        VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
        VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
        APIC_ACCESS_ADDR = 0x00002014,
        APIC_ACCESS_ADDR_HIGH = 0x00002015,
        POSTED_INTR_DESC_ADDR = 0x00002016,
        POSTED_INTR_DESC_ADDR_HIGH = 0x00002017,
        VM_FUNCTION_CONTROL = 0x00002018,
        VM_FUNCTION_CONTROL_HIGH = 0x00002019,
        EPT_POINTER = 0x0000201a,
        EPT_POINTER_HIGH = 0x0000201b,
        EOI_EXIT_BITMAP0 = 0x0000201c,
        EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
        EOI_EXIT_BITMAP1 = 0x0000201e,
        EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
        EOI_EXIT_BITMAP2 = 0x00002020,
        EOI_EXIT_BITMAP2_HIGH = 0x00002021,
        EOI_EXIT_BITMAP3 = 0x00002022,
        EOI_EXIT_BITMAP3_HIGH = 0x00002023,
        EPTP_LIST_ADDRESS = 0x00002024,
        EPTP_LIST_ADDRESS_HIGH = 0x00002025,
        VMREAD_BITMAP = 0x00002026,
        VMREAD_BITMAP_HIGH = 0x00002027,
        VMWRITE_BITMAP = 0x00002028,
        VMWRITE_BITMAP_HIGH = 0x00002029,
        VE_INFORMATION_ADDRESS = 0x0000202A,
        VE_INFORMATION_ADDRESS_HIGH = 0x0000202B,
        XSS_EXIT_BITMAP = 0x0000202C,
        XSS_EXIT_BITMAP_HIGH = 0x0000202D,
        ENCLS_EXITING_BITMAP = 0x0000202E,
        ENCLS_EXITING_BITMAP_HIGH = 0x0000202F,
        TSC_MULTIPLIER = 0x00002032,
        TSC_MULTIPLIER_HIGH = 0x00002033,
        TERTIARY_VM_EXEC_CONTROL = 0x00002034,
        TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035,
        SHARED_EPT_POINTER = 0x0000203C,
        PID_POINTER_TABLE = 0x00002042,
        PID_POINTER_TABLE_HIGH = 0x00002043,
        GUEST_PHYSICAL_ADDRESS = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
        VMCS_LINK_POINTER = 0x00002800,
        VMCS_LINK_POINTER_HIGH = 0x00002801,
        GUEST_IA32_DEBUGCTL = 0x00002802,
        GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
        GUEST_IA32_PAT = 0x00002804,
        GUEST_IA32_PAT_HIGH = 0x00002805,
        GUEST_IA32_EFER = 0x00002806,
        GUEST_IA32_EFER_HIGH = 0x00002807,
        GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
        GUEST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
        GUEST_PDPTR0 = 0x0000280a,
        GUEST_PDPTR0_HIGH = 0x0000280b,
        GUEST_PDPTR1 = 0x0000280c,
        GUEST_PDPTR1_HIGH = 0x0000280d,
        GUEST_PDPTR2 = 0x0000280e,
        GUEST_PDPTR2_HIGH = 0x0000280f,
        GUEST_PDPTR3 = 0x00002810,
        GUEST_PDPTR3_HIGH = 0x00002811,
        GUEST_BNDCFGS = 0x00002812,
        GUEST_BNDCFGS_HIGH = 0x00002813,
        GUEST_IA32_RTIT_CTL = 0x00002814,
        GUEST_IA32_RTIT_CTL_HIGH = 0x00002815,
        HOST_IA32_PAT = 0x00002c00,
        HOST_IA32_PAT_HIGH = 0x00002c01,
        HOST_IA32_EFER = 0x00002c02,
        HOST_IA32_EFER_HIGH = 0x00002c03,
        HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
        HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
        PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
        CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
        EXCEPTION_BITMAP = 0x00004004,
        PAGE_FAULT_ERROR_CODE_MASK = 0x00004006,
        PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008,
        CR3_TARGET_COUNT = 0x0000400a,
        VM_EXIT_CONTROLS = 0x0000400c,
        VM_EXIT_MSR_STORE_COUNT = 0x0000400e,
        VM_EXIT_MSR_LOAD_COUNT = 0x00004010,
        VM_ENTRY_CONTROLS = 0x00004012,
        VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
        VM_ENTRY_INTR_INFO_FIELD = 0x00004016,
        VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
        VM_ENTRY_INSTRUCTION_LEN = 0x0000401a,
        TPR_THRESHOLD = 0x0000401c,
        SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
        PLE_GAP = 0x00004020,
        PLE_WINDOW = 0x00004022,
        NOTIFY_WINDOW = 0x00004024,
        VM_INSTRUCTION_ERROR = 0x00004400,
        VM_EXIT_REASON = 0x00004402,
        VM_EXIT_INTR_INFO = 0x00004404,
        VM_EXIT_INTR_ERROR_CODE = 0x00004406,
        IDT_VECTORING_INFO_FIELD = 0x00004408,
        IDT_VECTORING_ERROR_CODE = 0x0000440a,
        VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
        VMX_INSTRUCTION_INFO = 0x0000440e,
        GUEST_ES_LIMIT = 0x00004800,
        GUEST_CS_LIMIT = 0x00004802,
        GUEST_SS_LIMIT = 0x00004804,
        GUEST_DS_LIMIT = 0x00004806,
        GUEST_FS_LIMIT = 0x00004808,
        GUEST_GS_LIMIT = 0x0000480a,
        GUEST_LDTR_LIMIT = 0x0000480c,
        GUEST_TR_LIMIT = 0x0000480e,
        GUEST_GDTR_LIMIT = 0x00004810,
        GUEST_IDTR_LIMIT = 0x00004812,
        GUEST_ES_AR_BYTES = 0x00004814,
        GUEST_CS_AR_BYTES = 0x00004816,
        GUEST_SS_AR_BYTES = 0x00004818,
        GUEST_DS_AR_BYTES = 0x0000481a,
        GUEST_FS_AR_BYTES = 0x0000481c,
        GUEST_GS_AR_BYTES = 0x0000481e,
        GUEST_LDTR_AR_BYTES = 0x00004820,
        GUEST_TR_AR_BYTES = 0x00004822,
        GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
        GUEST_ACTIVITY_STATE = 0x00004826,
        GUEST_SYSENTER_CS = 0x0000482A,
        VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
        HOST_IA32_SYSENTER_CS = 0x00004c00,
        CR0_GUEST_HOST_MASK = 0x00006000,
        CR4_GUEST_HOST_MASK = 0x00006002,
        CR0_READ_SHADOW = 0x00006004,
        CR4_READ_SHADOW = 0x00006006,
        CR3_TARGET_VALUE0 = 0x00006008,
        CR3_TARGET_VALUE1 = 0x0000600a,
        CR3_TARGET_VALUE2 = 0x0000600c,
        CR3_TARGET_VALUE3 = 0x0000600e,
        EXIT_QUALIFICATION = 0x00006400,
        GUEST_LINEAR_ADDRESS = 0x0000640a,
        GUEST_CR0 = 0x00006800,
        GUEST_CR3 = 0x00006802,
        GUEST_CR4 = 0x00006804,
        GUEST_ES_BASE = 0x00006806,
        GUEST_CS_BASE = 0x00006808,
        GUEST_SS_BASE = 0x0000680a,
        GUEST_DS_BASE = 0x0000680c,
        GUEST_FS_BASE = 0x0000680e,
        GUEST_GS_BASE = 0x00006810,
        GUEST_LDTR_BASE = 0x00006812,
        GUEST_TR_BASE = 0x00006814,
        GUEST_GDTR_BASE = 0x00006816,
        GUEST_IDTR_BASE = 0x00006818,
        GUEST_DR7 = 0x0000681a,
        GUEST_RSP = 0x0000681c,
        GUEST_RIP = 0x0000681e,
        GUEST_RFLAGS = 0x00006820,
        GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
        GUEST_SYSENTER_ESP = 0x00006824,
        GUEST_SYSENTER_EIP = 0x00006826,
        GUEST_S_CET = 0x00006828,
        GUEST_SSP = 0x0000682a,
        GUEST_INTR_SSP_TABLE = 0x0000682c,
        HOST_CR0 = 0x00006c00,
        HOST_CR3 = 0x00006c02,
        HOST_CR4 = 0x00006c04,
        HOST_FS_BASE = 0x00006c06,
        HOST_GS_BASE = 0x00006c08,
        HOST_TR_BASE = 0x00006c0a,
        HOST_GDTR_BASE = 0x00006c0c,
        HOST_IDTR_BASE = 0x00006c0e,
        HOST_IA32_SYSENTER_ESP = 0x00006c10,
        HOST_IA32_SYSENTER_EIP = 0x00006c12,
        HOST_RSP = 0x00006c14,
        HOST_RIP = 0x00006c16,
        HOST_S_CET = 0x00006c18,
        HOST_SSP = 0x00006c1a,
        HOST_INTR_SSP_TABLE = 0x00006c1c
};

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000

#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK

#define INTR_TYPE_EXT_INTR (EVENT_TYPE_EXTINT << 8) /* external interrupt */
#define INTR_TYPE_RESERVED (EVENT_TYPE_RESERVED << 8) /* reserved */
#define INTR_TYPE_NMI_INTR (EVENT_TYPE_NMI << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (EVENT_TYPE_HWEXC << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (EVENT_TYPE_SWINT << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION (EVENT_TYPE_PRIV_SWEXC << 8) /* ICE breakpoint */
#define INTR_TYPE_SOFT_EXCEPTION (EVENT_TYPE_SWEXC << 8) /* software exception */
#define INTR_TYPE_OTHER_EVENT (EVENT_TYPE_OTHER << 8) /* other event */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI 0x00000001
#define GUEST_INTR_STATE_MOV_SS 0x00000002
#define GUEST_INTR_STATE_SMI 0x00000004
#define GUEST_INTR_STATE_NMI 0x00000008
#define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010

/* GUEST_ACTIVITY_STATE flags */
#define GUEST_ACTIVITY_ACTIVE 0
#define GUEST_ACTIVITY_HLT 1
#define GUEST_ACTIVITY_SHUTDOWN 2
#define GUEST_ACTIVITY_WAIT_SIPI 3

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg.*/
#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose reg. */
#define LMSW_SOURCE_DATA_SHIFT 16
#define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
#define REG_EAX (0 << 8)
#define REG_ECX (1 << 8)
#define REG_EDX (2 << 8)
#define REG_EBX (3 << 8)
#define REG_ESP (4 << 8)
#define REG_EBP (5 << 8)
#define REG_ESI (6 << 8)
#define REG_EDI (7 << 8)
#define REG_R8 (8 << 8)
#define REG_R9 (9 << 8)
#define REG_R10 (10 << 8)
#define REG_R11 (11 << 8)
#define REG_R12 (12 << 8)
#define REG_R13 (13 << 8)
#define REG_R14 (14 << 8)
#define REG_R15 (15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */
#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
#define TYPE_MOV_TO_DR (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4)
#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */


/*
 * Exit Qualifications for APIC-Access
 */
#define APIC_ACCESS_OFFSET 0xfff /* 11:0, offset within the APIC page */
#define APIC_ACCESS_TYPE 0xf000 /* 15:12, access type */
#define TYPE_LINEAR_APIC_INST_READ (0 << 12)
#define TYPE_LINEAR_APIC_INST_WRITE (1 << 12)
#define TYPE_LINEAR_APIC_INST_FETCH (2 << 12)
#define TYPE_LINEAR_APIC_EVENT (3 << 12)
#define TYPE_PHYSICAL_APIC_EVENT (10 << 12)
#define TYPE_PHYSICAL_APIC_INST (15 << 12)

/* segment AR in VMCS -- these are different from what LAR reports */
#define VMX_SEGMENT_AR_L_MASK (1 << 13)

#define VMX_AR_TYPE_ACCESSES_MASK 1
#define VMX_AR_TYPE_READABLE_MASK (1 << 1)
#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2)
#define VMX_AR_TYPE_CODE_MASK (1 << 3)
#define VMX_AR_TYPE_MASK 0x0f
#define VMX_AR_TYPE_BUSY_64_TSS 11
#define VMX_AR_TYPE_BUSY_32_TSS 11
#define VMX_AR_TYPE_BUSY_16_TSS 3
#define VMX_AR_TYPE_LDT 2

#define VMX_AR_UNUSABLE_MASK (1 << 16)
#define VMX_AR_S_MASK (1 << 4)
#define VMX_AR_P_MASK (1 << 7)
#define VMX_AR_L_MASK (1 << 13)
#define VMX_AR_DB_MASK (1 << 14)
#define VMX_AR_G_MASK (1 << 15)
#define VMX_AR_DPL_SHIFT 5
#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3)

#define VMX_AR_RESERVD_MASK 0xfffe0f00

#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)

#define VMX_NR_VPIDS (1 << 16)
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
#define VMX_VPID_EXTENT_ALL_CONTEXT 2
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3

#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
#define VMX_EPT_EXTENT_SHIFT 24

#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
#define VMX_EPT_PAGE_WALK_5_BIT (1ull << 7)
#define VMX_EPTP_UC_BIT (1ull << 8)
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
#define VMX_EPT_INVEPT_BIT (1ull << 20)
#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)

#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */

#define VMX_EPT_MT_EPTE_SHIFT 3
#define VMX_EPTP_PWL_MASK 0x38ull
#define VMX_EPTP_PWL_4 0x18ull
#define VMX_EPTP_PWL_5 0x20ull
#define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
#define VMX_EPTP_MT_MASK 0x7ull
#define VMX_EPTP_MT_WB X86_MEMTYPE_WB
#define VMX_EPTP_MT_UC X86_MEMTYPE_UC
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
#define VMX_EPT_IPAT_BIT (1ull << 6)
#define VMX_EPT_ACCESS_BIT (1ull << 8)
#define VMX_EPT_DIRTY_BIT (1ull << 9)
#define VMX_EPT_SUPPRESS_VE_BIT (1ull << 63)
#define VMX_EPT_RWX_MASK (VMX_EPT_READABLE_MASK | \
                          VMX_EPT_WRITABLE_MASK | \
                          VMX_EPT_EXECUTABLE_MASK)
#define VMX_EPT_MT_MASK (7ull << VMX_EPT_MT_EPTE_SHIFT)

static inline u8 vmx_eptp_page_walk_level(u64 eptp)
{
        u64 encoded_level = eptp & VMX_EPTP_PWL_MASK;

        if (encoded_level == VMX_EPTP_PWL_5)
                return 5;

        /* @eptp must be pre-validated by the caller. */
        WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4);
        return 4;
}
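/*
 * Sketch of how an EPT pointer is typically assembled from the definitions
 * above (illustrative only, not a helper provided by this header): the root
 * table address occupies the upper bits, while the low bits hold the memory
 * type, page-walk length and A/D-enable flag, e.g.
 *
 *      u64 eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
 *      if (ad_bits_supported)          // hypothetical flag
 *              eptp |= VMX_EPTP_AD_ENABLE_BIT;
 */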

/* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
#define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \
                                    VMX_EPT_EXECUTABLE_MASK)

#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul

struct vmx_msr_entry {
        u32 index;
        u32 reserved;
        u64 value;
} __aligned(16);
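/*
 * Format of the entries in the MSR-load/store areas referenced by the
 * VM_EXIT_MSR_STORE_ADDR, VM_EXIT_MSR_LOAD_ADDR and VM_ENTRY_MSR_LOAD_ADDR
 * fields: each entry is 16 bytes (MSR index, reserved word, 64-bit value),
 * and the SDM requires the area addresses to be 16-byte aligned, which the
 * __aligned(16) attribute helps guarantee.
 */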

/*
 * Exit Qualifications for entry failure during or after loading guest state
 */
enum vm_entry_failure_code {
        ENTRY_FAIL_DEFAULT = 0,
        ENTRY_FAIL_PDPTE = 2,
        ENTRY_FAIL_NMI = 3,
        ENTRY_FAIL_VMCS_LINK_PTR = 4,
};

/*
 * Exit Qualifications for EPT Violations
 */
#define EPT_VIOLATION_ACC_READ BIT(0)
#define EPT_VIOLATION_ACC_WRITE BIT(1)
#define EPT_VIOLATION_ACC_INSTR BIT(2)
#define EPT_VIOLATION_PROT_READ BIT(3)
#define EPT_VIOLATION_PROT_WRITE BIT(4)
#define EPT_VIOLATION_PROT_EXEC BIT(5)
#define EPT_VIOLATION_EXEC_FOR_RING3_LIN BIT(6)
#define EPT_VIOLATION_PROT_MASK (EPT_VIOLATION_PROT_READ | \
                                 EPT_VIOLATION_PROT_WRITE | \
                                 EPT_VIOLATION_PROT_EXEC)
#define EPT_VIOLATION_GVA_IS_VALID BIT(7)
#define EPT_VIOLATION_GVA_TRANSLATED BIT(8)

#define EPT_VIOLATION_RWX_TO_PROT(__epte) (((__epte) & VMX_EPT_RWX_MASK) << 3)

static_assert(EPT_VIOLATION_RWX_TO_PROT(VMX_EPT_RWX_MASK) ==
              (EPT_VIOLATION_PROT_READ | EPT_VIOLATION_PROT_WRITE | EPT_VIOLATION_PROT_EXEC));

/*
 * Exit Qualifications for NOTIFY VM EXIT
 */
#define NOTIFY_VM_CONTEXT_INVALID BIT(0)

/*
 * VM-instruction error numbers
 */
enum vm_instruction_error_number {
        VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
        VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
        VMXERR_VMCLEAR_VMXON_POINTER = 3,
        VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
        VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
        VMXERR_VMRESUME_AFTER_VMXOFF = 6,
        VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
        VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
        VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
        VMXERR_VMPTRLD_VMXON_POINTER = 10,
        VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
        VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
        VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
        VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
        VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
        VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
        VMXERR_VMCALL_NONCLEAR_VMCS = 19,
        VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
        VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
        VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
        VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
        VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
        VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

/*
 * VM-instruction errors that can be encountered on VM-Enter, used to trace
 * nested VM-Enter failures reported by hardware. Errors unique to VM-Enter
 * from a SMI Transfer Monitor are not included as things have gone seriously
 * sideways if we get one of those...
 */
#define VMX_VMENTER_INSTRUCTION_ERRORS \
        { VMXERR_VMLAUNCH_NONCLEAR_VMCS, "VMLAUNCH_NONCLEAR_VMCS" }, \
        { VMXERR_VMRESUME_NONLAUNCHED_VMCS, "VMRESUME_NONLAUNCHED_VMCS" }, \
        { VMXERR_VMRESUME_AFTER_VMXOFF, "VMRESUME_AFTER_VMXOFF" }, \
        { VMXERR_ENTRY_INVALID_CONTROL_FIELD, "VMENTRY_INVALID_CONTROL_FIELD" }, \
        { VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, "VMENTRY_INVALID_HOST_STATE_FIELD" }, \
        { VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, "VMENTRY_EVENTS_BLOCKED_BY_MOV_SS" }

enum vmx_l1d_flush_state {
        VMENTER_L1D_FLUSH_AUTO,
        VMENTER_L1D_FLUSH_NEVER,
        VMENTER_L1D_FLUSH_COND,
        VMENTER_L1D_FLUSH_ALWAYS,
        VMENTER_L1D_FLUSH_EPT_DISABLED,
        VMENTER_L1D_FLUSH_NOT_REQUIRED,
};

extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;

struct vmx_ve_information {
        u32 exit_reason;
        u32 delivery;
        u64 exit_qualification;
        u64 guest_linear_address;
        u64 guest_physical_address;
        u16 eptp_index;
};
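/*
 * Layout of the Virtualization-Exception information area whose address is
 * programmed via VE_INFORMATION_ADDRESS. Per the SDM, the CPU writes
 * 0xFFFFFFFF into the "delivery" word while a #VE is being delivered; the
 * handler must clear it before another #VE can be injected.
 */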

#endif