1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * vmx.h: VMX Architecture related definitions
4 * Copyright (c) 2004, Intel Corporation.
5 *
6 * A few random additions are:
7 * Copyright (C) 2006 Qumranet
8 * Avi Kivity <avi@qumranet.com>
9 * Yaniv Kamay <yaniv@qumranet.com>
10 */
11 #ifndef VMX_H
12 #define VMX_H
13
14
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/types.h>
18
19 #include <uapi/asm/vmx.h>
20 #include <asm/trapnr.h>
21 #include <asm/vmxfeatures.h>
22
23 #define VMCS_CONTROL_BIT(x) BIT(VMX_FEATURE_##x & 0x1f)
24
25 /*
26 * Definitions of Primary Processor-Based VM-Execution Controls.
27 */
28 #define CPU_BASED_INTR_WINDOW_EXITING VMCS_CONTROL_BIT(INTR_WINDOW_EXITING)
29 #define CPU_BASED_USE_TSC_OFFSETTING VMCS_CONTROL_BIT(USE_TSC_OFFSETTING)
30 #define CPU_BASED_HLT_EXITING VMCS_CONTROL_BIT(HLT_EXITING)
31 #define CPU_BASED_INVLPG_EXITING VMCS_CONTROL_BIT(INVLPG_EXITING)
32 #define CPU_BASED_MWAIT_EXITING VMCS_CONTROL_BIT(MWAIT_EXITING)
33 #define CPU_BASED_RDPMC_EXITING VMCS_CONTROL_BIT(RDPMC_EXITING)
34 #define CPU_BASED_RDTSC_EXITING VMCS_CONTROL_BIT(RDTSC_EXITING)
35 #define CPU_BASED_CR3_LOAD_EXITING VMCS_CONTROL_BIT(CR3_LOAD_EXITING)
36 #define CPU_BASED_CR3_STORE_EXITING VMCS_CONTROL_BIT(CR3_STORE_EXITING)
37 #define CPU_BASED_ACTIVATE_TERTIARY_CONTROLS VMCS_CONTROL_BIT(TERTIARY_CONTROLS)
38 #define CPU_BASED_CR8_LOAD_EXITING VMCS_CONTROL_BIT(CR8_LOAD_EXITING)
39 #define CPU_BASED_CR8_STORE_EXITING VMCS_CONTROL_BIT(CR8_STORE_EXITING)
40 #define CPU_BASED_TPR_SHADOW VMCS_CONTROL_BIT(VIRTUAL_TPR)
41 #define CPU_BASED_NMI_WINDOW_EXITING VMCS_CONTROL_BIT(NMI_WINDOW_EXITING)
42 #define CPU_BASED_MOV_DR_EXITING VMCS_CONTROL_BIT(MOV_DR_EXITING)
43 #define CPU_BASED_UNCOND_IO_EXITING VMCS_CONTROL_BIT(UNCOND_IO_EXITING)
44 #define CPU_BASED_USE_IO_BITMAPS VMCS_CONTROL_BIT(USE_IO_BITMAPS)
45 #define CPU_BASED_MONITOR_TRAP_FLAG VMCS_CONTROL_BIT(MONITOR_TRAP_FLAG)
46 #define CPU_BASED_USE_MSR_BITMAPS VMCS_CONTROL_BIT(USE_MSR_BITMAPS)
47 #define CPU_BASED_MONITOR_EXITING VMCS_CONTROL_BIT(MONITOR_EXITING)
48 #define CPU_BASED_PAUSE_EXITING VMCS_CONTROL_BIT(PAUSE_EXITING)
49 #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS VMCS_CONTROL_BIT(SEC_CONTROLS)
50
51 #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
52
53 /*
54 * Definitions of Secondary Processor-Based VM-Execution Controls.
55 */
56 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES VMCS_CONTROL_BIT(VIRT_APIC_ACCESSES)
57 #define SECONDARY_EXEC_ENABLE_EPT VMCS_CONTROL_BIT(EPT)
58 #define SECONDARY_EXEC_DESC VMCS_CONTROL_BIT(DESC_EXITING)
59 #define SECONDARY_EXEC_ENABLE_RDTSCP VMCS_CONTROL_BIT(RDTSCP)
60 #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE VMCS_CONTROL_BIT(VIRTUAL_X2APIC)
61 #define SECONDARY_EXEC_ENABLE_VPID VMCS_CONTROL_BIT(VPID)
62 #define SECONDARY_EXEC_WBINVD_EXITING VMCS_CONTROL_BIT(WBINVD_EXITING)
63 #define SECONDARY_EXEC_UNRESTRICTED_GUEST VMCS_CONTROL_BIT(UNRESTRICTED_GUEST)
64 #define SECONDARY_EXEC_APIC_REGISTER_VIRT VMCS_CONTROL_BIT(APIC_REGISTER_VIRT)
65 #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY VMCS_CONTROL_BIT(VIRT_INTR_DELIVERY)
66 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING VMCS_CONTROL_BIT(PAUSE_LOOP_EXITING)
67 #define SECONDARY_EXEC_RDRAND_EXITING VMCS_CONTROL_BIT(RDRAND_EXITING)
68 #define SECONDARY_EXEC_ENABLE_INVPCID VMCS_CONTROL_BIT(INVPCID)
69 #define SECONDARY_EXEC_ENABLE_VMFUNC VMCS_CONTROL_BIT(VMFUNC)
70 #define SECONDARY_EXEC_SHADOW_VMCS VMCS_CONTROL_BIT(SHADOW_VMCS)
71 #define SECONDARY_EXEC_ENCLS_EXITING VMCS_CONTROL_BIT(ENCLS_EXITING)
72 #define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING)
73 #define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
74 #define SECONDARY_EXEC_EPT_VIOLATION_VE VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
75 #define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
76 #define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES)
77 #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
78 #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
79 #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
80 #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
81 #define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION)
82 #define SECONDARY_EXEC_NOTIFY_VM_EXITING VMCS_CONTROL_BIT(NOTIFY_VM_EXITING)
83
84 /*
85 * Definitions of Tertiary Processor-Based VM-Execution Controls.
86 */
87 #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT)
88
89 #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
90 #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
91 #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS)
92 #define PIN_BASED_VMX_PREEMPTION_TIMER VMCS_CONTROL_BIT(PREEMPTION_TIMER)
93 #define PIN_BASED_POSTED_INTR VMCS_CONTROL_BIT(POSTED_INTR)
94
95 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
96
97 #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
98 #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
99 #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
100 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
101 #define VM_EXIT_SAVE_IA32_PAT 0x00040000
102 #define VM_EXIT_LOAD_IA32_PAT 0x00080000
103 #define VM_EXIT_SAVE_IA32_EFER 0x00100000
104 #define VM_EXIT_LOAD_IA32_EFER 0x00200000
105 #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
106 #define VM_EXIT_CLEAR_BNDCFGS 0x00800000
107 #define VM_EXIT_PT_CONCEAL_PIP 0x01000000
108 #define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
109
110 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
111
112 #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
113 #define VM_ENTRY_IA32E_MODE 0x00000200
114 #define VM_ENTRY_SMM 0x00000400
115 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
116 #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
117 #define VM_ENTRY_LOAD_IA32_PAT 0x00004000
118 #define VM_ENTRY_LOAD_IA32_EFER 0x00008000
119 #define VM_ENTRY_LOAD_BNDCFGS 0x00010000
120 #define VM_ENTRY_PT_CONCEAL_PIP 0x00020000
121 #define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
122
123 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
124
125 /* VMFUNC functions */
126 #define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28)
127
128 #define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
129 #define VMFUNC_EPTP_ENTRIES 512
130
131 #define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48)
132 #define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49)
133 #define VMX_BASIC_INOUT BIT_ULL(54)
134 #define VMX_BASIC_TRUE_CTLS BIT_ULL(55)
135
vmx_basic_vmcs_revision_id(u64 vmx_basic)136 static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
137 {
138 return vmx_basic & GENMASK_ULL(30, 0);
139 }
140
vmx_basic_vmcs_size(u64 vmx_basic)141 static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
142 {
143 return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
144 }
145
vmx_basic_vmcs_mem_type(u64 vmx_basic)146 static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
147 {
148 return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
149 }
150
vmx_basic_encode_vmcs_info(u32 revision,u16 size,u8 memtype)151 static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype)
152 {
153 return revision | ((u64)size << 32) | ((u64)memtype << 50);
154 }
155
156 #define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5)
157 #define VMX_MISC_ACTIVITY_HLT BIT_ULL(6)
158 #define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7)
159 #define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8)
160 #define VMX_MISC_INTEL_PT BIT_ULL(14)
161 #define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15)
162 #define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28)
163 #define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29)
164 #define VMX_MISC_ZERO_LEN_INS BIT_ULL(30)
165 #define VMX_MISC_MSR_LIST_MULTIPLIER 512
166
/* Bits 4:0 of IA32_VMX_MISC: VMX-preemption timer rate relative to the TSC. */
static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
	u64 rate = vmx_misc & GENMASK_ULL(4, 0);

	return rate;
}
171
/* Bits 24:16 of IA32_VMX_MISC: number of CR3-target values supported. */
static inline int vmx_misc_cr3_count(u64 vmx_misc)
{
	return (vmx_misc >> 16) & GENMASK_ULL(8, 0);
}
176
/*
 * Bits 27:25 of IA32_VMX_MISC: encoded recommended maximum for the MSR
 * load/store lists (see VMX_MISC_MSR_LIST_MULTIPLIER).
 */
static inline int vmx_misc_max_msr(u64 vmx_misc)
{
	return (vmx_misc >> 25) & GENMASK_ULL(2, 0);
}
181
/* Bits 63:32 of IA32_VMX_MISC: MSEG revision identifier. */
static inline int vmx_misc_mseg_revid(u64 vmx_misc)
{
	return (vmx_misc >> 32) & GENMASK_ULL(31, 0);
}
186
187 /* VMCS Encodings */
/*
 * VMCS field encodings.  Per the SDM's encoding scheme, bits 14:13 of the
 * encoding give the field width (16-bit, 64-bit, 32-bit, natural-width),
 * bits 11:10 the field type (control, read-only data, guest state, host
 * state), and bit 0 set selects the high 32 bits of a 64-bit field (the
 * *_HIGH enumerators below).
 */
enum vmcs_field {
	/* 16-bit control fields */
	VIRTUAL_PROCESSOR_ID            = 0x00000000,
	POSTED_INTR_NV                  = 0x00000002,
	LAST_PID_POINTER_INDEX		= 0x00000008,
	/* 16-bit guest-state fields */
	GUEST_ES_SELECTOR               = 0x00000800,
	GUEST_CS_SELECTOR               = 0x00000802,
	GUEST_SS_SELECTOR               = 0x00000804,
	GUEST_DS_SELECTOR               = 0x00000806,
	GUEST_FS_SELECTOR               = 0x00000808,
	GUEST_GS_SELECTOR               = 0x0000080a,
	GUEST_LDTR_SELECTOR             = 0x0000080c,
	GUEST_TR_SELECTOR               = 0x0000080e,
	GUEST_INTR_STATUS               = 0x00000810,
	GUEST_PML_INDEX			= 0x00000812,
	/* 16-bit host-state fields */
	HOST_ES_SELECTOR                = 0x00000c00,
	HOST_CS_SELECTOR                = 0x00000c02,
	HOST_SS_SELECTOR                = 0x00000c04,
	HOST_DS_SELECTOR                = 0x00000c06,
	HOST_FS_SELECTOR                = 0x00000c08,
	HOST_GS_SELECTOR                = 0x00000c0a,
	HOST_TR_SELECTOR                = 0x00000c0c,
	/* 64-bit control fields */
	IO_BITMAP_A                     = 0x00002000,
	IO_BITMAP_A_HIGH                = 0x00002001,
	IO_BITMAP_B                     = 0x00002002,
	IO_BITMAP_B_HIGH                = 0x00002003,
	MSR_BITMAP                      = 0x00002004,
	MSR_BITMAP_HIGH                 = 0x00002005,
	VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
	PML_ADDRESS			= 0x0000200e,
	PML_ADDRESS_HIGH		= 0x0000200f,
	TSC_OFFSET                      = 0x00002010,
	TSC_OFFSET_HIGH                 = 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
	APIC_ACCESS_ADDR		= 0x00002014,
	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
	POSTED_INTR_DESC_ADDR           = 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
	VM_FUNCTION_CONTROL             = 0x00002018,
	VM_FUNCTION_CONTROL_HIGH        = 0x00002019,
	EPT_POINTER                     = 0x0000201a,
	EPT_POINTER_HIGH                = 0x0000201b,
	EOI_EXIT_BITMAP0                = 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
	EOI_EXIT_BITMAP1                = 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
	EOI_EXIT_BITMAP2                = 0x00002020,
	EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
	EOI_EXIT_BITMAP3                = 0x00002022,
	EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
	EPTP_LIST_ADDRESS               = 0x00002024,
	EPTP_LIST_ADDRESS_HIGH          = 0x00002025,
	VMREAD_BITMAP                   = 0x00002026,
	VMREAD_BITMAP_HIGH              = 0x00002027,
	VMWRITE_BITMAP                  = 0x00002028,
	VMWRITE_BITMAP_HIGH             = 0x00002029,
	VE_INFORMATION_ADDRESS		= 0x0000202A,
	VE_INFORMATION_ADDRESS_HIGH	= 0x0000202B,
	XSS_EXIT_BITMAP                 = 0x0000202C,
	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
	ENCLS_EXITING_BITMAP		= 0x0000202E,
	ENCLS_EXITING_BITMAP_HIGH	= 0x0000202F,
	TSC_MULTIPLIER                  = 0x00002032,
	TSC_MULTIPLIER_HIGH             = 0x00002033,
	TERTIARY_VM_EXEC_CONTROL	= 0x00002034,
	TERTIARY_VM_EXEC_CONTROL_HIGH	= 0x00002035,
	PID_POINTER_TABLE		= 0x00002042,
	PID_POINTER_TABLE_HIGH		= 0x00002043,
	/* 64-bit read-only data field */
	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
	/* 64-bit guest-state fields */
	VMCS_LINK_POINTER               = 0x00002800,
	VMCS_LINK_POINTER_HIGH          = 0x00002801,
	GUEST_IA32_DEBUGCTL             = 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
	GUEST_IA32_PAT			= 0x00002804,
	GUEST_IA32_PAT_HIGH		= 0x00002805,
	GUEST_IA32_EFER			= 0x00002806,
	GUEST_IA32_EFER_HIGH		= 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL	= 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0                    = 0x0000280a,
	GUEST_PDPTR0_HIGH               = 0x0000280b,
	GUEST_PDPTR1                    = 0x0000280c,
	GUEST_PDPTR1_HIGH               = 0x0000280d,
	GUEST_PDPTR2                    = 0x0000280e,
	GUEST_PDPTR2_HIGH               = 0x0000280f,
	GUEST_PDPTR3                    = 0x00002810,
	GUEST_PDPTR3_HIGH               = 0x00002811,
	GUEST_BNDCFGS                   = 0x00002812,
	GUEST_BNDCFGS_HIGH              = 0x00002813,
	GUEST_IA32_RTIT_CTL		= 0x00002814,
	GUEST_IA32_RTIT_CTL_HIGH	= 0x00002815,
	/* 64-bit host-state fields */
	HOST_IA32_PAT			= 0x00002c00,
	HOST_IA32_PAT_HIGH		= 0x00002c01,
	HOST_IA32_EFER			= 0x00002c02,
	HOST_IA32_EFER_HIGH		= 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL	= 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH	= 0x00002c05,
	/* 32-bit control fields */
	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
	EXCEPTION_BITMAP                = 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
	CR3_TARGET_COUNT                = 0x0000400a,
	VM_EXIT_CONTROLS                = 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
	VM_ENTRY_CONTROLS               = 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
	TPR_THRESHOLD                   = 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
	PLE_GAP                         = 0x00004020,
	PLE_WINDOW                      = 0x00004022,
	NOTIFY_WINDOW                   = 0x00004024,
	/* 32-bit read-only data fields */
	VM_INSTRUCTION_ERROR            = 0x00004400,
	VM_EXIT_REASON                  = 0x00004402,
	VM_EXIT_INTR_INFO               = 0x00004404,
	VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
	IDT_VECTORING_INFO_FIELD        = 0x00004408,
	IDT_VECTORING_ERROR_CODE        = 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
	VMX_INSTRUCTION_INFO            = 0x0000440e,
	/* 32-bit guest-state fields */
	GUEST_ES_LIMIT                  = 0x00004800,
	GUEST_CS_LIMIT                  = 0x00004802,
	GUEST_SS_LIMIT                  = 0x00004804,
	GUEST_DS_LIMIT                  = 0x00004806,
	GUEST_FS_LIMIT                  = 0x00004808,
	GUEST_GS_LIMIT                  = 0x0000480a,
	GUEST_LDTR_LIMIT                = 0x0000480c,
	GUEST_TR_LIMIT                  = 0x0000480e,
	GUEST_GDTR_LIMIT                = 0x00004810,
	GUEST_IDTR_LIMIT                = 0x00004812,
	GUEST_ES_AR_BYTES               = 0x00004814,
	GUEST_CS_AR_BYTES               = 0x00004816,
	GUEST_SS_AR_BYTES               = 0x00004818,
	GUEST_DS_AR_BYTES               = 0x0000481a,
	GUEST_FS_AR_BYTES               = 0x0000481c,
	GUEST_GS_AR_BYTES               = 0x0000481e,
	GUEST_LDTR_AR_BYTES             = 0x00004820,
	GUEST_TR_AR_BYTES               = 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
	GUEST_ACTIVITY_STATE            = 0x00004826,
	GUEST_SYSENTER_CS               = 0x0000482A,
	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
	/* 32-bit host-state field */
	HOST_IA32_SYSENTER_CS           = 0x00004c00,
	/* natural-width control fields */
	CR0_GUEST_HOST_MASK             = 0x00006000,
	CR4_GUEST_HOST_MASK             = 0x00006002,
	CR0_READ_SHADOW                 = 0x00006004,
	CR4_READ_SHADOW                 = 0x00006006,
	CR3_TARGET_VALUE0               = 0x00006008,
	CR3_TARGET_VALUE1               = 0x0000600a,
	CR3_TARGET_VALUE2               = 0x0000600c,
	CR3_TARGET_VALUE3               = 0x0000600e,
	/* natural-width read-only data fields */
	EXIT_QUALIFICATION              = 0x00006400,
	GUEST_LINEAR_ADDRESS            = 0x0000640a,
	/* natural-width guest-state fields */
	GUEST_CR0                       = 0x00006800,
	GUEST_CR3                       = 0x00006802,
	GUEST_CR4                       = 0x00006804,
	GUEST_ES_BASE                   = 0x00006806,
	GUEST_CS_BASE                   = 0x00006808,
	GUEST_SS_BASE                   = 0x0000680a,
	GUEST_DS_BASE                   = 0x0000680c,
	GUEST_FS_BASE                   = 0x0000680e,
	GUEST_GS_BASE                   = 0x00006810,
	GUEST_LDTR_BASE                 = 0x00006812,
	GUEST_TR_BASE                   = 0x00006814,
	GUEST_GDTR_BASE                 = 0x00006816,
	GUEST_IDTR_BASE                 = 0x00006818,
	GUEST_DR7                       = 0x0000681a,
	GUEST_RSP                       = 0x0000681c,
	GUEST_RIP                       = 0x0000681e,
	GUEST_RFLAGS                    = 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
	GUEST_SYSENTER_ESP              = 0x00006824,
	GUEST_SYSENTER_EIP              = 0x00006826,
	/* natural-width host-state fields */
	HOST_CR0                        = 0x00006c00,
	HOST_CR3                        = 0x00006c02,
	HOST_CR4                        = 0x00006c04,
	HOST_FS_BASE                    = 0x00006c06,
	HOST_GS_BASE                    = 0x00006c08,
	HOST_TR_BASE                    = 0x00006c0a,
	HOST_GDTR_BASE                  = 0x00006c0c,
	HOST_IDTR_BASE                  = 0x00006c0e,
	HOST_IA32_SYSENTER_ESP          = 0x00006c10,
	HOST_IA32_SYSENTER_EIP          = 0x00006c12,
	HOST_RSP                        = 0x00006c14,
	HOST_RIP                        = 0x00006c16,
};
384
385 /*
386 * Interruption-information format
387 */
388 #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
389 #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
390 #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
391 #define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
392 #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
393 #define INTR_INFO_RESVD_BITS_MASK 0x7ffff000
394
395 #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
396 #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
397 #define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK
398 #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
399
400 #define INTR_TYPE_EXT_INTR (EVENT_TYPE_EXTINT << 8) /* external interrupt */
401 #define INTR_TYPE_RESERVED (EVENT_TYPE_RESERVED << 8) /* reserved */
402 #define INTR_TYPE_NMI_INTR (EVENT_TYPE_NMI << 8) /* NMI */
403 #define INTR_TYPE_HARD_EXCEPTION (EVENT_TYPE_HWEXC << 8) /* processor exception */
404 #define INTR_TYPE_SOFT_INTR (EVENT_TYPE_SWINT << 8) /* software interrupt */
405 #define INTR_TYPE_PRIV_SW_EXCEPTION (EVENT_TYPE_PRIV_SWEXC << 8) /* ICE breakpoint */
406 #define INTR_TYPE_SOFT_EXCEPTION (EVENT_TYPE_SWEXC << 8) /* software exception */
407 #define INTR_TYPE_OTHER_EVENT (EVENT_TYPE_OTHER << 8) /* other event */
408
409 /* GUEST_INTERRUPTIBILITY_INFO flags. */
410 #define GUEST_INTR_STATE_STI 0x00000001
411 #define GUEST_INTR_STATE_MOV_SS 0x00000002
412 #define GUEST_INTR_STATE_SMI 0x00000004
413 #define GUEST_INTR_STATE_NMI 0x00000008
414 #define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010
415
416 /* GUEST_ACTIVITY_STATE flags */
417 #define GUEST_ACTIVITY_ACTIVE 0
418 #define GUEST_ACTIVITY_HLT 1
419 #define GUEST_ACTIVITY_SHUTDOWN 2
420 #define GUEST_ACTIVITY_WAIT_SIPI 3
421
422 /*
423 * Exit Qualifications for MOV for Control Register Access
424 */
425 #define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg.*/
426 #define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
427 #define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose reg. */
428 #define LMSW_SOURCE_DATA_SHIFT 16
429 #define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
430 #define REG_EAX (0 << 8)
431 #define REG_ECX (1 << 8)
432 #define REG_EDX (2 << 8)
433 #define REG_EBX (3 << 8)
434 #define REG_ESP (4 << 8)
435 #define REG_EBP (5 << 8)
436 #define REG_ESI (6 << 8)
437 #define REG_EDI (7 << 8)
438 #define REG_R8 (8 << 8)
439 #define REG_R9 (9 << 8)
440 #define REG_R10 (10 << 8)
441 #define REG_R11 (11 << 8)
442 #define REG_R12 (12 << 8)
443 #define REG_R13 (13 << 8)
444 #define REG_R14 (14 << 8)
445 #define REG_R15 (15 << 8)
446
447 /*
448 * Exit Qualifications for MOV for Debug Register Access
449 */
450 #define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */
451 #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
452 #define TYPE_MOV_TO_DR (0 << 4)
453 #define TYPE_MOV_FROM_DR (1 << 4)
454 #define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
455
456
457 /*
458 * Exit Qualifications for APIC-Access
459 */
460 #define APIC_ACCESS_OFFSET 0xfff /* 11:0, offset within the APIC page */
461 #define APIC_ACCESS_TYPE 0xf000 /* 15:12, access type */
462 #define TYPE_LINEAR_APIC_INST_READ (0 << 12)
463 #define TYPE_LINEAR_APIC_INST_WRITE (1 << 12)
464 #define TYPE_LINEAR_APIC_INST_FETCH (2 << 12)
465 #define TYPE_LINEAR_APIC_EVENT (3 << 12)
466 #define TYPE_PHYSICAL_APIC_EVENT (10 << 12)
467 #define TYPE_PHYSICAL_APIC_INST (15 << 12)
468
469 /* segment AR in VMCS -- these are different from what LAR reports */
470 #define VMX_SEGMENT_AR_L_MASK (1 << 13)
471
472 #define VMX_AR_TYPE_ACCESSES_MASK 1
473 #define VMX_AR_TYPE_READABLE_MASK (1 << 1)
474 #define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2)
475 #define VMX_AR_TYPE_CODE_MASK (1 << 3)
476 #define VMX_AR_TYPE_MASK 0x0f
477 #define VMX_AR_TYPE_BUSY_64_TSS 11
478 #define VMX_AR_TYPE_BUSY_32_TSS 11
479 #define VMX_AR_TYPE_BUSY_16_TSS 3
480 #define VMX_AR_TYPE_LDT 2
481
482 #define VMX_AR_UNUSABLE_MASK (1 << 16)
483 #define VMX_AR_S_MASK (1 << 4)
484 #define VMX_AR_P_MASK (1 << 7)
485 #define VMX_AR_L_MASK (1 << 13)
486 #define VMX_AR_DB_MASK (1 << 14)
487 #define VMX_AR_G_MASK (1 << 15)
488 #define VMX_AR_DPL_SHIFT 5
489 #define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3)
490
491 #define VMX_AR_RESERVD_MASK 0xfffe0f00
492
493 #define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
494 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
495 #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
496
497 #define VMX_NR_VPIDS (1 << 16)
498 #define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
499 #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
500 #define VMX_VPID_EXTENT_ALL_CONTEXT 2
501 #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3
502
503 #define VMX_EPT_EXTENT_CONTEXT 1
504 #define VMX_EPT_EXTENT_GLOBAL 2
505 #define VMX_EPT_EXTENT_SHIFT 24
506
507 #define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
508 #define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
509 #define VMX_EPT_PAGE_WALK_5_BIT (1ull << 7)
510 #define VMX_EPTP_UC_BIT (1ull << 8)
511 #define VMX_EPTP_WB_BIT (1ull << 14)
512 #define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
513 #define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
514 #define VMX_EPT_INVEPT_BIT (1ull << 20)
515 #define VMX_EPT_AD_BIT (1ull << 21)
516 #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
517 #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
518
519 #define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
520 #define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */
521 #define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
522 #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
523 #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */
524
525 #define VMX_EPT_MT_EPTE_SHIFT 3
526 #define VMX_EPTP_PWL_MASK 0x38ull
527 #define VMX_EPTP_PWL_4 0x18ull
528 #define VMX_EPTP_PWL_5 0x20ull
529 #define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
530 /* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
531 #define VMX_EPTP_MT_MASK 0x7ull
532 #define VMX_EPTP_MT_WB X86_MEMTYPE_WB
533 #define VMX_EPTP_MT_UC X86_MEMTYPE_UC
534 #define VMX_EPT_READABLE_MASK 0x1ull
535 #define VMX_EPT_WRITABLE_MASK 0x2ull
536 #define VMX_EPT_EXECUTABLE_MASK 0x4ull
537 #define VMX_EPT_IPAT_BIT (1ull << 6)
538 #define VMX_EPT_ACCESS_BIT (1ull << 8)
539 #define VMX_EPT_DIRTY_BIT (1ull << 9)
540 #define VMX_EPT_SUPPRESS_VE_BIT (1ull << 63)
541 #define VMX_EPT_RWX_MASK (VMX_EPT_READABLE_MASK | \
542 VMX_EPT_WRITABLE_MASK | \
543 VMX_EPT_EXECUTABLE_MASK)
544 #define VMX_EPT_MT_MASK (7ull << VMX_EPT_MT_EPTE_SHIFT)
545
vmx_eptp_page_walk_level(u64 eptp)546 static inline u8 vmx_eptp_page_walk_level(u64 eptp)
547 {
548 u64 encoded_level = eptp & VMX_EPTP_PWL_MASK;
549
550 if (encoded_level == VMX_EPTP_PWL_5)
551 return 5;
552
553 /* @eptp must be pre-validated by the caller. */
554 WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4);
555 return 4;
556 }
557
558 /* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
559 #define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \
560 VMX_EPT_EXECUTABLE_MASK)
561
562 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
563
/*
 * Entry format of the VM-entry/VM-exit MSR-load and MSR-store areas
 * (the VM_{ENTRY,EXIT}_MSR_{LOAD,STORE}_ADDR VMCS fields above).
 */
struct vmx_msr_entry {
	u32 index;	/* MSR index to be loaded/stored */
	u32 reserved;	/* must be zero per the SDM */
	u64 value;	/* MSR value */
} __aligned(16);
569
570 /*
571 * Exit Qualifications for entry failure during or after loading guest state
572 */
/* Note: 1 is skipped; no failure code is architecturally assigned to it. */
enum vm_entry_failure_code {
	ENTRY_FAIL_DEFAULT		= 0,
	ENTRY_FAIL_PDPTE		= 2,	/* failure loading PDPTEs */
	ENTRY_FAIL_NMI			= 3,	/* failure injecting an NMI */
	ENTRY_FAIL_VMCS_LINK_PTR	= 4,	/* invalid VMCS link pointer */
};
579
580 /*
581 * Exit Qualifications for EPT Violations
582 */
583 #define EPT_VIOLATION_ACC_READ_BIT 0
584 #define EPT_VIOLATION_ACC_WRITE_BIT 1
585 #define EPT_VIOLATION_ACC_INSTR_BIT 2
586 #define EPT_VIOLATION_RWX_SHIFT 3
587 #define EPT_VIOLATION_GVA_IS_VALID_BIT 7
588 #define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
589 #define EPT_VIOLATION_ACC_READ (1 << EPT_VIOLATION_ACC_READ_BIT)
590 #define EPT_VIOLATION_ACC_WRITE (1 << EPT_VIOLATION_ACC_WRITE_BIT)
591 #define EPT_VIOLATION_ACC_INSTR (1 << EPT_VIOLATION_ACC_INSTR_BIT)
592 #define EPT_VIOLATION_RWX_MASK (VMX_EPT_RWX_MASK << EPT_VIOLATION_RWX_SHIFT)
593 #define EPT_VIOLATION_GVA_IS_VALID (1 << EPT_VIOLATION_GVA_IS_VALID_BIT)
594 #define EPT_VIOLATION_GVA_TRANSLATED (1 << EPT_VIOLATION_GVA_TRANSLATED_BIT)
595
596 /*
597 * Exit Qualifications for NOTIFY VM EXIT
598 */
599 #define NOTIFY_VM_CONTEXT_INVALID BIT(0)
600
601 /*
602 * VM-instruction error numbers
603 */
/*
 * Error numbers reported in the VM_INSTRUCTION_ERROR VMCS field.  The gaps
 * (14, 21, 27) correspond to numbers with no architecturally defined error.
 */
enum vm_instruction_error_number {
	VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
	VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
	VMXERR_VMCLEAR_VMXON_POINTER = 3,
	VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
	VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
	VMXERR_VMRESUME_AFTER_VMXOFF = 6,
	VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
	VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
	VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
	VMXERR_VMPTRLD_VMXON_POINTER = 10,
	VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
	VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
	VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
	VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
	VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
	VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
	VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
	VMXERR_VMCALL_NONCLEAR_VMCS = 19,
	VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
	VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
	VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
	VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
	VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
	VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};
631
632 /*
633 * VM-instruction errors that can be encountered on VM-Enter, used to trace
634 * nested VM-Enter failures reported by hardware. Errors unique to VM-Enter
635 * from a SMI Transfer Monitor are not included as things have gone seriously
636 * sideways if we get one of those...
637 */
638 #define VMX_VMENTER_INSTRUCTION_ERRORS \
639 { VMXERR_VMLAUNCH_NONCLEAR_VMCS, "VMLAUNCH_NONCLEAR_VMCS" }, \
640 { VMXERR_VMRESUME_NONLAUNCHED_VMCS, "VMRESUME_NONLAUNCHED_VMCS" }, \
641 { VMXERR_VMRESUME_AFTER_VMXOFF, "VMRESUME_AFTER_VMXOFF" }, \
642 { VMXERR_ENTRY_INVALID_CONTROL_FIELD, "VMENTRY_INVALID_CONTROL_FIELD" }, \
643 { VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, "VMENTRY_INVALID_HOST_STATE_FIELD" }, \
644 { VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, "VMENTRY_EVENTS_BLOCKED_BY_MOV_SS" }
645
/*
 * State of the L1D cache flush mitigation for L1TF, exposed via
 * l1tf_vmx_mitigation below.
 */
enum vmx_l1d_flush_state {
	VMENTER_L1D_FLUSH_AUTO,		/* policy to be selected automatically */
	VMENTER_L1D_FLUSH_NEVER,	/* never flush L1D */
	VMENTER_L1D_FLUSH_COND,		/* flush conditionally */
	VMENTER_L1D_FLUSH_ALWAYS,	/* flush on every VM-entry */
	VMENTER_L1D_FLUSH_EPT_DISABLED,	/* NOTE(review): presumably "flush moot, EPT is off" */
	VMENTER_L1D_FLUSH_NOT_REQUIRED,	/* NOTE(review): presumably "CPU not vulnerable" */
};
654
655 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
656
/*
 * Layout of the virtualization-exception (#VE) information area pointed to
 * by the VE_INFORMATION_ADDRESS VMCS field.
 */
struct vmx_ve_information {
	u32 exit_reason;		/* exit reason the EPT violation would have had */
	u32 delivery;			/* set non-zero by the CPU on #VE delivery;
					 * NOTE(review): software clears it to re-arm
					 * #VE per the SDM — confirm against users */
	u64 exit_qualification;
	u64 guest_linear_address;
	u64 guest_physical_address;
	u16 eptp_index;			/* index from the EPTP list (EPTP switching) */
};
665
666 #endif
667