/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * A few random additions are:
 * Copyright (C) 2006 Qumranet
 *    Avi Kivity <avi@qumranet.com>
 *    Yaniv Kamay <yaniv@qumranet.com>
 */
#ifndef VMX_H
#define VMX_H


#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

#include <uapi/asm/vmx.h>
#include <asm/trapnr.h>
#include <asm/vmxfeatures.h>

/* First dword of a VMCS region: the VMCS revision ID plus a shadow-VMCS flag. */
struct vmcs_hdr {
	u32 revision_id:31;
	u32 shadow_vmcs:1;
};

/*
 * Software view of a VMCS region: header, VMX-abort indicator, and the
 * remaining implementation-specific data (region size is reported by the
 * VMX_BASIC MSR, see vmx_basic_vmcs_size() below).
 */
struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort;
	char data[];
};

/*
 * Build a 32-bit VM-execution control mask from a VMX_FEATURE_* encoding;
 * the low 5 bits (& 0x1f) select the bit position within the control word.
 */
#define VMCS_CONTROL_BIT(x)	BIT(VMX_FEATURE_##x & 0x1f)

/*
 * Definitions of Primary Processor-Based VM-Execution Controls.
 */
#define CPU_BASED_INTR_WINDOW_EXITING		VMCS_CONTROL_BIT(INTR_WINDOW_EXITING)
#define CPU_BASED_USE_TSC_OFFSETTING		VMCS_CONTROL_BIT(USE_TSC_OFFSETTING)
#define CPU_BASED_HLT_EXITING			VMCS_CONTROL_BIT(HLT_EXITING)
#define CPU_BASED_INVLPG_EXITING		VMCS_CONTROL_BIT(INVLPG_EXITING)
#define CPU_BASED_MWAIT_EXITING			VMCS_CONTROL_BIT(MWAIT_EXITING)
#define CPU_BASED_RDPMC_EXITING			VMCS_CONTROL_BIT(RDPMC_EXITING)
#define CPU_BASED_RDTSC_EXITING			VMCS_CONTROL_BIT(RDTSC_EXITING)
#define CPU_BASED_CR3_LOAD_EXITING		VMCS_CONTROL_BIT(CR3_LOAD_EXITING)
#define CPU_BASED_CR3_STORE_EXITING		VMCS_CONTROL_BIT(CR3_STORE_EXITING)
#define CPU_BASED_ACTIVATE_TERTIARY_CONTROLS	VMCS_CONTROL_BIT(TERTIARY_CONTROLS)
#define CPU_BASED_CR8_LOAD_EXITING		VMCS_CONTROL_BIT(CR8_LOAD_EXITING)
#define CPU_BASED_CR8_STORE_EXITING		VMCS_CONTROL_BIT(CR8_STORE_EXITING)
#define CPU_BASED_TPR_SHADOW			VMCS_CONTROL_BIT(VIRTUAL_TPR)
#define CPU_BASED_NMI_WINDOW_EXITING		VMCS_CONTROL_BIT(NMI_WINDOW_EXITING)
#define CPU_BASED_MOV_DR_EXITING		VMCS_CONTROL_BIT(MOV_DR_EXITING)
#define CPU_BASED_UNCOND_IO_EXITING		VMCS_CONTROL_BIT(UNCOND_IO_EXITING)
#define CPU_BASED_USE_IO_BITMAPS		VMCS_CONTROL_BIT(USE_IO_BITMAPS)
#define CPU_BASED_MONITOR_TRAP_FLAG		VMCS_CONTROL_BIT(MONITOR_TRAP_FLAG)
#define CPU_BASED_USE_MSR_BITMAPS		VMCS_CONTROL_BIT(USE_MSR_BITMAPS)
#define CPU_BASED_MONITOR_EXITING		VMCS_CONTROL_BIT(MONITOR_EXITING)
#define CPU_BASED_PAUSE_EXITING			VMCS_CONTROL_BIT(PAUSE_EXITING)
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS	VMCS_CONTROL_BIT(SEC_CONTROLS)

/* Default1 bits assumed when the TRUE_* capability MSRs (VMX_BASIC_TRUE_CTLS) are absent. */
#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x0401e172

/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES	VMCS_CONTROL_BIT(VIRT_APIC_ACCESSES)
#define SECONDARY_EXEC_ENABLE_EPT		VMCS_CONTROL_BIT(EPT)
#define SECONDARY_EXEC_DESC			VMCS_CONTROL_BIT(DESC_EXITING)
#define SECONDARY_EXEC_ENABLE_RDTSCP		VMCS_CONTROL_BIT(RDTSCP)
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE	VMCS_CONTROL_BIT(VIRTUAL_X2APIC)
#define SECONDARY_EXEC_ENABLE_VPID		VMCS_CONTROL_BIT(VPID)
#define SECONDARY_EXEC_WBINVD_EXITING		VMCS_CONTROL_BIT(WBINVD_EXITING)
#define SECONDARY_EXEC_UNRESTRICTED_GUEST	VMCS_CONTROL_BIT(UNRESTRICTED_GUEST)
#define SECONDARY_EXEC_APIC_REGISTER_VIRT	VMCS_CONTROL_BIT(APIC_REGISTER_VIRT)
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY	VMCS_CONTROL_BIT(VIRT_INTR_DELIVERY)
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING	VMCS_CONTROL_BIT(PAUSE_LOOP_EXITING)
#define SECONDARY_EXEC_RDRAND_EXITING		VMCS_CONTROL_BIT(RDRAND_EXITING)
#define SECONDARY_EXEC_ENABLE_INVPCID		VMCS_CONTROL_BIT(INVPCID)
#define SECONDARY_EXEC_ENABLE_VMFUNC		VMCS_CONTROL_BIT(VMFUNC)
#define SECONDARY_EXEC_SHADOW_VMCS		VMCS_CONTROL_BIT(SHADOW_VMCS)
#define SECONDARY_EXEC_ENCLS_EXITING		VMCS_CONTROL_BIT(ENCLS_EXITING)
#define SECONDARY_EXEC_RDSEED_EXITING		VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML		VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
#define SECONDARY_EXEC_EPT_VIOLATION_VE		VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
#define SECONDARY_EXEC_PT_CONCEAL_VMX		VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
#define SECONDARY_EXEC_ENABLE_XSAVES		VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
#define SECONDARY_EXEC_PT_USE_GPA		VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING		VMCS_CONTROL_BIT(TSC_SCALING)
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE	VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
#define SECONDARY_EXEC_BUS_LOCK_DETECTION	VMCS_CONTROL_BIT(BUS_LOCK_DETECTION)
#define SECONDARY_EXEC_NOTIFY_VM_EXITING	VMCS_CONTROL_BIT(NOTIFY_VM_EXITING)

/*
 * Definitions of Tertiary Processor-Based VM-Execution Controls.
 */
#define TERTIARY_EXEC_IPI_VIRT			VMCS_CONTROL_BIT(IPI_VIRT)

/* Pin-Based VM-Execution Controls. */
#define PIN_BASED_EXT_INTR_MASK			VMCS_CONTROL_BIT(INTR_EXITING)
#define PIN_BASED_NMI_EXITING			VMCS_CONTROL_BIT(NMI_EXITING)
#define PIN_BASED_VIRTUAL_NMIS			VMCS_CONTROL_BIT(VIRTUAL_NMIS)
#define PIN_BASED_VMX_PREEMPTION_TIMER		VMCS_CONTROL_BIT(PREEMPTION_TIMER)
#define PIN_BASED_POSTED_INTR			VMCS_CONTROL_BIT(POSTED_INTR)

/* Default1 bits assumed when the TRUE_* capability MSRs are absent. */
#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x00000016

/* VM-Exit Controls. */
#define VM_EXIT_SAVE_DEBUG_CONTROLS		0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE		0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL	0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT		0x00008000
#define VM_EXIT_SAVE_IA32_PAT			0x00040000
#define VM_EXIT_LOAD_IA32_PAT			0x00080000
#define VM_EXIT_SAVE_IA32_EFER			0x00100000
#define VM_EXIT_LOAD_IA32_EFER			0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER	0x00400000
#define VM_EXIT_CLEAR_BNDCFGS			0x00800000
#define VM_EXIT_PT_CONCEAL_PIP			0x01000000
#define VM_EXIT_CLEAR_IA32_RTIT_CTL		0x02000000
#define VM_EXIT_LOAD_CET_STATE			0x10000000
#define VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL	0x40000000

/* Default1 bits assumed when the TRUE_* capability MSRs are absent. */
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff

/* VM-Entry Controls. */
#define VM_ENTRY_LOAD_DEBUG_CONTROLS		0x00000004
#define VM_ENTRY_IA32E_MODE			0x00000200
#define VM_ENTRY_SMM				0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR		0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL	0x00002000
#define VM_ENTRY_LOAD_IA32_PAT			0x00004000
#define VM_ENTRY_LOAD_IA32_EFER			0x00008000
#define VM_ENTRY_LOAD_BNDCFGS			0x00010000
#define VM_ENTRY_PT_CONCEAL_PIP			0x00020000
#define VM_ENTRY_LOAD_IA32_RTIT_CTL		0x00040000
#define VM_ENTRY_LOAD_CET_STATE			0x00100000

/* Default1 bits assumed when the TRUE_* capability MSRs are absent. */
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff

/* VMFUNC functions */
/* VMFUNC feature bits are encoded relative to bit 28; strip that base off. */
#define VMFUNC_CONTROL_BIT(x)	BIT((VMX_FEATURE_##x & 0x1f) - 28)

#define VMX_VMFUNC_EPTP_SWITCHING		VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
#define VMFUNC_EPTP_ENTRIES	512

/* Feature bits in the VMX_BASIC capability MSR value. */
#define VMX_BASIC_32BIT_PHYS_ADDR_ONLY		BIT_ULL(48)
#define VMX_BASIC_DUAL_MONITOR_TREATMENT	BIT_ULL(49)
#define VMX_BASIC_INOUT				BIT_ULL(54)
#define VMX_BASIC_TRUE_CTLS			BIT_ULL(55)
#define VMX_BASIC_NO_HW_ERROR_CODE_CC		BIT_ULL(56)

/* Extract the VMCS revision identifier (bits 30:0) from a VMX_BASIC value. */
static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
	return vmx_basic & GENMASK_ULL(30, 0);
}

/* Extract the VMCS region size in bytes (bits 44:32) from a VMX_BASIC value. */
static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
{
	return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

/* Extract the VMCS memory type (bits 53:50) from a VMX_BASIC value. */
static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
{
	return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
}

/* Pack revision ID, VMCS size, and memory type into a VMX_BASIC-format value. */
static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype)
{
	return revision | ((u64)size << 32) | ((u64)memtype << 50);
}

/* Feature bits in the VMX_MISC capability MSR value. */
#define VMX_MISC_SAVE_EFER_LMA			BIT_ULL(5)
#define VMX_MISC_ACTIVITY_HLT			BIT_ULL(6)
#define VMX_MISC_ACTIVITY_SHUTDOWN		BIT_ULL(7)
#define VMX_MISC_ACTIVITY_WAIT_SIPI		BIT_ULL(8)
#define VMX_MISC_INTEL_PT			BIT_ULL(14)
#define VMX_MISC_RDMSR_IN_SMM			BIT_ULL(15)
#define VMX_MISC_VMXOFF_BLOCK_SMI		BIT_ULL(28)
#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS	BIT_ULL(29)
#define VMX_MISC_ZERO_LEN_INS			BIT_ULL(30)
#define VMX_MISC_MSR_LIST_MULTIPLIER		512

/* Extract the preemption-timer rate (bits 4:0) from a VMX_MISC value. */
static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
	return vmx_misc & GENMASK_ULL(4, 0);
}

/* Extract the supported CR3-target count (bits 24:16) from a VMX_MISC value. */
static inline int vmx_misc_cr3_count(u64 vmx_misc)
{
	return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
}

/*
 * Extract the raw max-MSR-list encoding (bits 27:25) from a VMX_MISC value;
 * callers scale it with VMX_MISC_MSR_LIST_MULTIPLIER.
 */
static inline int vmx_misc_max_msr(u64 vmx_misc)
{
	return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
}

/* Extract the MSEG revision identifier (bits 63:32) from a VMX_MISC value. */
static inline int vmx_misc_mseg_revid(u64 vmx_misc)
{
	return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
}

/*
 * VMCS Encodings.
 *
 * Fields with an odd encoding (the *_HIGH variants, low-half encoding + 1)
 * access the upper 32 bits of the corresponding 64-bit field.
 */
enum vmcs_field {
	VIRTUAL_PROCESSOR_ID            = 0x00000000,
	POSTED_INTR_NV                  = 0x00000002,
	LAST_PID_POINTER_INDEX		= 0x00000008,
	GUEST_ES_SELECTOR               = 0x00000800,
	GUEST_CS_SELECTOR               = 0x00000802,
	GUEST_SS_SELECTOR               = 0x00000804,
	GUEST_DS_SELECTOR               = 0x00000806,
	GUEST_FS_SELECTOR               = 0x00000808,
	GUEST_GS_SELECTOR               = 0x0000080a,
	GUEST_LDTR_SELECTOR             = 0x0000080c,
	GUEST_TR_SELECTOR               = 0x0000080e,
	GUEST_INTR_STATUS               = 0x00000810,
	GUEST_PML_INDEX			= 0x00000812,
	HOST_ES_SELECTOR                = 0x00000c00,
	HOST_CS_SELECTOR                = 0x00000c02,
	HOST_SS_SELECTOR                = 0x00000c04,
	HOST_DS_SELECTOR                = 0x00000c06,
	HOST_FS_SELECTOR                = 0x00000c08,
	HOST_GS_SELECTOR                = 0x00000c0a,
	HOST_TR_SELECTOR                = 0x00000c0c,
	IO_BITMAP_A                     = 0x00002000,
	IO_BITMAP_A_HIGH                = 0x00002001,
	IO_BITMAP_B                     = 0x00002002,
	IO_BITMAP_B_HIGH                = 0x00002003,
	MSR_BITMAP                      = 0x00002004,
	MSR_BITMAP_HIGH                 = 0x00002005,
	VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
	PML_ADDRESS			= 0x0000200e,
	PML_ADDRESS_HIGH		= 0x0000200f,
	TSC_OFFSET                      = 0x00002010,
	TSC_OFFSET_HIGH                 = 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
	APIC_ACCESS_ADDR		= 0x00002014,
	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
	POSTED_INTR_DESC_ADDR           = 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
	VM_FUNCTION_CONTROL             = 0x00002018,
	VM_FUNCTION_CONTROL_HIGH        = 0x00002019,
	EPT_POINTER                     = 0x0000201a,
	EPT_POINTER_HIGH                = 0x0000201b,
	EOI_EXIT_BITMAP0                = 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
	EOI_EXIT_BITMAP1                = 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
	EOI_EXIT_BITMAP2                = 0x00002020,
	EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
	EOI_EXIT_BITMAP3                = 0x00002022,
	EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
	EPTP_LIST_ADDRESS               = 0x00002024,
	EPTP_LIST_ADDRESS_HIGH          = 0x00002025,
	VMREAD_BITMAP                   = 0x00002026,
	VMREAD_BITMAP_HIGH              = 0x00002027,
	VMWRITE_BITMAP                  = 0x00002028,
	VMWRITE_BITMAP_HIGH             = 0x00002029,
	VE_INFORMATION_ADDRESS		= 0x0000202A,
	VE_INFORMATION_ADDRESS_HIGH	= 0x0000202B,
	XSS_EXIT_BITMAP                 = 0x0000202C,
	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
	ENCLS_EXITING_BITMAP		= 0x0000202E,
	ENCLS_EXITING_BITMAP_HIGH	= 0x0000202F,
	TSC_MULTIPLIER                  = 0x00002032,
	TSC_MULTIPLIER_HIGH             = 0x00002033,
	TERTIARY_VM_EXEC_CONTROL	= 0x00002034,
	TERTIARY_VM_EXEC_CONTROL_HIGH	= 0x00002035,
	SHARED_EPT_POINTER		= 0x0000203C,
	PID_POINTER_TABLE		= 0x00002042,
	PID_POINTER_TABLE_HIGH		= 0x00002043,
	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
	VMCS_LINK_POINTER               = 0x00002800,
	VMCS_LINK_POINTER_HIGH          = 0x00002801,
	GUEST_IA32_DEBUGCTL             = 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
	GUEST_IA32_PAT			= 0x00002804,
	GUEST_IA32_PAT_HIGH		= 0x00002805,
	GUEST_IA32_EFER			= 0x00002806,
	GUEST_IA32_EFER_HIGH		= 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL	= 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0                    = 0x0000280a,
	GUEST_PDPTR0_HIGH               = 0x0000280b,
	GUEST_PDPTR1                    = 0x0000280c,
	GUEST_PDPTR1_HIGH               = 0x0000280d,
	GUEST_PDPTR2                    = 0x0000280e,
	GUEST_PDPTR2_HIGH               = 0x0000280f,
	GUEST_PDPTR3                    = 0x00002810,
	GUEST_PDPTR3_HIGH               = 0x00002811,
	GUEST_BNDCFGS                   = 0x00002812,
	GUEST_BNDCFGS_HIGH              = 0x00002813,
	GUEST_IA32_RTIT_CTL		= 0x00002814,
	GUEST_IA32_RTIT_CTL_HIGH	= 0x00002815,
	HOST_IA32_PAT			= 0x00002c00,
	HOST_IA32_PAT_HIGH		= 0x00002c01,
	HOST_IA32_EFER			= 0x00002c02,
	HOST_IA32_EFER_HIGH		= 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL	= 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH	= 0x00002c05,
	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
	EXCEPTION_BITMAP                = 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
	CR3_TARGET_COUNT                = 0x0000400a,
	VM_EXIT_CONTROLS                = 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
	VM_ENTRY_CONTROLS               = 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
	TPR_THRESHOLD                   = 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
	PLE_GAP                         = 0x00004020,
	PLE_WINDOW                      = 0x00004022,
	NOTIFY_WINDOW                   = 0x00004024,
	VM_INSTRUCTION_ERROR            = 0x00004400,
	VM_EXIT_REASON                  = 0x00004402,
	VM_EXIT_INTR_INFO               = 0x00004404,
	VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
	IDT_VECTORING_INFO_FIELD        = 0x00004408,
	IDT_VECTORING_ERROR_CODE        = 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
	VMX_INSTRUCTION_INFO            = 0x0000440e,
	GUEST_ES_LIMIT                  = 0x00004800,
	GUEST_CS_LIMIT                  = 0x00004802,
	GUEST_SS_LIMIT                  = 0x00004804,
	GUEST_DS_LIMIT                  = 0x00004806,
	GUEST_FS_LIMIT                  = 0x00004808,
	GUEST_GS_LIMIT                  = 0x0000480a,
	GUEST_LDTR_LIMIT                = 0x0000480c,
	GUEST_TR_LIMIT                  = 0x0000480e,
	GUEST_GDTR_LIMIT                = 0x00004810,
	GUEST_IDTR_LIMIT                = 0x00004812,
	GUEST_ES_AR_BYTES               = 0x00004814,
	GUEST_CS_AR_BYTES               = 0x00004816,
	GUEST_SS_AR_BYTES               = 0x00004818,
	GUEST_DS_AR_BYTES               = 0x0000481a,
	GUEST_FS_AR_BYTES               = 0x0000481c,
	GUEST_GS_AR_BYTES               = 0x0000481e,
	GUEST_LDTR_AR_BYTES             = 0x00004820,
	GUEST_TR_AR_BYTES               = 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
	GUEST_ACTIVITY_STATE            = 0x00004826,
	GUEST_SYSENTER_CS               = 0x0000482A,
	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
	HOST_IA32_SYSENTER_CS           = 0x00004c00,
	CR0_GUEST_HOST_MASK             = 0x00006000,
	CR4_GUEST_HOST_MASK             = 0x00006002,
	CR0_READ_SHADOW                 = 0x00006004,
	CR4_READ_SHADOW                 = 0x00006006,
	CR3_TARGET_VALUE0               = 0x00006008,
	CR3_TARGET_VALUE1               = 0x0000600a,
	CR3_TARGET_VALUE2               = 0x0000600c,
	CR3_TARGET_VALUE3               = 0x0000600e,
	EXIT_QUALIFICATION              = 0x00006400,
	GUEST_LINEAR_ADDRESS            = 0x0000640a,
	GUEST_CR0                       = 0x00006800,
	GUEST_CR3                       = 0x00006802,
	GUEST_CR4                       = 0x00006804,
	GUEST_ES_BASE                   = 0x00006806,
	GUEST_CS_BASE                   = 0x00006808,
	GUEST_SS_BASE                   = 0x0000680a,
	GUEST_DS_BASE                   = 0x0000680c,
	GUEST_FS_BASE                   = 0x0000680e,
	GUEST_GS_BASE                   = 0x00006810,
	GUEST_LDTR_BASE                 = 0x00006812,
	GUEST_TR_BASE                   = 0x00006814,
	GUEST_GDTR_BASE                 = 0x00006816,
	GUEST_IDTR_BASE                 = 0x00006818,
	GUEST_DR7                       = 0x0000681a,
	GUEST_RSP                       = 0x0000681c,
	GUEST_RIP                       = 0x0000681e,
	GUEST_RFLAGS                    = 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
	GUEST_SYSENTER_ESP              = 0x00006824,
	GUEST_SYSENTER_EIP              = 0x00006826,
	GUEST_S_CET                     = 0x00006828,
	GUEST_SSP                       = 0x0000682a,
	GUEST_INTR_SSP_TABLE            = 0x0000682c,
	HOST_CR0                        = 0x00006c00,
	HOST_CR3                        = 0x00006c02,
	HOST_CR4                        = 0x00006c04,
	HOST_FS_BASE                    = 0x00006c06,
	HOST_GS_BASE                    = 0x00006c08,
	HOST_TR_BASE                    = 0x00006c0a,
	HOST_GDTR_BASE                  = 0x00006c0c,
	HOST_IDTR_BASE                  = 0x00006c0e,
	HOST_IA32_SYSENTER_ESP          = 0x00006c10,
	HOST_IA32_SYSENTER_EIP          = 0x00006c12,
	HOST_RSP                        = 0x00006c14,
	HOST_RIP                        = 0x00006c16,
	HOST_S_CET                      = 0x00006c18,
	HOST_SSP                        = 0x00006c1a,
	HOST_INTR_SSP_TABLE             = 0x00006c1c
};

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK		0xff		/* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK	0x700		/* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK	0x800		/* 11 */
#define INTR_INFO_UNBLOCK_NMI		0x1000		/* 12 */
#define INTR_INFO_VALID_MASK		0x80000000	/* 31 */
#define INTR_INFO_RESVD_BITS_MASK	0x7ffff000

/* IDT-vectoring info shares the interruption-information layout. */
#define VECTORING_INFO_VECTOR_MASK		INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK		INTR_INFO_INTR_TYPE_MASK
#define VECTORING_INFO_DELIVER_CODE_MASK	INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK		INTR_INFO_VALID_MASK

/* Event types, shifted into the interruption-info type field (bits 10:8). */
#define INTR_TYPE_EXT_INTR		(EVENT_TYPE_EXTINT << 8)	/* external interrupt */
#define INTR_TYPE_RESERVED		(EVENT_TYPE_RESERVED << 8)	/* reserved */
#define INTR_TYPE_NMI_INTR		(EVENT_TYPE_NMI << 8)		/* NMI */
#define INTR_TYPE_HARD_EXCEPTION	(EVENT_TYPE_HWEXC << 8)		/* processor exception */
#define INTR_TYPE_SOFT_INTR		(EVENT_TYPE_SWINT << 8)		/* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION	(EVENT_TYPE_PRIV_SWEXC << 8)	/* ICE breakpoint */
#define INTR_TYPE_SOFT_EXCEPTION	(EVENT_TYPE_SWEXC << 8)		/* software exception */
#define INTR_TYPE_OTHER_EVENT		(EVENT_TYPE_OTHER << 8)		/* other event */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI		0x00000001
#define GUEST_INTR_STATE_MOV_SS		0x00000002
#define GUEST_INTR_STATE_SMI		0x00000004
#define GUEST_INTR_STATE_NMI		0x00000008
#define GUEST_INTR_STATE_ENCLAVE_INTR	0x00000010

/* GUEST_ACTIVITY_STATE flags */
#define GUEST_ACTIVITY_ACTIVE		0
#define GUEST_ACTIVITY_HLT		1
#define GUEST_ACTIVITY_SHUTDOWN		2
#define GUEST_ACTIVITY_WAIT_SIPI	3

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM		0x7	/* 2:0, number of control reg.*/
#define CONTROL_REG_ACCESS_TYPE		0x30	/* 5:4, access type */
#define CONTROL_REG_ACCESS_REG		0xf00	/* 10:8, general purpose reg. */
#define LMSW_SOURCE_DATA_SHIFT 16
#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
/* GPR encodings for the CONTROL_REG_ACCESS_REG field (pre-shifted to 10:8). */
#define REG_EAX				(0 << 8)
#define REG_ECX				(1 << 8)
#define REG_EDX				(2 << 8)
#define REG_EBX				(3 << 8)
#define REG_ESP				(4 << 8)
#define REG_EBP				(5 << 8)
#define REG_ESI				(6 << 8)
#define REG_EDI				(7 << 8)
#define REG_R8				(8 << 8)
#define REG_R9				(9 << 8)
#define REG_R10				(10 << 8)
#define REG_R11				(11 << 8)
#define REG_R12				(12 << 8)
#define REG_R13				(13 << 8)
#define REG_R14				(14 << 8)
#define REG_R15				(15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM		0x7	/* 2:0, number of debug reg. */
#define DEBUG_REG_ACCESS_TYPE		0x10	/* 4, direction of access */
#define TYPE_MOV_TO_DR			(0 << 4)
#define TYPE_MOV_FROM_DR		(1 << 4)
#define DEBUG_REG_ACCESS_REG(eq)	(((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */


/*
 * Exit Qualifications for APIC-Access
 */
#define APIC_ACCESS_OFFSET		0xfff	/* 11:0, offset within the APIC page */
#define APIC_ACCESS_TYPE		0xf000	/* 15:12, access type */
#define TYPE_LINEAR_APIC_INST_READ	(0 << 12)
#define TYPE_LINEAR_APIC_INST_WRITE	(1 << 12)
#define TYPE_LINEAR_APIC_INST_FETCH	(2 << 12)
#define TYPE_LINEAR_APIC_EVENT		(3 << 12)
#define TYPE_PHYSICAL_APIC_EVENT	(10 << 12)
#define TYPE_PHYSICAL_APIC_INST		(15 << 12)

/* segment AR in VMCS -- these are different from what LAR reports */
#define VMX_SEGMENT_AR_L_MASK (1 << 13)

#define VMX_AR_TYPE_ACCESSES_MASK 1
#define VMX_AR_TYPE_READABLE_MASK (1 << 1)
#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2)
#define VMX_AR_TYPE_CODE_MASK (1 << 3)
#define VMX_AR_TYPE_MASK 0x0f
#define VMX_AR_TYPE_BUSY_64_TSS 11
#define VMX_AR_TYPE_BUSY_32_TSS 11
#define VMX_AR_TYPE_BUSY_16_TSS 3
#define VMX_AR_TYPE_LDT 2

#define VMX_AR_UNUSABLE_MASK (1 << 16)
#define VMX_AR_S_MASK (1 << 4)
#define VMX_AR_P_MASK (1 << 7)
#define VMX_AR_L_MASK (1 << 13)
#define VMX_AR_DB_MASK (1 << 14)
#define VMX_AR_G_MASK (1 << 15)
#define VMX_AR_DPL_SHIFT 5
#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3)

#define VMX_AR_RESERVD_MASK 0xfffe0f00

/* Private memslots placed immediately after the user-visible slots. */
#define TSS_PRIVATE_MEMSLOT			(KVM_USER_MEM_SLOTS + 0)
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT	(KVM_USER_MEM_SLOTS + 1)
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT	(KVM_USER_MEM_SLOTS + 2)

/* VPIDs are 16-bit; INVVPID extent types. */
#define VMX_NR_VPIDS				(1 << 16)
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR		0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT		1
#define VMX_VPID_EXTENT_ALL_CONTEXT		2
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL	3

/* INVEPT extent types. */
#define VMX_EPT_EXTENT_CONTEXT			1
#define VMX_EPT_EXTENT_GLOBAL			2
#define VMX_EPT_EXTENT_SHIFT			24

/* Capability bits in the EPT/VPID capability MSR value. */
#define VMX_EPT_EXECUTE_ONLY_BIT		(1ull)
#define VMX_EPT_PAGE_WALK_4_BIT			(1ull << 6)
#define VMX_EPT_PAGE_WALK_5_BIT			(1ull << 7)
#define VMX_EPTP_UC_BIT				(1ull << 8)
#define VMX_EPTP_WB_BIT				(1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
#define VMX_EPT_INVEPT_BIT			(1ull << 20)
#define VMX_EPT_AD_BIT				(1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)

#define VMX_VPID_INVVPID_BIT			(1ull << 0) /* (32 - 32) */
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT	(1ull << 8) /* (40 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT	(1ull << 9) /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT	(1ull << 10) /* (42 - 32) */
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT	(1ull << 11) /* (43 - 32) */

/* EPTP field layout and EPT PTE bits. */
#define VMX_EPT_MT_EPTE_SHIFT			3
#define VMX_EPTP_PWL_MASK			0x38ull
#define VMX_EPTP_PWL_4				0x18ull
#define VMX_EPTP_PWL_5				0x20ull
#define VMX_EPTP_AD_ENABLE_BIT			(1ull << 6)
/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
#define VMX_EPTP_MT_MASK			0x7ull
#define VMX_EPTP_MT_WB				X86_MEMTYPE_WB
#define VMX_EPTP_MT_UC				X86_MEMTYPE_UC
#define VMX_EPT_READABLE_MASK			0x1ull
#define VMX_EPT_WRITABLE_MASK			0x2ull
#define VMX_EPT_EXECUTABLE_MASK			0x4ull
#define VMX_EPT_IPAT_BIT			(1ull << 6)
#define VMX_EPT_ACCESS_BIT			(1ull << 8)
#define VMX_EPT_DIRTY_BIT			(1ull << 9)
#define VMX_EPT_SUPPRESS_VE_BIT			(1ull << 63)
#define VMX_EPT_RWX_MASK			(VMX_EPT_READABLE_MASK |       \
						 VMX_EPT_WRITABLE_MASK |       \
						 VMX_EPT_EXECUTABLE_MASK)
#define VMX_EPT_MT_MASK				(7ull << VMX_EPT_MT_EPTE_SHIFT)

/* Decode the EPT page-walk level (4 or 5) from the EPTP's PWL field. */
static inline u8 vmx_eptp_page_walk_level(u64 eptp)
{
	u64 encoded_level = eptp & VMX_EPTP_PWL_MASK;

	if (encoded_level == VMX_EPTP_PWL_5)
		return 5;

	/* @eptp must be pre-validated by the caller. */
	WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4);
	return 4;
}

/* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
#define VMX_EPT_MISCONFIG_WX_VALUE		(VMX_EPT_WRITABLE_MASK |       \
						 VMX_EPT_EXECUTABLE_MASK)

#define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul

/*
 * Entry format of the VM-entry/VM-exit MSR-load and MSR-store lists
 * (pointed at by the VM_{ENTRY,EXIT}_MSR_{LOAD,STORE}_ADDR VMCS fields).
 */
struct vmx_msr_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __aligned(16);

/*
 * Exit Qualifications for entry failure during or after loading guest state
 */
enum vm_entry_failure_code {
	ENTRY_FAIL_DEFAULT		= 0,
	ENTRY_FAIL_PDPTE		= 2,
	ENTRY_FAIL_NMI			= 3,
	ENTRY_FAIL_VMCS_LINK_PTR	= 4,
};

/*
 * Exit Qualifications for EPT Violations
 */
#define EPT_VIOLATION_ACC_READ		BIT(0)
#define EPT_VIOLATION_ACC_WRITE		BIT(1)
#define EPT_VIOLATION_ACC_INSTR		BIT(2)
#define EPT_VIOLATION_PROT_READ		BIT(3)
#define EPT_VIOLATION_PROT_WRITE	BIT(4)
#define EPT_VIOLATION_PROT_EXEC		BIT(5)
#define EPT_VIOLATION_EXEC_FOR_RING3_LIN BIT(6)
#define EPT_VIOLATION_PROT_MASK		(EPT_VIOLATION_PROT_READ  | \
					 EPT_VIOLATION_PROT_WRITE | \
					 EPT_VIOLATION_PROT_EXEC)
#define EPT_VIOLATION_GVA_IS_VALID	BIT(7)
#define EPT_VIOLATION_GVA_TRANSLATED	BIT(8)

/* Shift EPT PTE RWX permission bits (2:0) into the exit-qual PROT bits (5:3). */
#define EPT_VIOLATION_RWX_TO_PROT(__epte)	(((__epte) & VMX_EPT_RWX_MASK) << 3)

static_assert(EPT_VIOLATION_RWX_TO_PROT(VMX_EPT_RWX_MASK) ==
	      (EPT_VIOLATION_PROT_READ | EPT_VIOLATION_PROT_WRITE | EPT_VIOLATION_PROT_EXEC));

/*
 * Exit Qualifications for NOTIFY VM EXIT
 */
#define NOTIFY_VM_CONTEXT_INVALID	BIT(0)

/*
 * VM-instruction error numbers
 */
enum vm_instruction_error_number {
	VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
	VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
	VMXERR_VMCLEAR_VMXON_POINTER = 3,
	VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
	VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
	VMXERR_VMRESUME_AFTER_VMXOFF = 6,
	VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
	VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
	VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
	VMXERR_VMPTRLD_VMXON_POINTER = 10,
	VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
	VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
	VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
	VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
	VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
	VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
	VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
	VMXERR_VMCALL_NONCLEAR_VMCS = 19,
	VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
	VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
	VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
	VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
	VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
	VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

/*
 * VM-instruction errors that can be encountered on VM-Enter, used to trace
 * nested VM-Enter failures reported by hardware.  Errors unique to VM-Enter
 * from a SMI Transfer Monitor are not included as things have gone seriously
 * sideways if we get one of those...
 */
#define VMX_VMENTER_INSTRUCTION_ERRORS \
	{ VMXERR_VMLAUNCH_NONCLEAR_VMCS,		"VMLAUNCH_NONCLEAR_VMCS" }, \
	{ VMXERR_VMRESUME_NONLAUNCHED_VMCS,		"VMRESUME_NONLAUNCHED_VMCS" }, \
	{ VMXERR_VMRESUME_AFTER_VMXOFF,			"VMRESUME_AFTER_VMXOFF" }, \
	{ VMXERR_ENTRY_INVALID_CONTROL_FIELD,		"VMENTRY_INVALID_CONTROL_FIELD" }, \
	{ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,	"VMENTRY_INVALID_HOST_STATE_FIELD" }, \
	{ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS,	"VMENTRY_EVENTS_BLOCKED_BY_MOV_SS" }

/* L1D flush mitigation modes; the active one is held in l1tf_vmx_mitigation. */
enum vmx_l1d_flush_state {
	VMENTER_L1D_FLUSH_AUTO,
	VMENTER_L1D_FLUSH_NEVER,
	VMENTER_L1D_FLUSH_COND,
	VMENTER_L1D_FLUSH_ALWAYS,
	VMENTER_L1D_FLUSH_EPT_DISABLED,
	VMENTER_L1D_FLUSH_NOT_REQUIRED,
};

extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;

/*
 * Layout of the Virtualization-Exception (#VE) information area, whose
 * address is programmed via the VE_INFORMATION_ADDRESS VMCS field.
 */
struct vmx_ve_information {
	u32 exit_reason;
	u32 delivery;
	u64 exit_qualification;
	u64 guest_linear_address;
	u64 guest_physical_address;
	u16 eptp_index;
};

#endif