/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * A few random additions are:
 * Copyright (C) 2006 Qumranet
 *    Avi Kivity <avi@qumranet.com>
 *    Yaniv Kamay <yaniv@qumranet.com>
 */
#ifndef VMX_H
#define VMX_H


#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/asm/vmx.h>
#include <asm/vmxfeatures.h>

#define VMCS_CONTROL_BIT(x)	BIT(VMX_FEATURE_##x & 0x1f)
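/*
 * Note: the VMX_FEATURE_* values in <asm/vmxfeatures.h> encode a (word, bit)
 * pair as "word * 32 + bit", so masking with 0x1f above recovers the bit
 * position within the corresponding 32-bit VM-execution control field.
 * E.g. VMCS_CONTROL_BIT(HLT_EXITING) evaluates to BIT(7), matching bit 7
 * ("HLT exiting") of the primary processor-based controls.
 */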
/*
 * Definitions of Primary Processor-Based VM-Execution Controls.
 */
#define CPU_BASED_INTR_WINDOW_EXITING           VMCS_CONTROL_BIT(INTR_WINDOW_EXITING)
#define CPU_BASED_USE_TSC_OFFSETTING            VMCS_CONTROL_BIT(USE_TSC_OFFSETTING)
#define CPU_BASED_HLT_EXITING                   VMCS_CONTROL_BIT(HLT_EXITING)
#define CPU_BASED_INVLPG_EXITING                VMCS_CONTROL_BIT(INVLPG_EXITING)
#define CPU_BASED_MWAIT_EXITING                 VMCS_CONTROL_BIT(MWAIT_EXITING)
#define CPU_BASED_RDPMC_EXITING                 VMCS_CONTROL_BIT(RDPMC_EXITING)
#define CPU_BASED_RDTSC_EXITING                 VMCS_CONTROL_BIT(RDTSC_EXITING)
#define CPU_BASED_CR3_LOAD_EXITING              VMCS_CONTROL_BIT(CR3_LOAD_EXITING)
#define CPU_BASED_CR3_STORE_EXITING             VMCS_CONTROL_BIT(CR3_STORE_EXITING)
#define CPU_BASED_CR8_LOAD_EXITING              VMCS_CONTROL_BIT(CR8_LOAD_EXITING)
#define CPU_BASED_CR8_STORE_EXITING             VMCS_CONTROL_BIT(CR8_STORE_EXITING)
#define CPU_BASED_TPR_SHADOW                    VMCS_CONTROL_BIT(VIRTUAL_TPR)
#define CPU_BASED_NMI_WINDOW_EXITING            VMCS_CONTROL_BIT(NMI_WINDOW_EXITING)
#define CPU_BASED_MOV_DR_EXITING                VMCS_CONTROL_BIT(MOV_DR_EXITING)
#define CPU_BASED_UNCOND_IO_EXITING             VMCS_CONTROL_BIT(UNCOND_IO_EXITING)
#define CPU_BASED_USE_IO_BITMAPS                VMCS_CONTROL_BIT(USE_IO_BITMAPS)
#define CPU_BASED_MONITOR_TRAP_FLAG             VMCS_CONTROL_BIT(MONITOR_TRAP_FLAG)
#define CPU_BASED_USE_MSR_BITMAPS               VMCS_CONTROL_BIT(USE_MSR_BITMAPS)
#define CPU_BASED_MONITOR_EXITING               VMCS_CONTROL_BIT(MONITOR_EXITING)
#define CPU_BASED_PAUSE_EXITING                 VMCS_CONTROL_BIT(PAUSE_EXITING)
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   VMCS_CONTROL_BIT(SEC_CONTROLS)

#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR     0x0401e172

/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES VMCS_CONTROL_BIT(VIRT_APIC_ACCESSES)
#define SECONDARY_EXEC_ENABLE_EPT               VMCS_CONTROL_BIT(EPT)
#define SECONDARY_EXEC_DESC                     VMCS_CONTROL_BIT(DESC_EXITING)
#define SECONDARY_EXEC_RDTSCP                   VMCS_CONTROL_BIT(RDTSCP)
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   VMCS_CONTROL_BIT(VIRTUAL_X2APIC)
#define SECONDARY_EXEC_ENABLE_VPID              VMCS_CONTROL_BIT(VPID)
#define SECONDARY_EXEC_WBINVD_EXITING           VMCS_CONTROL_BIT(WBINVD_EXITING)
#define SECONDARY_EXEC_UNRESTRICTED_GUEST       VMCS_CONTROL_BIT(UNRESTRICTED_GUEST)
#define SECONDARY_EXEC_APIC_REGISTER_VIRT       VMCS_CONTROL_BIT(APIC_REGISTER_VIRT)
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    VMCS_CONTROL_BIT(VIRT_INTR_DELIVERY)
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING       VMCS_CONTROL_BIT(PAUSE_LOOP_EXITING)
#define SECONDARY_EXEC_RDRAND_EXITING           VMCS_CONTROL_BIT(RDRAND_EXITING)
#define SECONDARY_EXEC_ENABLE_INVPCID           VMCS_CONTROL_BIT(INVPCID)
#define SECONDARY_EXEC_ENABLE_VMFUNC            VMCS_CONTROL_BIT(VMFUNC)
#define SECONDARY_EXEC_SHADOW_VMCS              VMCS_CONTROL_BIT(SHADOW_VMCS)
#define SECONDARY_EXEC_ENCLS_EXITING            VMCS_CONTROL_BIT(ENCLS_EXITING)
#define SECONDARY_EXEC_RDSEED_EXITING           VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML               VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
#define SECONDARY_EXEC_PT_CONCEAL_VMX           VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
#define SECONDARY_EXEC_XSAVES                   VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC      VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
#define SECONDARY_EXEC_PT_USE_GPA               VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING              VMCS_CONTROL_BIT(TSC_SCALING)
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE    VMCS_CONTROL_BIT(USR_WAIT_PAUSE)

#define PIN_BASED_EXT_INTR_MASK                 VMCS_CONTROL_BIT(INTR_EXITING)
#define PIN_BASED_NMI_EXITING                   VMCS_CONTROL_BIT(NMI_EXITING)
#define PIN_BASED_VIRTUAL_NMIS                  VMCS_CONTROL_BIT(VIRTUAL_NMIS)
#define PIN_BASED_VMX_PREEMPTION_TIMER          VMCS_CONTROL_BIT(PREEMPTION_TIMER)
#define PIN_BASED_POSTED_INTR                   VMCS_CONTROL_BIT(POSTED_INTR)

#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR     0x00000016

#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
#define VM_EXIT_SAVE_IA32_PAT                   0x00040000
#define VM_EXIT_LOAD_IA32_PAT                   0x00080000
#define VM_EXIT_SAVE_IA32_EFER                  0x00100000
#define VM_EXIT_LOAD_IA32_EFER                  0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
#define VM_EXIT_CLEAR_BNDCFGS                   0x00800000
#define VM_EXIT_PT_CONCEAL_PIP                  0x01000000
#define VM_EXIT_CLEAR_IA32_RTIT_CTL             0x02000000

#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR       0x00036dff

#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000004
#define VM_ENTRY_IA32E_MODE                     0x00000200
#define VM_ENTRY_SMM                            0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
#define VM_ENTRY_LOAD_IA32_PAT                  0x00004000
#define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
#define VM_ENTRY_LOAD_BNDCFGS                   0x00010000
#define VM_ENTRY_PT_CONCEAL_PIP                 0x00020000
#define VM_ENTRY_LOAD_IA32_RTIT_CTL             0x00040000

#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR      0x000011ff
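/*
 * The usable bits in each control field are constrained by the corresponding
 * VMX capability MSR: its low 32 bits give the allowed 0-settings (bits that
 * must be 1) and its high 32 bits the allowed 1-settings (bits that may be 1).
 * A minimal sketch of the usual adjustment, assuming the caller has already
 * read the relevant capability MSR into @msr_val (the helper name is
 * illustrative, not part of this header):
 */
static inline u32 vmx_adjust_controls(u32 desired, u64 msr_val)
{
        u32 must_be_one = (u32)msr_val;         /* allowed 0-settings */
        u32 may_be_one  = (u32)(msr_val >> 32); /* allowed 1-settings */

        return (desired | must_be_one) & may_be_one;
}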
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK     0x0000001f
#define VMX_MISC_SAVE_EFER_LMA                  0x00000020
#define VMX_MISC_ACTIVITY_HLT                   0x00000040
#define VMX_MISC_ZERO_LEN_INS                   0x40000000
#define VMX_MISC_MSR_LIST_MULTIPLIER            512

/* VMFUNC functions */
#define VMFUNC_CONTROL_BIT(x)	BIT((VMX_FEATURE_##x & 0x1f) - 28)

#define VMX_VMFUNC_EPTP_SWITCHING               VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
#define VMFUNC_EPTP_ENTRIES                     512

static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
        return vmx_basic & GENMASK_ULL(30, 0);
}

static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
{
        return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
        return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
}

static inline int vmx_misc_cr3_count(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
}

static inline int vmx_misc_max_msr(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
}

static inline int vmx_misc_mseg_revid(u64 vmx_misc)
{
        return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
}
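/*
 * The helpers above decode fields of the IA32_VMX_BASIC and IA32_VMX_MISC
 * capability MSRs.  As an illustrative sketch (the helper name below is not
 * part of this header), the 3-bit value returned by vmx_misc_max_msr() is
 * documented as N, with the recommended maximum number of MSR-list entries
 * being 512 * (N + 1):
 */
static inline int vmx_misc_max_msr_list_size(u64 vmx_misc)
{
        return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}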
/* VMCS Encodings */
enum vmcs_field {
        VIRTUAL_PROCESSOR_ID            = 0x00000000,
        POSTED_INTR_NV                  = 0x00000002,
        GUEST_ES_SELECTOR               = 0x00000800,
        GUEST_CS_SELECTOR               = 0x00000802,
        GUEST_SS_SELECTOR               = 0x00000804,
        GUEST_DS_SELECTOR               = 0x00000806,
        GUEST_FS_SELECTOR               = 0x00000808,
        GUEST_GS_SELECTOR               = 0x0000080a,
        GUEST_LDTR_SELECTOR             = 0x0000080c,
        GUEST_TR_SELECTOR               = 0x0000080e,
        GUEST_INTR_STATUS               = 0x00000810,
        GUEST_PML_INDEX                 = 0x00000812,
        HOST_ES_SELECTOR                = 0x00000c00,
        HOST_CS_SELECTOR                = 0x00000c02,
        HOST_SS_SELECTOR                = 0x00000c04,
        HOST_DS_SELECTOR                = 0x00000c06,
        HOST_FS_SELECTOR                = 0x00000c08,
        HOST_GS_SELECTOR                = 0x00000c0a,
        HOST_TR_SELECTOR                = 0x00000c0c,
        IO_BITMAP_A                     = 0x00002000,
        IO_BITMAP_A_HIGH                = 0x00002001,
        IO_BITMAP_B                     = 0x00002002,
        IO_BITMAP_B_HIGH                = 0x00002003,
        MSR_BITMAP                      = 0x00002004,
        MSR_BITMAP_HIGH                 = 0x00002005,
        VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
        VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
        VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
        VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
        VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
        VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
        PML_ADDRESS                     = 0x0000200e,
        PML_ADDRESS_HIGH                = 0x0000200f,
        TSC_OFFSET                      = 0x00002010,
        TSC_OFFSET_HIGH                 = 0x00002011,
        VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
        VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
        APIC_ACCESS_ADDR                = 0x00002014,
        APIC_ACCESS_ADDR_HIGH           = 0x00002015,
        POSTED_INTR_DESC_ADDR           = 0x00002016,
        POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
        VM_FUNCTION_CONTROL             = 0x00002018,
        VM_FUNCTION_CONTROL_HIGH        = 0x00002019,
        EPT_POINTER                     = 0x0000201a,
        EPT_POINTER_HIGH                = 0x0000201b,
        EOI_EXIT_BITMAP0                = 0x0000201c,
        EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
        EOI_EXIT_BITMAP1                = 0x0000201e,
        EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
        EOI_EXIT_BITMAP2                = 0x00002020,
        EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
        EOI_EXIT_BITMAP3                = 0x00002022,
        EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
        EPTP_LIST_ADDRESS               = 0x00002024,
        EPTP_LIST_ADDRESS_HIGH          = 0x00002025,
        VMREAD_BITMAP                   = 0x00002026,
        VMREAD_BITMAP_HIGH              = 0x00002027,
        VMWRITE_BITMAP                  = 0x00002028,
        VMWRITE_BITMAP_HIGH             = 0x00002029,
        XSS_EXIT_BITMAP                 = 0x0000202C,
        XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
        ENCLS_EXITING_BITMAP            = 0x0000202E,
        ENCLS_EXITING_BITMAP_HIGH       = 0x0000202F,
        TSC_MULTIPLIER                  = 0x00002032,
        TSC_MULTIPLIER_HIGH             = 0x00002033,
        GUEST_PHYSICAL_ADDRESS          = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
        VMCS_LINK_POINTER               = 0x00002800,
        VMCS_LINK_POINTER_HIGH          = 0x00002801,
        GUEST_IA32_DEBUGCTL             = 0x00002802,
        GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
        GUEST_IA32_PAT                  = 0x00002804,
        GUEST_IA32_PAT_HIGH             = 0x00002805,
        GUEST_IA32_EFER                 = 0x00002806,
        GUEST_IA32_EFER_HIGH            = 0x00002807,
        GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
        GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
        GUEST_PDPTR0                    = 0x0000280a,
        GUEST_PDPTR0_HIGH               = 0x0000280b,
        GUEST_PDPTR1                    = 0x0000280c,
        GUEST_PDPTR1_HIGH               = 0x0000280d,
        GUEST_PDPTR2                    = 0x0000280e,
        GUEST_PDPTR2_HIGH               = 0x0000280f,
        GUEST_PDPTR3                    = 0x00002810,
        GUEST_PDPTR3_HIGH               = 0x00002811,
        GUEST_BNDCFGS                   = 0x00002812,
        GUEST_BNDCFGS_HIGH              = 0x00002813,
        GUEST_IA32_RTIT_CTL             = 0x00002814,
        GUEST_IA32_RTIT_CTL_HIGH        = 0x00002815,
        HOST_IA32_PAT                   = 0x00002c00,
        HOST_IA32_PAT_HIGH              = 0x00002c01,
        HOST_IA32_EFER                  = 0x00002c02,
        HOST_IA32_EFER_HIGH             = 0x00002c03,
        HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
        HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
        PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
        CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
        EXCEPTION_BITMAP                = 0x00004004,
        PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
        PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
        CR3_TARGET_COUNT                = 0x0000400a,
        VM_EXIT_CONTROLS                = 0x0000400c,
        VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
        VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
        VM_ENTRY_CONTROLS               = 0x00004012,
        VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
        VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
        VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
        VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
        TPR_THRESHOLD                   = 0x0000401c,
        SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
        PLE_GAP                         = 0x00004020,
        PLE_WINDOW                      = 0x00004022,
        VM_INSTRUCTION_ERROR            = 0x00004400,
        VM_EXIT_REASON                  = 0x00004402,
        VM_EXIT_INTR_INFO               = 0x00004404,
        VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
        IDT_VECTORING_INFO_FIELD        = 0x00004408,
        IDT_VECTORING_ERROR_CODE        = 0x0000440a,
        VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
        VMX_INSTRUCTION_INFO            = 0x0000440e,
        GUEST_ES_LIMIT                  = 0x00004800,
        GUEST_CS_LIMIT                  = 0x00004802,
        GUEST_SS_LIMIT                  = 0x00004804,
        GUEST_DS_LIMIT                  = 0x00004806,
        GUEST_FS_LIMIT                  = 0x00004808,
        GUEST_GS_LIMIT                  = 0x0000480a,
        GUEST_LDTR_LIMIT                = 0x0000480c,
        GUEST_TR_LIMIT                  = 0x0000480e,
        GUEST_GDTR_LIMIT                = 0x00004810,
        GUEST_IDTR_LIMIT                = 0x00004812,
        GUEST_ES_AR_BYTES               = 0x00004814,
        GUEST_CS_AR_BYTES               = 0x00004816,
        GUEST_SS_AR_BYTES               = 0x00004818,
        GUEST_DS_AR_BYTES               = 0x0000481a,
        GUEST_FS_AR_BYTES               = 0x0000481c,
        GUEST_GS_AR_BYTES               = 0x0000481e,
        GUEST_LDTR_AR_BYTES             = 0x00004820,
        GUEST_TR_AR_BYTES               = 0x00004822,
        GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
        GUEST_ACTIVITY_STATE            = 0x00004826,
        GUEST_SYSENTER_CS               = 0x0000482A,
        VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
        HOST_IA32_SYSENTER_CS           = 0x00004c00,
        CR0_GUEST_HOST_MASK             = 0x00006000,
        CR4_GUEST_HOST_MASK             = 0x00006002,
        CR0_READ_SHADOW                 = 0x00006004,
        CR4_READ_SHADOW                 = 0x00006006,
        CR3_TARGET_VALUE0               = 0x00006008,
        CR3_TARGET_VALUE1               = 0x0000600a,
        CR3_TARGET_VALUE2               = 0x0000600c,
        CR3_TARGET_VALUE3               = 0x0000600e,
        EXIT_QUALIFICATION              = 0x00006400,
        GUEST_LINEAR_ADDRESS            = 0x0000640a,
        GUEST_CR0                       = 0x00006800,
        GUEST_CR3                       = 0x00006802,
        GUEST_CR4                       = 0x00006804,
        GUEST_ES_BASE                   = 0x00006806,
        GUEST_CS_BASE                   = 0x00006808,
        GUEST_SS_BASE                   = 0x0000680a,
        GUEST_DS_BASE                   = 0x0000680c,
        GUEST_FS_BASE                   = 0x0000680e,
        GUEST_GS_BASE                   = 0x00006810,
        GUEST_LDTR_BASE                 = 0x00006812,
        GUEST_TR_BASE                   = 0x00006814,
        GUEST_GDTR_BASE                 = 0x00006816,
        GUEST_IDTR_BASE                 = 0x00006818,
        GUEST_DR7                       = 0x0000681a,
        GUEST_RSP                       = 0x0000681c,
        GUEST_RIP                       = 0x0000681e,
        GUEST_RFLAGS                    = 0x00006820,
        GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
        GUEST_SYSENTER_ESP              = 0x00006824,
        GUEST_SYSENTER_EIP              = 0x00006826,
        HOST_CR0                        = 0x00006c00,
        HOST_CR3                        = 0x00006c02,
        HOST_CR4                        = 0x00006c04,
        HOST_FS_BASE                    = 0x00006c06,
        HOST_GS_BASE                    = 0x00006c08,
        HOST_TR_BASE                    = 0x00006c0a,
        HOST_GDTR_BASE                  = 0x00006c0c,
        HOST_IDTR_BASE                  = 0x00006c0e,
        HOST_IA32_SYSENTER_ESP          = 0x00006c10,
        HOST_IA32_SYSENTER_EIP          = 0x00006c12,
        HOST_RSP                        = 0x00006c14,
        HOST_RIP                        = 0x00006c16,
};
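/*
 * The encodings above follow the VMCS-component layout documented in the SDM:
 * bit 0 selects the high 32 bits of a 64-bit field, bits 9:1 are the field
 * index, bits 11:10 the field type (control, read-only data, guest state,
 * host state) and bits 14:13 the width (16-bit, 64-bit, 32-bit, natural).
 * A minimal decoding sketch; the helper name is illustrative and not part of
 * this header:
 */
static inline unsigned int vmcs_field_width_of(unsigned long field)
{
        return (field >> 13) & 0x3;     /* 0=16-bit, 1=64-bit, 2=32-bit, 3=natural */
}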
/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_UNBLOCK_NMI           0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000

#define VECTORING_INFO_VECTOR_MASK              INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK                INTR_INFO_INTR_TYPE_MASK
#define VECTORING_INFO_DELIVER_CODE_MASK        INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK               INTR_INFO_VALID_MASK

#define INTR_TYPE_EXT_INTR              (0 << 8)  /* external interrupt */
#define INTR_TYPE_RESERVED              (1 << 8)  /* reserved */
#define INTR_TYPE_NMI_INTR              (2 << 8)  /* NMI */
#define INTR_TYPE_HARD_EXCEPTION        (3 << 8)  /* processor exception */
#define INTR_TYPE_SOFT_INTR             (4 << 8)  /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION     (5 << 8)  /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION        (6 << 8)  /* software exception */
#define INTR_TYPE_OTHER_EVENT           (7 << 8)  /* other event */
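/*
 * Sketch of checking a VM-exit interruption-information value with the masks
 * above, here for a valid, pending NMI (the helper name is illustrative, not
 * part of this header):
 */
static inline bool vmx_intr_info_is_nmi(u32 intr_info)
{
        return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) ==
               (INTR_INFO_VALID_MASK | INTR_TYPE_NMI_INTR);
}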
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI            0x00000001
#define GUEST_INTR_STATE_MOV_SS         0x00000002
#define GUEST_INTR_STATE_SMI            0x00000004
#define GUEST_INTR_STATE_NMI            0x00000008

/* GUEST_ACTIVITY_STATE flags */
#define GUEST_ACTIVITY_ACTIVE           0
#define GUEST_ACTIVITY_HLT              1
#define GUEST_ACTIVITY_SHUTDOWN         2
#define GUEST_ACTIVITY_WAIT_SIPI        3

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control reg. */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose reg. */
#define LMSW_SOURCE_DATA_SHIFT          16
#define LMSW_SOURCE_DATA                (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug reg. */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG(eq)        (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */


/*
 * Exit Qualifications for APIC-Access
 */
#define APIC_ACCESS_OFFSET              0xfff   /* 11:0, offset within the APIC page */
#define APIC_ACCESS_TYPE                0xf000  /* 15:12, access type */
#define TYPE_LINEAR_APIC_INST_READ      (0 << 12)
#define TYPE_LINEAR_APIC_INST_WRITE     (1 << 12)
#define TYPE_LINEAR_APIC_INST_FETCH     (2 << 12)
#define TYPE_LINEAR_APIC_EVENT          (3 << 12)
#define TYPE_PHYSICAL_APIC_EVENT        (10 << 12)
#define TYPE_PHYSICAL_APIC_INST         (15 << 12)

/* segment AR in VMCS -- these are different from what LAR reports */
#define VMX_SEGMENT_AR_L_MASK           (1 << 13)

#define VMX_AR_TYPE_ACCESSES_MASK       1
#define VMX_AR_TYPE_READABLE_MASK       (1 << 1)
#define VMX_AR_TYPE_WRITEABLE_MASK      (1 << 2)
#define VMX_AR_TYPE_CODE_MASK           (1 << 3)
#define VMX_AR_TYPE_MASK                0x0f
#define VMX_AR_TYPE_BUSY_64_TSS         11
#define VMX_AR_TYPE_BUSY_32_TSS         11
#define VMX_AR_TYPE_BUSY_16_TSS         3
#define VMX_AR_TYPE_LDT                 2

#define VMX_AR_UNUSABLE_MASK            (1 << 16)
#define VMX_AR_S_MASK                   (1 << 4)
#define VMX_AR_P_MASK                   (1 << 7)
#define VMX_AR_L_MASK                   (1 << 13)
#define VMX_AR_DB_MASK                  (1 << 14)
#define VMX_AR_G_MASK                   (1 << 15)
#define VMX_AR_DPL_SHIFT                5
#define VMX_AR_DPL(ar)                  (((ar) >> VMX_AR_DPL_SHIFT) & 3)

#define VMX_AR_RESERVD_MASK             0xfffe0f00

#define TSS_PRIVATE_MEMSLOT                     (KVM_USER_MEM_SLOTS + 0)
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT        (KVM_USER_MEM_SLOTS + 1)
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT      (KVM_USER_MEM_SLOTS + 2)

#define VMX_NR_VPIDS                            (1 << 16)
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR         0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT          1
#define VMX_VPID_EXTENT_ALL_CONTEXT             2
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL       3

#define VMX_EPT_EXTENT_CONTEXT                  1
#define VMX_EPT_EXTENT_GLOBAL                   2
#define VMX_EPT_EXTENT_SHIFT                    24

#define VMX_EPT_EXECUTE_ONLY_BIT                (1ull)
#define VMX_EPT_PAGE_WALK_4_BIT                 (1ull << 6)
#define VMX_EPT_PAGE_WALK_5_BIT                 (1ull << 7)
#define VMX_EPTP_UC_BIT                         (1ull << 8)
#define VMX_EPTP_WB_BIT                         (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT                    (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT                    (1ull << 17)
#define VMX_EPT_INVEPT_BIT                      (1ull << 20)
#define VMX_EPT_AD_BIT                          (1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT              (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT               (1ull << 26)

#define VMX_VPID_INVVPID_BIT                    (1ull << 0)  /* (32 - 32) */
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT     (1ull << 8)  /* (40 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT      (1ull << 9)  /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT      (1ull << 10) /* (42 - 32) */
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT   (1ull << 11) /* (43 - 32) */

#define VMX_EPT_MT_EPTE_SHIFT                   3
#define VMX_EPTP_PWL_MASK                       0x38ull
#define VMX_EPTP_PWL_4                          0x18ull
#define VMX_EPTP_PWL_5                          0x20ull
#define VMX_EPTP_AD_ENABLE_BIT                  (1ull << 6)
#define VMX_EPTP_MT_MASK                        0x7ull
#define VMX_EPTP_MT_WB                          0x6ull
#define VMX_EPTP_MT_UC                          0x0ull
#define VMX_EPT_READABLE_MASK                   0x1ull
#define VMX_EPT_WRITABLE_MASK                   0x2ull
#define VMX_EPT_EXECUTABLE_MASK                 0x4ull
#define VMX_EPT_IPAT_BIT                        (1ull << 6)
#define VMX_EPT_ACCESS_BIT                      (1ull << 8)
#define VMX_EPT_DIRTY_BIT                       (1ull << 9)
#define VMX_EPT_RWX_MASK                        (VMX_EPT_READABLE_MASK |       \
                                                 VMX_EPT_WRITABLE_MASK |       \
                                                 VMX_EPT_EXECUTABLE_MASK)
#define VMX_EPT_MT_MASK                         (7ull << VMX_EPT_MT_EPTE_SHIFT)
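/*
 * Sketch of how an EPT pointer can be assembled from the bits above, assuming
 * a write-back memory type, a 4-level walk, A/D bits enabled and @root_hpa
 * holding the page-aligned physical address of the top-level EPT table (the
 * helper name is illustrative, not part of this header):
 */
static inline u64 vmx_make_eptp(u64 root_hpa)
{
        return root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT;
}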
static inline u8 vmx_eptp_page_walk_level(u64 eptp)
{
        u64 encoded_level = eptp & VMX_EPTP_PWL_MASK;

        if (encoded_level == VMX_EPTP_PWL_5)
                return 5;

        /* @eptp must be pre-validated by the caller. */
        WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4);
        return 4;
}

/* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
#define VMX_EPT_MISCONFIG_WX_VALUE              (VMX_EPT_WRITABLE_MASK |       \
                                                 VMX_EPT_EXECUTABLE_MASK)

#define VMX_EPT_IDENTITY_PAGETABLE_ADDR         0xfffbc000ul

struct vmx_msr_entry {
        u32 index;
        u32 reserved;
        u64 value;
} __aligned(16);

/*
 * Exit Qualifications for entry failure during or after loading guest state
 */
#define ENTRY_FAIL_DEFAULT              0
#define ENTRY_FAIL_PDPTE                2
#define ENTRY_FAIL_NMI                  3
#define ENTRY_FAIL_VMCS_LINK_PTR        4

/*
 * Exit Qualifications for EPT Violations
 */
#define EPT_VIOLATION_ACC_READ_BIT      0
#define EPT_VIOLATION_ACC_WRITE_BIT     1
#define EPT_VIOLATION_ACC_INSTR_BIT     2
#define EPT_VIOLATION_READABLE_BIT      3
#define EPT_VIOLATION_WRITABLE_BIT      4
#define EPT_VIOLATION_EXECUTABLE_BIT    5
#define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
#define EPT_VIOLATION_ACC_READ          (1 << EPT_VIOLATION_ACC_READ_BIT)
#define EPT_VIOLATION_ACC_WRITE         (1 << EPT_VIOLATION_ACC_WRITE_BIT)
#define EPT_VIOLATION_ACC_INSTR         (1 << EPT_VIOLATION_ACC_INSTR_BIT)
#define EPT_VIOLATION_READABLE          (1 << EPT_VIOLATION_READABLE_BIT)
#define EPT_VIOLATION_WRITABLE          (1 << EPT_VIOLATION_WRITABLE_BIT)
#define EPT_VIOLATION_EXECUTABLE        (1 << EPT_VIOLATION_EXECUTABLE_BIT)
#define EPT_VIOLATION_GVA_TRANSLATED    (1 << EPT_VIOLATION_GVA_TRANSLATED_BIT)
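/*
 * Sketch of interpreting an EPT-violation exit qualification with the bits
 * above, here a write that hit a translation without write permission (the
 * helper name is illustrative, not part of this header):
 */
static inline bool ept_violation_is_write_to_readonly(unsigned long exit_qual)
{
        return (exit_qual & EPT_VIOLATION_ACC_WRITE) &&
               !(exit_qual & EPT_VIOLATION_WRITABLE);
}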
/*
 * VM-instruction error numbers
 */
enum vm_instruction_error_number {
        VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
        VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
        VMXERR_VMCLEAR_VMXON_POINTER = 3,
        VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
        VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
        VMXERR_VMRESUME_AFTER_VMXOFF = 6,
        VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
        VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
        VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
        VMXERR_VMPTRLD_VMXON_POINTER = 10,
        VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
        VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
        VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
        VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
        VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
        VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
        VMXERR_VMCALL_NONCLEAR_VMCS = 19,
        VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
        VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
        VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
        VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
        VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
        VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

/*
 * VM-instruction errors that can be encountered on VM-Enter, used to trace
 * nested VM-Enter failures reported by hardware.  Errors unique to VM-Enter
 * from an SMI Transfer Monitor are not included, as things have gone
 * seriously sideways if we get one of those...
 */
#define VMX_VMENTER_INSTRUCTION_ERRORS \
        { VMXERR_VMLAUNCH_NONCLEAR_VMCS,                "VMLAUNCH_NONCLEAR_VMCS" }, \
        { VMXERR_VMRESUME_NONLAUNCHED_VMCS,             "VMRESUME_NONLAUNCHED_VMCS" }, \
        { VMXERR_VMRESUME_AFTER_VMXOFF,                 "VMRESUME_AFTER_VMXOFF" }, \
        { VMXERR_ENTRY_INVALID_CONTROL_FIELD,           "VMENTRY_INVALID_CONTROL_FIELD" }, \
        { VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,        "VMENTRY_INVALID_HOST_STATE_FIELD" }, \
        { VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS,        "VMENTRY_EVENTS_BLOCKED_BY_MOV_SS" }

enum vmx_l1d_flush_state {
        VMENTER_L1D_FLUSH_AUTO,
        VMENTER_L1D_FLUSH_NEVER,
        VMENTER_L1D_FLUSH_COND,
        VMENTER_L1D_FLUSH_ALWAYS,
        VMENTER_L1D_FLUSH_EPT_DISABLED,
        VMENTER_L1D_FLUSH_NOT_REQUIRED,
};

extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;

#endif