// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "posted_intr.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"
#include "smm.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force sync to shadow VMCS because
	 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
	 * fields and thus must be synced.
	 */
	if (nested_vmx_is_evmptr12_set(to_vmx(vcpu)))
		to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == INVALID_GPA &&
	    !nested_vmx_is_evmptr12_valid(vmx))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't just reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_HYPERV
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (nested_vmx_is_evmptr12_valid(vmx)) {
		kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
		vmx->nested.hv_evmcs = NULL;
	}

	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;

	if (hv_vcpu) {
		hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
		hv_vcpu->nested.vm_id = 0;
		hv_vcpu->nested.vp_id = 0;
	}
#endif
}

static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
{
#ifdef CONFIG_KVM_HYPERV
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	/*
	 * When Enlightened VMEntry is enabled on the calling CPU we treat the
	 * memory area pointed to by vmptr as an Enlightened VMCS (as there's
	 * no good way to distinguish it from a VMCS12) and we must not corrupt
	 * it by writing to the non-existent 'launch_state' field. The area
	 * doesn't have to be the currently active EVMCS on the calling CPU and
	 * there's nothing KVM has to do to transition it from 'active' to
	 * 'non-active' state. It is possible that the area will stay mapped as
	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
	 */
	if (!guest_cpuid_has_evmcs(vcpu) ||
	    !evmptr_is_valid(nested_get_evmptr(vcpu)))
		return false;

	if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
		nested_release_evmcs(vcpu);

	return true;
#else
	return false;
#endif
}

static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;

	/*
	 * All lazily updated registers will be reloaded from VMCS12 on both
	 * vmentry and vmexit.
	 */
	vcpu->arch.regs_dirty = 0;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	vmx->nested.vmxon_ptr = INVALID_GPA;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = INVALID_GPA;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/*
	 * Unpin physical memory we referred to in the vmcs02. The APIC access
	 * page's backing page (yeah, confusing) shouldn't actually be accessed,
	 * and if it is written, the contents are irrelevant.
	 */
	kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}

#define EPTP_PA_MASK	GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}

static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	unsigned long roots = 0;
	uint i;
	struct kvm_mmu_root_info *cached_root;

	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			roots |= KVM_MMU_ROOT_PREVIOUS(i);
	}
	if (roots)
		kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	u32 vm_exit_reason;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;

		/*
		 * It should be impossible to trigger a nested PML Full VM-Exit
		 * for anything other than an EPT Violation from L2. KVM *can*
		 * trigger nEPT page fault injection in response to an EPT
		 * Misconfig, e.g. if the MMIO SPTE was stale and L1's EPT
		 * tables also changed, but KVM should not treat EPT Misconfig
		 * VM-Exits as writes.
		 */
		WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION);

		/*
		 * PML Full and EPT Violation VM-Exits both use bit 12 to report
		 * "NMI unblocking due to IRET", i.e.
		 * the bit can be propagated
		 * as-is from the original EXIT_QUALIFICATION.
		 */
		exit_qualification = vmx_get_exit_qual(vcpu) & INTR_INFO_UNBLOCK_NMI;
	} else {
		if (fault->error_code & PFERR_RSVD_MASK) {
			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
			exit_qualification = 0;
		} else {
			exit_qualification = fault->exit_qualification;
			exit_qualification |= vmx_get_exit_qual(vcpu) &
					      (EPT_VIOLATION_GVA_IS_VALID |
					       EPT_VIOLATION_GVA_TRANSLATED);
			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
		}

		/*
		 * Although the caller (kvm_inject_emulated_page_fault) would
		 * have already synced the faulting address in the shadow EPT
		 * tables for the current EPTP12, we also need to sync it for
		 * any other cached EPTP02s based on the same EP4TA, since the
		 * TLB associates mappings to the EP4TA rather than the full EPTP.
		 */
		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
					   fault->address);
	}

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
	int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);

	kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	nested_ept_new_eptp(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
					   u32 error_code)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/*
	 * Drop bits 31:16 of the error code when performing the #PF mask+match
	 * check. All VMCS fields involved are 32 bits, but Intel CPUs never
	 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
	 * error code. Including the to-be-dropped bits in the check might
	 * result in an "impossible" or missed exit from L1's perspective.
	 */
	if (vector == PF_VECTOR)
		return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);

	return (vmcs12->exception_bitmap & (1u << vector));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
 * itself utilizing x2APIC. All MSRs were previously set to be intercepted,
 * only the "disable intercept" case needs to be handled.
 */
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
							unsigned long *msr_bitmap_l0,
							u32 msr, int type)
{
	if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);

	if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
}

static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)					\
static inline									\
void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx,			\
					 unsigned long *msr_bitmap_l1,		\
					 unsigned long *msr_bitmap_l0, u32 msr)	\
{										\
	if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) ||		\
	    vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr))			\
		vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
	else									\
		vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
}
BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
BUILD_NVMX_MSR_INTERCEPT_HELPER(write)

static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
						    unsigned long *msr_bitmap_l1,
						    unsigned long *msr_bitmap_l0,
						    u32 msr, int types)
{
	if (types & MSR_TYPE_R)
		nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
						  msr_bitmap_l0, msr);
	if (types & MSR_TYPE_W)
		nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
						   msr_bitmap_l0, msr);
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
	 *   and tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!vmx->nested.force_msr_bitmap_recalc) {
		struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);

		if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap &&
		    evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
			return true;
	}

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively toggle those relevant to L2.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_x2apic_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/*
	 * Always check vmcs01's bitmap to honor userspace MSR filters and any
	 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
	 */
#ifdef CONFIG_X86_64
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_PRED_CMD, MSR_TYPE_W);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_FLUSH_CMD, MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);

	vmx->nested.force_msr_bitmap_recalc = false;

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			      VMCS12_SIZE);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			       VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
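	 * In practice this means the descriptor GPA must be 64-byte aligned,
	 * which is what the alignment check below enforces.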
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (CC(!nested_cpu_has_vid(vmcs12)) ||
	     CC(!nested_exit_intr_ack_set(vcpu)) ||
	     CC((vmcs12->posted_intr_nv & 0xff00)) ||
	     CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE)
	       || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
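	 * kvm_read_l1_tsc() then converts the captured value to L1's view of
	 * the TSC by applying L1's TSC scaling ratio and offset.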
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
						offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() by reading KVM's
			 * internal MSR state instead of reading the value from
			 * the vmcs02 VMExit MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}

/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_ept, bool reload_pdptrs,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_PDPTE;
		return -EINVAL;
	}

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently from TLB entries populated by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTPs because guest_mode
 * is part of kvm_mmu_page_role. Thus, TLB entries are tagged with
 * different EPTPs.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPIDs (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Handle pending Hyper-V TLB flush requests */
	kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);

	/*
	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
	 * full TLB flush from the guest's perspective. This is required even
	 * if VPID is disabled in the host as KVM may need to synchronize the
	 * MMU in response to the guest TLB flush.
	 *
	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
	 * EPT is a special snowflake, as guest-physical mappings aren't
	 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
	 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
	 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
	 * those mappings.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/* L2 should never have a VPID if VPID is disabled. */
	WARN_ON(!enable_vpid);

	/*
	 * VPID is enabled and in use by vmcs12.
	 * If vpid12 is changing, then
	 * emulate a guest TLB flush as KVM does not track vpid12 history nor
	 * is the VPID incorporated into the MMU context. I.e. KVM must assume
	 * that the new vpid12 has never been used and thus represents a new
	 * guest ASID that cannot have entries in the TLB.
	 */
	if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
	 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
	 * KVM was unable to allocate a VPID for L2, flush the current context
	 * as the effective ASID is common to both L1 and L2.
	 */
	if (!nested_has_guest_tlb_tag(vcpu))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
				 VMX_BASIC_INOUT |
				 VMX_BASIC_TRUE_CTLS;

	const u64 reserved_bits = GENMASK_ULL(63, 56) |
				  GENMASK_ULL(47, 45) |
				  BIT_ULL(31);

	u64 vmx_basic = vmcs_config.nested.basic;

	BUILD_BUG_ON(feature_bits & reserved_bits);

	/*
	 * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has
	 * inverted polarity), the incoming value must not set feature bits or
	 * reserved bits that aren't allowed/supported by KVM. Fields, i.e.
	 * multi-bit values, are explicitly checked below.
	 */
	if (!is_bitwise_subset(vmx_basic, data, feature_bits | reserved_bits))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
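	 * Reject attempts by userspace to advertise that anti-feature to the
	 * guest.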
	 */
	if (data & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
				u32 **low, u32 **high)
{
	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		*low = &msrs->pinbased_ctls_low;
		*high = &msrs->pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		*low = &msrs->procbased_ctls_low;
		*high = &msrs->procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		*low = &msrs->exit_ctls_low;
		*high = &msrs->exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		*low = &msrs->entry_ctls_low;
		*high = &msrs->entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*low = &msrs->secondary_ctls_low;
		*high = &msrs->secondary_ctls_high;
		break;
	default:
		BUG();
	}
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u32 *lowp, *highp;
	u64 supported;

	vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_bits = VMX_MISC_SAVE_EFER_LMA |
				 VMX_MISC_ACTIVITY_HLT |
				 VMX_MISC_ACTIVITY_SHUTDOWN |
				 VMX_MISC_ACTIVITY_WAIT_SIPI |
				 VMX_MISC_INTEL_PT |
				 VMX_MISC_RDMSR_IN_SMM |
				 VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
				 VMX_MISC_VMXOFF_BLOCK_SMI |
				 VMX_MISC_ZERO_LEN_INS;

	const u64 reserved_bits = BIT_ULL(31) | GENMASK_ULL(13, 9);

	u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
				       vmcs_config.nested.misc_high);

	BUILD_BUG_ON(feature_bits & reserved_bits);

	/*
	 * The incoming value must not set feature bits or reserved bits that
	 * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are
	 * explicitly checked below.
	 */
	if (!is_bitwise_subset(vmx_misc, data, feature_bits | reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
					       vmcs_config.nested.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
{
	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		return &msrs->cr0_fixed0;
	case MSR_IA32_VMX_CR4_FIXED0:
		return &msrs->cr4_fixed0;
	default:
		BUG();
	}
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);

	/*
	 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmcs_config.nested.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they
 * have been modified by the L1 guest. Note, "writable" in this context
 * means "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	if (WARN_ON(!shadow_vmcs))
		return;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
#ifdef CONFIG_KVM_HYPERV
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
		hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
		hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
		hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
		/*
		 * Not present in struct vmcs12:
		 * vmcs12->guest_ssp = evmcs->guest_ssp;
		 */
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
		vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
		/*
		 * Not present in struct vmcs12:
		 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
		 * vmcs12->host_ssp = evmcs->host_ssp;
		 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
		 */
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes =
			evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
		vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap;
		vmcs12->tsc_multiplier = evmcs->tsc_multiplier;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
		vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl;
		/*
		 * Not present in struct vmcs12:
		 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet;
vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl; 1853 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr; 1854 */ 1855 } 1856 1857 /* 1858 * Not used? 1859 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1860 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1861 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1862 * vmcs12->page_fault_error_code_mask = 1863 * evmcs->page_fault_error_code_mask; 1864 * vmcs12->page_fault_error_code_match = 1865 * evmcs->page_fault_error_code_match; 1866 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1867 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1868 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1869 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1870 */ 1871 1872 /* 1873 * Read only fields: 1874 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1875 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1876 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1877 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1878 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1879 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1880 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1881 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1882 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1883 * vmcs12->exit_qualification = evmcs->exit_qualification; 1884 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1885 * 1886 * Not present in struct vmcs12: 1887 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1888 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1889 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1890 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1891 */ 1892 1893 return; 1894 #else /* CONFIG_KVM_HYPERV */ 1895 KVM_BUG_ON(1, vmx->vcpu.kvm); 1896 #endif /* CONFIG_KVM_HYPERV */ 1897 } 1898 1899 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1900 { 1901 #ifdef CONFIG_KVM_HYPERV 1902 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1903 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 1904 1905 /* 1906 * Should not be changed by KVM: 1907 * 1908 * evmcs->host_es_selector = vmcs12->host_es_selector; 1909 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1910 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1911 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1912 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1913 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1914 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1915 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1916 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1917 * evmcs->host_cr0 = vmcs12->host_cr0; 1918 * evmcs->host_cr3 = vmcs12->host_cr3; 1919 * evmcs->host_cr4 = vmcs12->host_cr4; 1920 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1921 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1922 * evmcs->host_rip = vmcs12->host_rip; 1923 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1924 * evmcs->host_fs_base = vmcs12->host_fs_base; 1925 * evmcs->host_gs_base = vmcs12->host_gs_base; 1926 * evmcs->host_tr_base = vmcs12->host_tr_base; 1927 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1928 * evmcs->host_idtr_base = 
vmcs12->host_idtr_base; 1929 * evmcs->host_rsp = vmcs12->host_rsp; 1930 * sync_vmcs02_to_vmcs12() doesn't read these: 1931 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1932 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1933 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1934 * evmcs->ept_pointer = vmcs12->ept_pointer; 1935 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1936 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1937 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1938 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1939 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1940 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1941 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1942 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1943 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1944 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1945 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1946 * evmcs->page_fault_error_code_mask = 1947 * vmcs12->page_fault_error_code_mask; 1948 * evmcs->page_fault_error_code_match = 1949 * vmcs12->page_fault_error_code_match; 1950 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1951 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1952 * evmcs->tsc_offset = vmcs12->tsc_offset; 1953 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1954 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1955 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1956 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1957 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1958 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1959 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1960 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1961 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl; 1962 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl; 1963 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap; 1964 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier; 1965 * 1966 * Not present in struct vmcs12: 1967 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1968 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1969 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1970 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1971 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet; 1972 * evmcs->host_ssp = vmcs12->host_ssp; 1973 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr; 1974 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet; 1975 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl; 1976 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr; 1977 * evmcs->guest_ssp = vmcs12->guest_ssp; 1978 */ 1979 1980 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1981 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1982 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1983 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1984 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1985 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1986 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1987 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1988 1989 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1990 evmcs->guest_cs_limit = 
vmcs12->guest_cs_limit; 1991 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1992 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1993 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1994 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1995 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1996 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1997 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1998 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1999 2000 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 2001 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 2002 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 2003 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 2004 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 2005 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 2006 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 2007 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 2008 2009 evmcs->guest_es_base = vmcs12->guest_es_base; 2010 evmcs->guest_cs_base = vmcs12->guest_cs_base; 2011 evmcs->guest_ss_base = vmcs12->guest_ss_base; 2012 evmcs->guest_ds_base = vmcs12->guest_ds_base; 2013 evmcs->guest_fs_base = vmcs12->guest_fs_base; 2014 evmcs->guest_gs_base = vmcs12->guest_gs_base; 2015 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 2016 evmcs->guest_tr_base = vmcs12->guest_tr_base; 2017 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 2018 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 2019 2020 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 2021 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 2022 2023 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 2024 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 2025 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 2026 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 2027 2028 evmcs->guest_pending_dbg_exceptions = 2029 vmcs12->guest_pending_dbg_exceptions; 2030 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 2031 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 2032 2033 evmcs->guest_activity_state = vmcs12->guest_activity_state; 2034 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 2035 2036 evmcs->guest_cr0 = vmcs12->guest_cr0; 2037 evmcs->guest_cr3 = vmcs12->guest_cr3; 2038 evmcs->guest_cr4 = vmcs12->guest_cr4; 2039 evmcs->guest_dr7 = vmcs12->guest_dr7; 2040 2041 evmcs->guest_physical_address = vmcs12->guest_physical_address; 2042 2043 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 2044 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 2045 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 2046 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 2047 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 2048 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 2049 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 2050 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 2051 2052 evmcs->exit_qualification = vmcs12->exit_qualification; 2053 2054 evmcs->guest_linear_address = vmcs12->guest_linear_address; 2055 evmcs->guest_rsp = vmcs12->guest_rsp; 2056 evmcs->guest_rflags = vmcs12->guest_rflags; 2057 2058 evmcs->guest_interruptibility_info = 2059 vmcs12->guest_interruptibility_info; 2060 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 2061 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 2062 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 2063 evmcs->vm_entry_exception_error_code = 2064 vmcs12->vm_entry_exception_error_code; 2065 
evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
2066
2067 	evmcs->guest_rip = vmcs12->guest_rip;
2068
2069 	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
2070
2071 	return;
2072 #else /* CONFIG_KVM_HYPERV */
2073 	KVM_BUG_ON(1, vmx->vcpu.kvm);
2074 #endif /* CONFIG_KVM_HYPERV */
2075 }
2076
2077 /*
2078  * This is an equivalent of the nested hypervisor executing the vmptrld
2079  * instruction.
2080  */
2081 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
2082 	struct kvm_vcpu *vcpu, bool from_launch)
2083 {
2084 #ifdef CONFIG_KVM_HYPERV
2085 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2086 	bool evmcs_gpa_changed = false;
2087 	u64 evmcs_gpa;
2088
2089 	if (likely(!guest_cpuid_has_evmcs(vcpu)))
2090 		return EVMPTRLD_DISABLED;
2091
2092 	evmcs_gpa = nested_get_evmptr(vcpu);
2093 	if (!evmptr_is_valid(evmcs_gpa)) {
2094 		nested_release_evmcs(vcpu);
2095 		return EVMPTRLD_DISABLED;
2096 	}
2097
2098 	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2099 		vmx->nested.current_vmptr = INVALID_GPA;
2100
2101 		nested_release_evmcs(vcpu);
2102
2103 		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
2104 				 &vmx->nested.hv_evmcs_map))
2105 			return EVMPTRLD_ERROR;
2106
2107 		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2108
2109 		/*
2110 		 * Currently, KVM only supports eVMCS version 1
2111 		 * (== KVM_EVMCS_VERSION) and thus expects the guest to set the
2112 		 * first u32 field of the eVMCS, the eVMCS VersionNumber, to
2113 		 * that value.
2114 		 *
2115 		 * The guest learns which eVMCS versions the host supports by
2116 		 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
2117 		 * is expected to set this CPUID leaf according to the value
2118 		 * returned in vmcs_version from nested_enable_evmcs().
2119 		 *
2120 		 * However, it turns out that Microsoft Hyper-V fails to comply
2121 		 * with its own invented interface: when Hyper-V uses eVMCS, it
2122 		 * sets the first u32 field of the eVMCS to the revision_id
2123 		 * specified in MSR_IA32_VMX_BASIC instead of one of the
2124 		 * supported eVMCS version numbers advertised in
2125 		 * CPUID.0x4000000A.EAX[0:15].
2126 		 *
2127 		 * To work around this Hyper-V bug, accept either a supported
2128 		 * eVMCS version or the VMCS12 revision_id as valid values for
2129 		 * the first u32 field of the eVMCS.
2130 		 */
2131 		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2132 		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2133 			nested_release_evmcs(vcpu);
2134 			return EVMPTRLD_VMFAIL;
2135 		}
2136
2137 		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2138
2139 		evmcs_gpa_changed = true;
2140 		/*
2141 		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
2142 		 * reloaded from guest memory (read-only fields, fields not
2143 		 * present in struct hv_enlightened_vmcs, ...). Make sure there
2144 		 * are no leftovers.
2145 		 */
2146 		if (from_launch) {
2147 			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2148 			memset(vmcs12, 0, sizeof(*vmcs12));
2149 			vmcs12->hdr.revision_id = VMCS12_REVISION;
2150 		}
2151
2152 	}
2153
2154 	/*
2155 	 * Clean fields data can't be used on VMLAUNCH and when we switch
2156 	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
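	 *
	 * Illustrative sketch (not part of the original code): an enlightened
	 * L1 is expected to clear a group's clean bit whenever it touches a
	 * field in that group, roughly
	 *
	 *	evmcs->guest_cr3 = new_cr3;
	 *	evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR;
	 *
	 * so forcing every bit clear below simply makes KVM re-copy all groups.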
2157 */ 2158 if (from_launch || evmcs_gpa_changed) { 2159 vmx->nested.hv_evmcs->hv_clean_fields &= 2160 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2161 2162 vmx->nested.force_msr_bitmap_recalc = true; 2163 } 2164 2165 return EVMPTRLD_SUCCEEDED; 2166 #else 2167 return EVMPTRLD_DISABLED; 2168 #endif 2169 } 2170 2171 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2172 { 2173 struct vcpu_vmx *vmx = to_vmx(vcpu); 2174 2175 if (nested_vmx_is_evmptr12_valid(vmx)) 2176 copy_vmcs12_to_enlightened(vmx); 2177 else 2178 copy_vmcs12_to_shadow(vmx); 2179 2180 vmx->nested.need_vmcs12_to_shadow_sync = false; 2181 } 2182 2183 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2184 { 2185 struct vcpu_vmx *vmx = 2186 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2187 2188 vmx->nested.preemption_timer_expired = true; 2189 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2190 kvm_vcpu_kick(&vmx->vcpu); 2191 2192 return HRTIMER_NORESTART; 2193 } 2194 2195 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2196 { 2197 struct vcpu_vmx *vmx = to_vmx(vcpu); 2198 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2199 2200 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2201 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2202 2203 if (!vmx->nested.has_preemption_timer_deadline) { 2204 vmx->nested.preemption_timer_deadline = 2205 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2206 vmx->nested.has_preemption_timer_deadline = true; 2207 } 2208 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2209 } 2210 2211 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2212 u64 preemption_timeout) 2213 { 2214 struct vcpu_vmx *vmx = to_vmx(vcpu); 2215 2216 /* 2217 * A timer value of zero is architecturally guaranteed to cause 2218 * a VMExit prior to executing any instructions in the guest. 2219 */ 2220 if (preemption_timeout == 0) { 2221 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2222 return; 2223 } 2224 2225 if (vcpu->arch.virtual_tsc_khz == 0) 2226 return; 2227 2228 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2229 preemption_timeout *= 1000000; 2230 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2231 hrtimer_start(&vmx->nested.preemption_timer, 2232 ktime_add_ns(ktime_get(), preemption_timeout), 2233 HRTIMER_MODE_ABS_PINNED); 2234 } 2235 2236 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2237 { 2238 if (vmx->nested.nested_run_pending && 2239 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2240 return vmcs12->guest_ia32_efer; 2241 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2242 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2243 else 2244 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2245 } 2246 2247 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2248 { 2249 struct kvm *kvm = vmx->vcpu.kvm; 2250 2251 /* 2252 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2253 * according to L0's settings (vmcs12 is irrelevant here). Host 2254 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2255 * will be set as needed prior to VMLAUNCH/VMRESUME. 2256 */ 2257 if (vmx->nested.vmcs02_initialized) 2258 return; 2259 vmx->nested.vmcs02_initialized = true; 2260 2261 /* 2262 * We don't care what the EPTP value is we just need to guarantee 2263 * it's valid so we don't get a false positive when doing early 2264 * consistency checks. 
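	 *
	 * Illustrative sketch (an assumption about the EPTP layout, not a claim
	 * about construct_eptp() internals): a "valid" EPTP here just needs a
	 * supported memory type and page-walk length in its low bits, e.g.
	 *
	 *	eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
	 *
	 * and root_hpa == 0 is fine because the value is never used for an
	 * actual translation before the real EPTP is written.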
2265 */ 2266 if (enable_ept && nested_early_check) 2267 vmcs_write64(EPT_POINTER, 2268 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2269 2270 if (vmx->ve_info) 2271 vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); 2272 2273 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2274 if (cpu_has_vmx_vmfunc()) 2275 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2276 2277 if (cpu_has_vmx_posted_intr()) 2278 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2279 2280 if (cpu_has_vmx_msr_bitmap()) 2281 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2282 2283 /* 2284 * PML is emulated for L2, but never enabled in hardware as the MMU 2285 * handles A/D emulation. Disabling PML for L2 also avoids having to 2286 * deal with filtering out L2 GPAs from the buffer. 2287 */ 2288 if (enable_pml) { 2289 vmcs_write64(PML_ADDRESS, 0); 2290 vmcs_write16(GUEST_PML_INDEX, -1); 2291 } 2292 2293 if (cpu_has_vmx_encls_vmexit()) 2294 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA); 2295 2296 if (kvm_notify_vmexit_enabled(kvm)) 2297 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); 2298 2299 /* 2300 * Set the MSR load/store lists to match L0's settings. Only the 2301 * addresses are constant (for vmcs02), the counts can change based 2302 * on L2's behavior, e.g. switching to/from long mode. 2303 */ 2304 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2305 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2306 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2307 2308 vmx_set_constant_host_state(vmx); 2309 } 2310 2311 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2312 struct vmcs12 *vmcs12) 2313 { 2314 prepare_vmcs02_constant_state(vmx); 2315 2316 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); 2317 2318 if (enable_vpid) { 2319 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2320 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2321 else 2322 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2323 } 2324 } 2325 2326 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, 2327 struct vmcs12 *vmcs12) 2328 { 2329 u32 exec_control; 2330 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2331 2332 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) 2333 prepare_vmcs02_early_rare(vmx, vmcs12); 2334 2335 /* 2336 * PIN CONTROLS 2337 */ 2338 exec_control = __pin_controls_get(vmcs01); 2339 exec_control |= (vmcs12->pin_based_vm_exec_control & 2340 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2341 2342 /* Posted interrupts setting is only taken from vmcs12. 
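	 *
	 * Illustrative sketch (not part of the original code) of the net effect
	 * on vmcs02's pin controls, using hypothetical pin01/pin12 names for
	 * the vmcs01 and vmcs12 values merged above:
	 *
	 *	pin02 = (pin01 | pin12) & ~PIN_BASED_VMX_PREEMPTION_TIMER;
	 *	if (!(pin12 & PIN_BASED_POSTED_INTR))
	 *		pin02 &= ~PIN_BASED_POSTED_INTR;
	 *
	 * i.e. the preemption timer is always emulated with an hrtimer, and
	 * posted interrupts survive only if L1 asked for them.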
*/ 2343 vmx->nested.pi_pending = false; 2344 if (nested_cpu_has_posted_intr(vmcs12)) { 2345 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2346 } else { 2347 vmx->nested.posted_intr_nv = -1; 2348 exec_control &= ~PIN_BASED_POSTED_INTR; 2349 } 2350 pin_controls_set(vmx, exec_control); 2351 2352 /* 2353 * EXEC CONTROLS 2354 */ 2355 exec_control = __exec_controls_get(vmcs01); /* L0's desires */ 2356 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2357 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2358 exec_control &= ~CPU_BASED_TPR_SHADOW; 2359 exec_control |= vmcs12->cpu_based_vm_exec_control; 2360 2361 vmx->nested.l1_tpr_threshold = -1; 2362 if (exec_control & CPU_BASED_TPR_SHADOW) 2363 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2364 #ifdef CONFIG_X86_64 2365 else 2366 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2367 CPU_BASED_CR8_STORE_EXITING; 2368 #endif 2369 2370 /* 2371 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2372 * for I/O port accesses. 2373 */ 2374 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2375 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2376 2377 /* 2378 * This bit will be computed in nested_get_vmcs12_pages, because 2379 * we do not have access to L1's MSR bitmap yet. For now, keep 2380 * the same bit as before, hoping to avoid multiple VMWRITEs that 2381 * only set/clear this bit. 2382 */ 2383 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2384 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2385 2386 exec_controls_set(vmx, exec_control); 2387 2388 /* 2389 * SECONDARY EXEC CONTROLS 2390 */ 2391 if (cpu_has_secondary_exec_ctrls()) { 2392 exec_control = __secondary_exec_controls_get(vmcs01); 2393 2394 /* Take the following fields only from vmcs12 */ 2395 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2396 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2397 SECONDARY_EXEC_ENABLE_INVPCID | 2398 SECONDARY_EXEC_ENABLE_RDTSCP | 2399 SECONDARY_EXEC_ENABLE_XSAVES | 2400 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2401 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2402 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2403 SECONDARY_EXEC_ENABLE_VMFUNC | 2404 SECONDARY_EXEC_DESC); 2405 2406 if (nested_cpu_has(vmcs12, 2407 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2408 exec_control |= vmcs12->secondary_vm_exec_control; 2409 2410 /* PML is emulated and never enabled in hardware for L2. */ 2411 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2412 2413 /* VMCS shadowing for L2 is emulated for now */ 2414 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2415 2416 /* 2417 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2418 * will not have to rewrite the controls just for this bit. 2419 */ 2420 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2421 exec_control |= SECONDARY_EXEC_DESC; 2422 2423 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2424 vmcs_write16(GUEST_INTR_STATUS, 2425 vmcs12->guest_intr_status); 2426 2427 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2428 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2429 2430 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2431 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); 2432 2433 secondary_exec_controls_set(vmx, exec_control); 2434 } 2435 2436 /* 2437 * ENTRY CONTROLS 2438 * 2439 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2440 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2441 * on the related bits (if supported by the CPU) in the hope that 2442 * we can avoid VMWrites during vmx_set_efer(). 
2443 * 2444 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is 2445 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to 2446 * do the same for L2. 2447 */ 2448 exec_control = __vm_entry_controls_get(vmcs01); 2449 exec_control |= (vmcs12->vm_entry_controls & 2450 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); 2451 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); 2452 if (cpu_has_load_ia32_efer()) { 2453 if (guest_efer & EFER_LMA) 2454 exec_control |= VM_ENTRY_IA32E_MODE; 2455 if (guest_efer != kvm_host.efer) 2456 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2457 } 2458 vm_entry_controls_set(vmx, exec_control); 2459 2460 /* 2461 * EXIT CONTROLS 2462 * 2463 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2464 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2465 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 2466 */ 2467 exec_control = __vm_exit_controls_get(vmcs01); 2468 if (cpu_has_load_ia32_efer() && guest_efer != kvm_host.efer) 2469 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2470 else 2471 exec_control &= ~VM_EXIT_LOAD_IA32_EFER; 2472 vm_exit_controls_set(vmx, exec_control); 2473 2474 /* 2475 * Interrupt/Exception Fields 2476 */ 2477 if (vmx->nested.nested_run_pending) { 2478 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2479 vmcs12->vm_entry_intr_info_field); 2480 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2481 vmcs12->vm_entry_exception_error_code); 2482 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2483 vmcs12->vm_entry_instruction_len); 2484 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2485 vmcs12->guest_interruptibility_info); 2486 vmx->loaded_vmcs->nmi_known_unmasked = 2487 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2488 } else { 2489 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2490 } 2491 } 2492 2493 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2494 { 2495 struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx); 2496 2497 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2498 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2499 2500 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2501 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2502 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2503 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2504 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2505 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2506 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2507 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2508 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2509 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2510 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2511 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2512 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2513 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2514 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2515 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2516 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2517 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2518 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2519 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2520 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2521 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2522 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2523 
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2524 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2525 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2526 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2527 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2528 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2529 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2530 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2531 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2532 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2533 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2534 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2535 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2536 2537 vmx_segment_cache_clear(vmx); 2538 } 2539 2540 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2541 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2542 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2543 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2544 vmcs12->guest_pending_dbg_exceptions); 2545 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2546 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2547 2548 /* 2549 * L1 may access the L2's PDPTR, so save them to construct 2550 * vmcs12 2551 */ 2552 if (enable_ept) { 2553 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2554 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2555 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2556 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2557 } 2558 2559 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2560 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2561 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2562 } 2563 2564 if (nested_cpu_has_xsaves(vmcs12)) 2565 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2566 2567 /* 2568 * Whether page-faults are trapped is determined by a combination of 2569 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2570 * doesn't care about page faults then we should set all of these to 2571 * L1's desires. However, if L0 does care about (some) page faults, it 2572 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2573 * simply ask to exit on each and every L2 page fault. This is done by 2574 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2575 * Note that below we don't need special code to set EB.PF beyond the 2576 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2577 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2578 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2579 */ 2580 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2581 /* 2582 * TODO: if both L0 and L1 need the same MASK and MATCH, 2583 * go ahead and use it? 2584 */ 2585 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2586 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2587 } else { 2588 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2589 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2590 } 2591 2592 if (cpu_has_vmx_apicv()) { 2593 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2594 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2595 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2596 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2597 } 2598 2599 /* 2600 * Make sure the msr_autostore list is up to date before we set the 2601 * count in the vmcs02. 
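	 *
	 * Illustrative note (not from the original code): each autostore slot
	 * has the hardware MSR-entry layout, struct vmx_msr_entry { u32 index;
	 * u32 reserved; u64 value; }, so after a VM-exit the captured value can
	 * be read back as something like
	 *
	 *	u64 l2_tsc = vmx->msr_autostore.guest.val[i].value;
	 *
	 * where i is the slot reserved for MSR_IA32_TSC below.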
2602 */ 2603 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2604 2605 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2606 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2607 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2608 2609 set_cr4_guest_host_mask(vmx); 2610 } 2611 2612 /* 2613 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2614 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2615 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2616 * guest in a way that will both be appropriate to L1's requests, and our 2617 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2618 * function also has additional necessary side-effects, like setting various 2619 * vcpu->arch fields. 2620 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2621 * is assigned to entry_failure_code on failure. 2622 */ 2623 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2624 bool from_vmentry, 2625 enum vm_entry_failure_code *entry_failure_code) 2626 { 2627 struct vcpu_vmx *vmx = to_vmx(vcpu); 2628 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 2629 bool load_guest_pdptrs_vmcs12 = false; 2630 2631 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) { 2632 prepare_vmcs02_rare(vmx, vmcs12); 2633 vmx->nested.dirty_vmcs12 = false; 2634 2635 load_guest_pdptrs_vmcs12 = !nested_vmx_is_evmptr12_valid(vmx) || 2636 !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); 2637 } 2638 2639 if (vmx->nested.nested_run_pending && 2640 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2641 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2642 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2643 } else { 2644 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2645 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); 2646 } 2647 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || 2648 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 2649 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); 2650 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2651 2652 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2653 * bitwise-or of what L1 wants to trap for L2, and what we want to 2654 * trap. Note that CR0.TS also needs updating - we do this later. 
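	 *
	 * Illustrative sketch (not part of the original code) of the intended
	 * merge, with hypothetical eb01/mask01 standing for the vmcs01 values:
	 *
	 *	eb02   = eb01   | vmcs12->exception_bitmap;
	 *	mask02 = mask01 | vmcs12->cr0_guest_host_mask;
	 *
	 * shrinking cr0_guest_owned_bits below and writing its complement to
	 * CR0_GUEST_HOST_MASK is exactly the second line, expressed in terms of
	 * the bits the guest may still own.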
 */
2656 	vmx_update_exception_bitmap(vcpu);
2657 	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2658 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2659
2660 	if (vmx->nested.nested_run_pending &&
2661 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2662 		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2663 		vcpu->arch.pat = vmcs12->guest_ia32_pat;
2664 	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2665 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2666 	}
2667
2668 	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2669 			vcpu->arch.l1_tsc_offset,
2670 			vmx_get_l2_tsc_offset(vcpu),
2671 			vmx_get_l2_tsc_multiplier(vcpu));
2672
2673 	vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2674 			vcpu->arch.l1_tsc_scaling_ratio,
2675 			vmx_get_l2_tsc_multiplier(vcpu));
2676
2677 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2678 	if (kvm_caps.has_tsc_control)
2679 		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2680
2681 	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2682
2683 	if (nested_cpu_has_ept(vmcs12))
2684 		nested_ept_init_mmu_context(vcpu);
2685
2686 	/*
2687 	 * Override the CR0/CR4 read shadows after setting the effective guest
2688 	 * CR0/CR4. The common helpers also set the shadows, but they don't
2689 	 * account for vmcs12's cr0/4_guest_host_mask.
2690 	 */
2691 	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2692 	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2693
2694 	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2695 	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2696
2697 	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2698 	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2699 	vmx_set_efer(vcpu, vcpu->arch.efer);
2700
2701 	/*
2702 	 * If guest state is invalid and unrestricted guest is disabled,
2703 	 * L1 attempted VMEntry to L2 with invalid state; fail the
2704 	 * VMEntry.
2705 	 *
2706 	 * However, when force loading the guest state (on SMM exit or
2707 	 * when loading nested state after migration), it is possible to
2708 	 * have invalid guest state at this point; it is fixed up later
2709 	 * when the L2 register state is restored.
2710 	 */
2711 	if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2712 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
2713 		return -EINVAL;
2714 	}
2715
2716 	/* Load vmcs12->guest_cr3, backed by either EPT or shadow page tables. */
2717 	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2718 				from_vmentry, entry_failure_code))
2719 		return -EINVAL;
2720
2721 	/*
2722 	 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2723 	 * on nested VM-Exit, which can occur without actually running L2 and
2724 	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2725 	 * vmcs12.GUEST_ACTIVITY_STATE=HLT, in which case KVM will intercept the
2726 	 * transition to HLT instead of running L2.
2727 	 */
2728 	if (enable_ept)
2729 		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2730
2731 	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set.
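	 *
	 * (Illustrative note, not from the original: the PDPTRs are
	 * architectural state only while PAE paging is active, which is roughly
	 *
	 *	!(efer & EFER_LMA) && (cr4 & X86_CR4_PAE) && (cr0 & X86_CR0_PG)
	 *
	 * hence the is_pae_paging() check below; and hardware consumes the
	 * GUEST_PDPTR fields only when EPT is in use for L2.)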
*/ 2732 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2733 is_pae_paging(vcpu)) { 2734 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2735 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2736 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2737 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2738 } 2739 2740 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2741 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) && 2742 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2743 vmcs12->guest_ia32_perf_global_ctrl))) { 2744 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2745 return -EINVAL; 2746 } 2747 2748 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2749 kvm_rip_write(vcpu, vmcs12->guest_rip); 2750 2751 /* 2752 * It was observed that genuine Hyper-V running in L1 doesn't reset 2753 * 'hv_clean_fields' by itself, it only sets the corresponding dirty 2754 * bits when it changes a field in eVMCS. Mark all fields as clean 2755 * here. 2756 */ 2757 if (nested_vmx_is_evmptr12_valid(vmx)) 2758 evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2759 2760 return 0; 2761 } 2762 2763 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2764 { 2765 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2766 nested_cpu_has_virtual_nmis(vmcs12))) 2767 return -EINVAL; 2768 2769 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2770 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2771 return -EINVAL; 2772 2773 return 0; 2774 } 2775 2776 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2777 { 2778 struct vcpu_vmx *vmx = to_vmx(vcpu); 2779 2780 /* Check for memory type validity */ 2781 switch (new_eptp & VMX_EPTP_MT_MASK) { 2782 case VMX_EPTP_MT_UC: 2783 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2784 return false; 2785 break; 2786 case VMX_EPTP_MT_WB: 2787 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2788 return false; 2789 break; 2790 default: 2791 return false; 2792 } 2793 2794 /* Page-walk levels validity. 
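	 *
	 * (Illustrative note, not from the original: bits 5:3 of the EPTP encode
	 * the page-walk length minus one, e.g.
	 *
	 *	(VMX_EPTP_PWL_4 >> 3) + 1 == 4
	 *
	 * so anything other than a 4-level or 5-level walk is rejected below.)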
*/ 2795 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2796 case VMX_EPTP_PWL_5: 2797 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2798 return false; 2799 break; 2800 case VMX_EPTP_PWL_4: 2801 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2802 return false; 2803 break; 2804 default: 2805 return false; 2806 } 2807 2808 /* Reserved bits should not be set */ 2809 if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2810 return false; 2811 2812 /* AD, if set, should be supported */ 2813 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2814 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2815 return false; 2816 } 2817 2818 return true; 2819 } 2820 2821 /* 2822 * Checks related to VM-Execution Control Fields 2823 */ 2824 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2825 struct vmcs12 *vmcs12) 2826 { 2827 struct vcpu_vmx *vmx = to_vmx(vcpu); 2828 2829 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2830 vmx->nested.msrs.pinbased_ctls_low, 2831 vmx->nested.msrs.pinbased_ctls_high)) || 2832 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2833 vmx->nested.msrs.procbased_ctls_low, 2834 vmx->nested.msrs.procbased_ctls_high))) 2835 return -EINVAL; 2836 2837 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2838 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2839 vmx->nested.msrs.secondary_ctls_low, 2840 vmx->nested.msrs.secondary_ctls_high))) 2841 return -EINVAL; 2842 2843 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2844 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2845 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2846 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2847 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2848 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2849 nested_vmx_check_nmi_controls(vmcs12) || 2850 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2851 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2852 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2853 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2854 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2855 return -EINVAL; 2856 2857 if (!nested_cpu_has_preemption_timer(vmcs12) && 2858 nested_cpu_has_save_preemption_timer(vmcs12)) 2859 return -EINVAL; 2860 2861 if (nested_cpu_has_ept(vmcs12) && 2862 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2863 return -EINVAL; 2864 2865 if (nested_cpu_has_vmfunc(vmcs12)) { 2866 if (CC(vmcs12->vm_function_control & 2867 ~vmx->nested.msrs.vmfunc_controls)) 2868 return -EINVAL; 2869 2870 if (nested_cpu_has_eptp_switching(vmcs12)) { 2871 if (CC(!nested_cpu_has_ept(vmcs12)) || 2872 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2873 return -EINVAL; 2874 } 2875 } 2876 2877 return 0; 2878 } 2879 2880 /* 2881 * Checks related to VM-Exit Control Fields 2882 */ 2883 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2884 struct vmcs12 *vmcs12) 2885 { 2886 struct vcpu_vmx *vmx = to_vmx(vcpu); 2887 2888 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2889 vmx->nested.msrs.exit_ctls_low, 2890 vmx->nested.msrs.exit_ctls_high)) || 2891 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2892 return -EINVAL; 2893 2894 return 0; 2895 } 2896 2897 /* 2898 * Checks related to VM-Entry Control Fields 2899 */ 2900 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2901 struct vmcs12 *vmcs12) 2902 { 2903 struct 
vcpu_vmx *vmx = to_vmx(vcpu); 2904 2905 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2906 vmx->nested.msrs.entry_ctls_low, 2907 vmx->nested.msrs.entry_ctls_high))) 2908 return -EINVAL; 2909 2910 /* 2911 * From the Intel SDM, volume 3: 2912 * Fields relevant to VM-entry event injection must be set properly. 2913 * These fields are the VM-entry interruption-information field, the 2914 * VM-entry exception error code, and the VM-entry instruction length. 2915 */ 2916 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2917 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2918 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2919 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2920 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2921 bool should_have_error_code; 2922 bool urg = nested_cpu_has2(vmcs12, 2923 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2924 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2925 2926 /* VM-entry interruption-info field: interruption type */ 2927 if (CC(intr_type == INTR_TYPE_RESERVED) || 2928 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2929 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2930 return -EINVAL; 2931 2932 /* VM-entry interruption-info field: vector */ 2933 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2934 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2935 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2936 return -EINVAL; 2937 2938 /* VM-entry interruption-info field: deliver error code */ 2939 should_have_error_code = 2940 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2941 x86_exception_has_error_code(vector); 2942 if (CC(has_error_code != should_have_error_code)) 2943 return -EINVAL; 2944 2945 /* VM-entry exception error code */ 2946 if (CC(has_error_code && 2947 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2948 return -EINVAL; 2949 2950 /* VM-entry interruption-info field: reserved bits */ 2951 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2952 return -EINVAL; 2953 2954 /* VM-entry instruction length */ 2955 switch (intr_type) { 2956 case INTR_TYPE_SOFT_EXCEPTION: 2957 case INTR_TYPE_SOFT_INTR: 2958 case INTR_TYPE_PRIV_SW_EXCEPTION: 2959 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2960 CC(vmcs12->vm_entry_instruction_len == 0 && 2961 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2962 return -EINVAL; 2963 } 2964 } 2965 2966 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2967 return -EINVAL; 2968 2969 return 0; 2970 } 2971 2972 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2973 struct vmcs12 *vmcs12) 2974 { 2975 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2976 nested_check_vm_exit_controls(vcpu, vmcs12) || 2977 nested_check_vm_entry_controls(vcpu, vmcs12)) 2978 return -EINVAL; 2979 2980 #ifdef CONFIG_KVM_HYPERV 2981 if (guest_cpuid_has_evmcs(vcpu)) 2982 return nested_evmcs_check_controls(vmcs12); 2983 #endif 2984 2985 return 0; 2986 } 2987 2988 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, 2989 struct vmcs12 *vmcs12) 2990 { 2991 #ifdef CONFIG_X86_64 2992 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 2993 !!(vcpu->arch.efer & EFER_LMA))) 2994 return -EINVAL; 2995 #endif 2996 return 0; 2997 } 2998 2999 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 3000 struct vmcs12 *vmcs12) 3001 { 3002 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); 3003 3004 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 3005 
CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 3006 CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) 3007 return -EINVAL; 3008 3009 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 3010 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 3011 return -EINVAL; 3012 3013 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 3014 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 3015 return -EINVAL; 3016 3017 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 3018 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3019 vmcs12->host_ia32_perf_global_ctrl))) 3020 return -EINVAL; 3021 3022 if (ia32e) { 3023 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 3024 return -EINVAL; 3025 } else { 3026 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 3027 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 3028 CC((vmcs12->host_rip) >> 32)) 3029 return -EINVAL; 3030 } 3031 3032 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3033 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3034 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3035 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3036 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3037 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3038 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3039 CC(vmcs12->host_cs_selector == 0) || 3040 CC(vmcs12->host_tr_selector == 0) || 3041 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 3042 return -EINVAL; 3043 3044 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 3045 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 3046 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 3047 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 3048 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 3049 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 3050 return -EINVAL; 3051 3052 /* 3053 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 3054 * IA32_EFER MSR must be 0 in the field for that register. In addition, 3055 * the values of the LMA and LME bits in the field must each be that of 3056 * the host address-space size VM-exit control. 
3057 */ 3058 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 3059 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 3060 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 3061 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 3062 return -EINVAL; 3063 } 3064 3065 return 0; 3066 } 3067 3068 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 3069 struct vmcs12 *vmcs12) 3070 { 3071 struct vcpu_vmx *vmx = to_vmx(vcpu); 3072 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 3073 struct vmcs_hdr hdr; 3074 3075 if (vmcs12->vmcs_link_pointer == INVALID_GPA) 3076 return 0; 3077 3078 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 3079 return -EINVAL; 3080 3081 if (ghc->gpa != vmcs12->vmcs_link_pointer && 3082 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 3083 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) 3084 return -EINVAL; 3085 3086 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 3087 offsetof(struct vmcs12, hdr), 3088 sizeof(hdr)))) 3089 return -EINVAL; 3090 3091 if (CC(hdr.revision_id != VMCS12_REVISION) || 3092 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 3093 return -EINVAL; 3094 3095 return 0; 3096 } 3097 3098 /* 3099 * Checks related to Guest Non-register State 3100 */ 3101 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 3102 { 3103 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 3104 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 3105 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 3106 return -EINVAL; 3107 3108 return 0; 3109 } 3110 3111 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 3112 struct vmcs12 *vmcs12, 3113 enum vm_entry_failure_code *entry_failure_code) 3114 { 3115 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); 3116 3117 *entry_failure_code = ENTRY_FAIL_DEFAULT; 3118 3119 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 3120 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 3121 return -EINVAL; 3122 3123 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3124 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 3125 return -EINVAL; 3126 3127 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3128 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 3129 return -EINVAL; 3130 3131 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 3132 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 3133 return -EINVAL; 3134 } 3135 3136 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 3137 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3138 vmcs12->guest_ia32_perf_global_ctrl))) 3139 return -EINVAL; 3140 3141 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) 3142 return -EINVAL; 3143 3144 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || 3145 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) 3146 return -EINVAL; 3147 3148 /* 3149 * If the load IA32_EFER VM-entry control is 1, the following checks 3150 * are performed on the field for the IA32_EFER MSR: 3151 * - Bits reserved in the IA32_EFER MSR must be 0. 3152 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 3153 * the IA-32e mode guest VM-exit control. It must also be identical 3154 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 3155 * CR0.PG) is 1. 
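	 *
	 * (Illustrative sketch, partly an assumption about how nested state
	 * restore marks the pointer: after migration hv_evmcs_vmptr is left as
	 * EVMPTR_MAP_PENDING rather than a GPA, so the first pass through this
	 * path re-maps the page via
	 * nested_vmx_handle_enlightened_vmptrld(vcpu, false) and then forces a
	 * vmcs12 -> eVMCS sync, as the code below does.)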
3156 */ 3157 if (to_vmx(vcpu)->nested.nested_run_pending && 3158 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 3159 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 3160 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 3161 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 3162 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 3163 return -EINVAL; 3164 } 3165 3166 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3167 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3168 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3169 return -EINVAL; 3170 3171 if (nested_check_guest_non_reg_state(vmcs12)) 3172 return -EINVAL; 3173 3174 return 0; 3175 } 3176 3177 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3178 { 3179 struct vcpu_vmx *vmx = to_vmx(vcpu); 3180 unsigned long cr3, cr4; 3181 bool vm_fail; 3182 3183 if (!nested_early_check) 3184 return 0; 3185 3186 if (vmx->msr_autoload.host.nr) 3187 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3188 if (vmx->msr_autoload.guest.nr) 3189 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3190 3191 preempt_disable(); 3192 3193 vmx_prepare_switch_to_guest(vcpu); 3194 3195 /* 3196 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3197 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3198 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3199 * there is no need to preserve other bits or save/restore the field. 3200 */ 3201 vmcs_writel(GUEST_RFLAGS, 0); 3202 3203 cr3 = __get_current_cr3_fast(); 3204 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3205 vmcs_writel(HOST_CR3, cr3); 3206 vmx->loaded_vmcs->host_state.cr3 = cr3; 3207 } 3208 3209 cr4 = cr4_read_shadow(); 3210 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3211 vmcs_writel(HOST_CR4, cr4); 3212 vmx->loaded_vmcs->host_state.cr4 = cr4; 3213 } 3214 3215 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3216 __vmx_vcpu_run_flags(vmx)); 3217 3218 if (vmx->msr_autoload.host.nr) 3219 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3220 if (vmx->msr_autoload.guest.nr) 3221 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3222 3223 if (vm_fail) { 3224 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3225 3226 preempt_enable(); 3227 3228 trace_kvm_nested_vmenter_failed( 3229 "early hardware check VM-instruction error: ", error); 3230 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3231 return 1; 3232 } 3233 3234 /* 3235 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3236 */ 3237 if (hw_breakpoint_active()) 3238 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3239 local_irq_enable(); 3240 preempt_enable(); 3241 3242 /* 3243 * A non-failing VMEntry means we somehow entered guest mode with 3244 * an illegal RIP, and that's just the tip of the iceberg. There 3245 * is no telling what memory has been modified or what state has 3246 * been exposed to unknown code. Hitting this all but guarantees 3247 * a (very critical) hardware issue. 3248 */ 3249 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3250 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3251 3252 return 0; 3253 } 3254 3255 #ifdef CONFIG_KVM_HYPERV 3256 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3257 { 3258 struct vcpu_vmx *vmx = to_vmx(vcpu); 3259 3260 /* 3261 * hv_evmcs may end up being not mapped after migration (when 3262 * L2 was running), map it here to make sure vmcs12 changes are 3263 * properly reflected. 
3264 */ 3265 if (guest_cpuid_has_evmcs(vcpu) && 3266 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { 3267 enum nested_evmptrld_status evmptrld_status = 3268 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3269 3270 if (evmptrld_status == EVMPTRLD_VMFAIL || 3271 evmptrld_status == EVMPTRLD_ERROR) 3272 return false; 3273 3274 /* 3275 * Post migration VMCS12 always provides the most actual 3276 * information, copy it to eVMCS upon entry. 3277 */ 3278 vmx->nested.need_vmcs12_to_shadow_sync = true; 3279 } 3280 3281 return true; 3282 } 3283 #endif 3284 3285 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3286 { 3287 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3288 struct vcpu_vmx *vmx = to_vmx(vcpu); 3289 struct kvm_host_map *map; 3290 3291 if (!vcpu->arch.pdptrs_from_userspace && 3292 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 3293 /* 3294 * Reload the guest's PDPTRs since after a migration 3295 * the guest CR3 might be restored prior to setting the nested 3296 * state which can lead to a load of wrong PDPTRs. 3297 */ 3298 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) 3299 return false; 3300 } 3301 3302 3303 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3304 map = &vmx->nested.apic_access_page_map; 3305 3306 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) { 3307 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn)); 3308 } else { 3309 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n", 3310 __func__); 3311 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3312 vcpu->run->internal.suberror = 3313 KVM_INTERNAL_ERROR_EMULATION; 3314 vcpu->run->internal.ndata = 0; 3315 return false; 3316 } 3317 } 3318 3319 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3320 map = &vmx->nested.virtual_apic_map; 3321 3322 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3323 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3324 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3325 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3326 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3327 /* 3328 * The processor will never use the TPR shadow, simply 3329 * clear the bit from the execution control. Such a 3330 * configuration is useless, but it happens in tests. 3331 * For any other configuration, failing the vm entry is 3332 * _not_ what the processor does but it's basically the 3333 * only possibility we have. 3334 */ 3335 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3336 } else { 3337 /* 3338 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3339 * force VM-Entry to fail. 3340 */ 3341 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA); 3342 } 3343 } 3344 3345 if (nested_cpu_has_posted_intr(vmcs12)) { 3346 map = &vmx->nested.pi_desc_map; 3347 3348 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3349 vmx->nested.pi_desc = 3350 (struct pi_desc *)(((void *)map->hva) + 3351 offset_in_page(vmcs12->posted_intr_desc_addr)); 3352 vmcs_write64(POSTED_INTR_DESC_ADDR, 3353 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3354 } else { 3355 /* 3356 * Defer the KVM_INTERNAL_EXIT until KVM tries to 3357 * access the contents of the VMCS12 posted interrupt 3358 * descriptor. (Note that KVM may do this when it 3359 * should not, per the architectural specification.) 
3360 */ 3361 vmx->nested.pi_desc = NULL; 3362 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); 3363 } 3364 } 3365 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3366 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3367 else 3368 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3369 3370 return true; 3371 } 3372 3373 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3374 { 3375 #ifdef CONFIG_KVM_HYPERV 3376 /* 3377 * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy 3378 * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory 3379 * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post 3380 * migration. 3381 */ 3382 if (!nested_get_evmcs_page(vcpu)) { 3383 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3384 __func__); 3385 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3386 vcpu->run->internal.suberror = 3387 KVM_INTERNAL_ERROR_EMULATION; 3388 vcpu->run->internal.ndata = 0; 3389 3390 return false; 3391 } 3392 #endif 3393 3394 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3395 return false; 3396 3397 return true; 3398 } 3399 3400 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3401 { 3402 struct vmcs12 *vmcs12; 3403 struct vcpu_vmx *vmx = to_vmx(vcpu); 3404 gpa_t dst; 3405 3406 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3407 return 0; 3408 3409 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3410 return 1; 3411 3412 /* 3413 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3414 * set is already checked as part of A/D emulation. 3415 */ 3416 vmcs12 = get_vmcs12(vcpu); 3417 if (!nested_cpu_has_pml(vmcs12)) 3418 return 0; 3419 3420 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3421 vmx->nested.pml_full = true; 3422 return 1; 3423 } 3424 3425 gpa &= ~0xFFFull; 3426 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3427 3428 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3429 offset_in_page(dst), sizeof(gpa))) 3430 return 0; 3431 3432 vmcs12->guest_pml_index--; 3433 3434 return 0; 3435 } 3436 3437 /* 3438 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3439 * for running VMX instructions (except VMXON, whose prerequisites are 3440 * slightly different). It also specifies what exception to inject otherwise. 3441 * Note that many of these exceptions have priority over VM exits, so they 3442 * don't have to be checked again here. 3443 */ 3444 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3445 { 3446 if (!to_vmx(vcpu)->nested.vmxon) { 3447 kvm_queue_exception(vcpu, UD_VECTOR); 3448 return 0; 3449 } 3450 3451 if (vmx_get_cpl(vcpu)) { 3452 kvm_inject_gp(vcpu, 0); 3453 return 0; 3454 } 3455 3456 return 1; 3457 } 3458 3459 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3460 { 3461 u8 rvi = vmx_get_rvi(); 3462 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3463 3464 return ((rvi & 0xf0) > (vppr & 0xf0)); 3465 } 3466 3467 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3468 struct vmcs12 *vmcs12); 3469 3470 /* 3471 * If from_vmentry is false, this is being called from state restore (either RSM 3472 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 
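 *
 * When from_vmentry is false, the early hardware consistency checks, the
 * guest-state checks and the VM-Entry MSR-load list are all skipped here;
 * the vmcs12 pages are instead (re)mapped later via
 * KVM_REQ_GET_NESTED_STATE_PAGES.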
3473 * 3474 * Returns: 3475 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3476 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3477 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3478 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3479 */ 3480 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3481 bool from_vmentry) 3482 { 3483 struct vcpu_vmx *vmx = to_vmx(vcpu); 3484 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3485 enum vm_entry_failure_code entry_failure_code; 3486 bool evaluate_pending_interrupts; 3487 union vmx_exit_reason exit_reason = { 3488 .basic = EXIT_REASON_INVALID_STATE, 3489 .failed_vmentry = 1, 3490 }; 3491 u32 failed_index; 3492 3493 trace_kvm_nested_vmenter(kvm_rip_read(vcpu), 3494 vmx->nested.current_vmptr, 3495 vmcs12->guest_rip, 3496 vmcs12->guest_intr_status, 3497 vmcs12->vm_entry_intr_info_field, 3498 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT, 3499 vmcs12->ept_pointer, 3500 vmcs12->guest_cr3, 3501 KVM_ISA_VMX); 3502 3503 kvm_service_local_tlb_flush_requests(vcpu); 3504 3505 evaluate_pending_interrupts = exec_controls_get(vmx) & 3506 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3507 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3508 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3509 if (!evaluate_pending_interrupts) 3510 evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu); 3511 3512 if (!vmx->nested.nested_run_pending || 3513 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3514 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3515 if (kvm_mpx_supported() && 3516 (!vmx->nested.nested_run_pending || 3517 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 3518 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3519 3520 /* 3521 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3522 * nested early checks are disabled. In the event of a "late" VM-Fail, 3523 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3524 * software model to the pre-VMEntry host state. When EPT is disabled, 3525 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3526 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3527 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3528 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3529 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3530 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3531 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3532 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3533 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3534 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3535 */ 3536 if (!enable_ept && !nested_early_check) 3537 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3538 3539 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3540 3541 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); 3542 3543 if (from_vmentry) { 3544 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3545 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3546 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3547 } 3548 3549 if (nested_vmx_check_vmentry_hw(vcpu)) { 3550 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3551 return NVMX_VMENTRY_VMFAIL; 3552 } 3553 3554 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3555 &entry_failure_code)) { 3556 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3557 vmcs12->exit_qualification = entry_failure_code; 3558 goto vmentry_fail_vmexit; 3559 } 3560 } 3561 3562 enter_guest_mode(vcpu); 3563 3564 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) { 3565 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3566 vmcs12->exit_qualification = entry_failure_code; 3567 goto vmentry_fail_vmexit_guest_mode; 3568 } 3569 3570 if (from_vmentry) { 3571 failed_index = nested_vmx_load_msr(vcpu, 3572 vmcs12->vm_entry_msr_load_addr, 3573 vmcs12->vm_entry_msr_load_count); 3574 if (failed_index) { 3575 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3576 vmcs12->exit_qualification = failed_index; 3577 goto vmentry_fail_vmexit_guest_mode; 3578 } 3579 } else { 3580 /* 3581 * The MMU is not initialized to point at the right entities yet and 3582 * "get pages" would need to read data from the guest (i.e. we will 3583 * need to perform gpa to hpa translation). Request a call 3584 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3585 * have already been set at vmentry time and should not be reset. 3586 */ 3587 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3588 } 3589 3590 /* 3591 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI 3592 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can 3593 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit 3594 * unconditionally. 3595 */ 3596 if (unlikely(evaluate_pending_interrupts)) 3597 kvm_make_request(KVM_REQ_EVENT, vcpu); 3598 3599 /* 3600 * Do not start the preemption timer hrtimer until after we know 3601 * we are successful, so that only nested_vmx_vmexit needs to cancel 3602 * the timer. 3603 */ 3604 vmx->nested.preemption_timer_expired = false; 3605 if (nested_cpu_has_preemption_timer(vmcs12)) { 3606 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3607 vmx_start_preemption_timer(vcpu, timer_value); 3608 } 3609 3610 /* 3611 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3612 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3613 * returned as far as L1 is concerned. It will only return (and set 3614 * the success flag) when L2 exits (see nested_vmx_vmexit()). 3615 */ 3616 return NVMX_VMENTRY_SUCCESS; 3617 3618 /* 3619 * A failed consistency check that leads to a VMExit during L1's 3620 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3621 * 26.7 "VM-entry failures during or after loading guest state". 
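	 *
	 * The two labels below unwind progressively more state:
	 *
	 *   vmentry_fail_vmexit_guest_mode: undo the vmcs12 TSC offset (if any)
	 *       and leave guest mode (prepare_vmcs02() had already run).
	 *
	 *   vmentry_fail_vmexit: switch back to vmcs01 and, for a real
	 *       VMLAUNCH/VMRESUME (from_vmentry), synthesize the failed
	 *       VM-Entry by loading vmcs12 host state and recording the
	 *       failed exit reason.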
3622 */ 3623 vmentry_fail_vmexit_guest_mode: 3624 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3625 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3626 leave_guest_mode(vcpu); 3627 3628 vmentry_fail_vmexit: 3629 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3630 3631 if (!from_vmentry) 3632 return NVMX_VMENTRY_VMEXIT; 3633 3634 load_vmcs12_host_state(vcpu, vmcs12); 3635 vmcs12->vm_exit_reason = exit_reason.full; 3636 if (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)) 3637 vmx->nested.need_vmcs12_to_shadow_sync = true; 3638 return NVMX_VMENTRY_VMEXIT; 3639 } 3640 3641 /* 3642 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3643 * for running an L2 nested guest. 3644 */ 3645 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3646 { 3647 struct vmcs12 *vmcs12; 3648 enum nvmx_vmentry_status status; 3649 struct vcpu_vmx *vmx = to_vmx(vcpu); 3650 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3651 enum nested_evmptrld_status evmptrld_status; 3652 3653 if (!nested_vmx_check_permission(vcpu)) 3654 return 1; 3655 3656 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3657 if (evmptrld_status == EVMPTRLD_ERROR) { 3658 kvm_queue_exception(vcpu, UD_VECTOR); 3659 return 1; 3660 } 3661 3662 kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED); 3663 3664 if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) 3665 return nested_vmx_failInvalid(vcpu); 3666 3667 if (CC(!nested_vmx_is_evmptr12_valid(vmx) && 3668 vmx->nested.current_vmptr == INVALID_GPA)) 3669 return nested_vmx_failInvalid(vcpu); 3670 3671 vmcs12 = get_vmcs12(vcpu); 3672 3673 /* 3674 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3675 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3676 * rather than RFLAGS.ZF, and no error number is stored to the 3677 * VM-instruction error field. 3678 */ 3679 if (CC(vmcs12->hdr.shadow_vmcs)) 3680 return nested_vmx_failInvalid(vcpu); 3681 3682 if (nested_vmx_is_evmptr12_valid(vmx)) { 3683 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 3684 3685 copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields); 3686 /* Enlightened VMCS doesn't have launch state */ 3687 vmcs12->launch_state = !launch; 3688 } else if (enable_shadow_vmcs) { 3689 copy_shadow_to_vmcs12(vmx); 3690 } 3691 3692 /* 3693 * The nested entry process starts with enforcing various prerequisites 3694 * on vmcs12 as required by the Intel SDM, and act appropriately when 3695 * they fail: As the SDM explains, some conditions should cause the 3696 * instruction to fail, while others will cause the instruction to seem 3697 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3698 * To speed up the normal (success) code path, we should avoid checking 3699 * for misconfigurations which will anyway be caught by the processor 3700 * when using the merged vmcs02. 3701 */ 3702 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3703 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3704 3705 if (CC(vmcs12->launch_state == launch)) 3706 return nested_vmx_fail(vcpu, 3707 launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3708 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3709 3710 if (nested_vmx_check_controls(vcpu, vmcs12)) 3711 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3712 3713 if (nested_vmx_check_address_space_size(vcpu, vmcs12)) 3714 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3715 3716 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3717 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3718 3719 /* 3720 * We're finally done with prerequisite checking, and can start with 3721 * the nested entry. 3722 */ 3723 vmx->nested.nested_run_pending = 1; 3724 vmx->nested.has_preemption_timer_deadline = false; 3725 status = nested_vmx_enter_non_root_mode(vcpu, true); 3726 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3727 goto vmentry_failed; 3728 3729 /* Emulate processing of posted interrupts on VM-Enter. */ 3730 if (nested_cpu_has_posted_intr(vmcs12) && 3731 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3732 vmx->nested.pi_pending = true; 3733 kvm_make_request(KVM_REQ_EVENT, vcpu); 3734 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3735 } 3736 3737 /* Hide L1D cache contents from the nested guest. */ 3738 vmx->vcpu.arch.l1tf_flush_l1d = true; 3739 3740 /* 3741 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3742 * also be used as part of restoring nVMX state for 3743 * snapshot restore (migration). 3744 * 3745 * In this flow, it is assumed that vmcs12 cache was 3746 * transferred as part of captured nVMX state and should 3747 * therefore not be read from guest memory (which may not 3748 * exist on destination host yet). 3749 */ 3750 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3751 3752 switch (vmcs12->guest_activity_state) { 3753 case GUEST_ACTIVITY_HLT: 3754 /* 3755 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3756 * awakened by event injection or by an NMI-window VM-exit or 3757 * by an interrupt-window VM-exit, halt the vcpu. 3758 */ 3759 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3760 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3761 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3762 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3763 vmx->nested.nested_run_pending = 0; 3764 return kvm_emulate_halt_noskip(vcpu); 3765 } 3766 break; 3767 case GUEST_ACTIVITY_WAIT_SIPI: 3768 vmx->nested.nested_run_pending = 0; 3769 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3770 break; 3771 default: 3772 break; 3773 } 3774 3775 return 1; 3776 3777 vmentry_failed: 3778 vmx->nested.nested_run_pending = 0; 3779 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3780 return 0; 3781 if (status == NVMX_VMENTRY_VMEXIT) 3782 return 1; 3783 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3784 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3785 } 3786 3787 /* 3788 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3789 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3790 * This function returns the new value we should put in vmcs12.guest_cr0. 3791 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3792 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3793 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3794 * didn't trap the bit, because if L1 did, so would L0). 3795 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3796 * been modified by L2, and L1 knows it. 
So just leave the old value of 3797 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3798 * isn't relevant, because if L0 traps this bit it can set it to anything. 3799 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3800 * changed these bits, and therefore they need to be updated, but L0 3801 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3802 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3803 */ 3804 static inline unsigned long 3805 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3806 { 3807 return 3808 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3809 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3810 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3811 vcpu->arch.cr0_guest_owned_bits)); 3812 } 3813 3814 static inline unsigned long 3815 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3816 { 3817 return 3818 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3819 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3820 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3821 vcpu->arch.cr4_guest_owned_bits)); 3822 } 3823 3824 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3825 struct vmcs12 *vmcs12, 3826 u32 vm_exit_reason, u32 exit_intr_info) 3827 { 3828 u32 idt_vectoring; 3829 unsigned int nr; 3830 3831 /* 3832 * Per the SDM, VM-Exits due to double and triple faults are never 3833 * considered to occur during event delivery, even if the double/triple 3834 * fault is the result of an escalating vectoring issue. 3835 * 3836 * Note, the SDM qualifies the double fault behavior with "The original 3837 * event results in a double-fault exception". It's unclear why the 3838 * qualification exists since exits due to double fault can occur only 3839 * while vectoring a different exception (injected events are never 3840 * subject to interception), i.e. there's _always_ an original event. 3841 * 3842 * The SDM also uses NMI as a confusing example for the "original event 3843 * causes the VM exit directly" clause. NMI isn't special in any way, 3844 * the same rule applies to all events that cause an exit directly. 3845 * NMI is an odd choice for the example because NMIs can only occur on 3846 * instruction boundaries, i.e. they _can't_ occur during vectoring. 
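	 *
	 * Net effect below: for VM-Exits due to a triple fault, or due to
	 * interception of a double fault (#DF), idt_vectoring_info_field is
	 * cleared; for every other exit, whatever event was being injected
	 * (exception, NMI or interrupt) is re-encoded into
	 * idt_vectoring_info_field so that L1 can re-inject it.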
3847 */ 3848 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT || 3849 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI && 3850 is_double_fault(exit_intr_info))) { 3851 vmcs12->idt_vectoring_info_field = 0; 3852 } else if (vcpu->arch.exception.injected) { 3853 nr = vcpu->arch.exception.vector; 3854 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3855 3856 if (kvm_exception_is_soft(nr)) { 3857 vmcs12->vm_exit_instruction_len = 3858 vcpu->arch.event_exit_inst_len; 3859 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3860 } else 3861 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3862 3863 if (vcpu->arch.exception.has_error_code) { 3864 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3865 vmcs12->idt_vectoring_error_code = 3866 vcpu->arch.exception.error_code; 3867 } 3868 3869 vmcs12->idt_vectoring_info_field = idt_vectoring; 3870 } else if (vcpu->arch.nmi_injected) { 3871 vmcs12->idt_vectoring_info_field = 3872 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3873 } else if (vcpu->arch.interrupt.injected) { 3874 nr = vcpu->arch.interrupt.nr; 3875 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3876 3877 if (vcpu->arch.interrupt.soft) { 3878 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3879 vmcs12->vm_entry_instruction_len = 3880 vcpu->arch.event_exit_inst_len; 3881 } else 3882 idt_vectoring |= INTR_TYPE_EXT_INTR; 3883 3884 vmcs12->idt_vectoring_info_field = idt_vectoring; 3885 } else { 3886 vmcs12->idt_vectoring_info_field = 0; 3887 } 3888 } 3889 3890 3891 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3892 { 3893 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3894 gfn_t gfn; 3895 3896 /* 3897 * Don't need to mark the APIC access page dirty; it is never 3898 * written to by the CPU during APIC virtualization. 3899 */ 3900 3901 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3902 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3903 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3904 } 3905 3906 if (nested_cpu_has_posted_intr(vmcs12)) { 3907 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3908 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3909 } 3910 } 3911 3912 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3913 { 3914 struct vcpu_vmx *vmx = to_vmx(vcpu); 3915 int max_irr; 3916 void *vapic_page; 3917 u16 status; 3918 3919 if (!vmx->nested.pi_pending) 3920 return 0; 3921 3922 if (!vmx->nested.pi_desc) 3923 goto mmio_needed; 3924 3925 vmx->nested.pi_pending = false; 3926 3927 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3928 return 0; 3929 3930 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); 3931 if (max_irr > 0) { 3932 vapic_page = vmx->nested.virtual_apic_map.hva; 3933 if (!vapic_page) 3934 goto mmio_needed; 3935 3936 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3937 vapic_page, &max_irr); 3938 status = vmcs_read16(GUEST_INTR_STATUS); 3939 if ((u8)max_irr > ((u8)status & 0xff)) { 3940 status &= ~0xff; 3941 status |= (u8)max_irr; 3942 vmcs_write16(GUEST_INTR_STATUS, status); 3943 } 3944 } 3945 3946 nested_mark_vmcs12_pages_dirty(vcpu); 3947 return 0; 3948 3949 mmio_needed: 3950 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL); 3951 return -ENXIO; 3952 } 3953 3954 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu) 3955 { 3956 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; 3957 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; 3958 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3959 unsigned long exit_qual; 3960 3961 if (ex->has_payload) { 3962 exit_qual = ex->payload; 3963 } else if (ex->vector == PF_VECTOR) { 3964 
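		/*
		 * No payload was provided (e.g. the #PF was injected by
		 * userspace without one); fall back to CR2, which holds the
		 * faulting linear address that belongs in the exit
		 * qualification of a #PF VM-Exit.
		 */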
exit_qual = vcpu->arch.cr2; 3965 } else if (ex->vector == DB_VECTOR) { 3966 exit_qual = vcpu->arch.dr6; 3967 exit_qual &= ~DR6_BT; 3968 exit_qual ^= DR6_ACTIVE_LOW; 3969 } else { 3970 exit_qual = 0; 3971 } 3972 3973 /* 3974 * Unlike AMD's Paged Real Mode, which reports an error code on #PF 3975 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the 3976 * "has error code" flags on VM-Exit if the CPU is in Real Mode. 3977 */ 3978 if (ex->has_error_code && is_protmode(vcpu)) { 3979 /* 3980 * Intel CPUs do not generate error codes with bits 31:16 set, 3981 * and more importantly VMX disallows setting bits 31:16 in the 3982 * injected error code for VM-Entry. Drop the bits to mimic 3983 * hardware and avoid inducing failure on nested VM-Entry if L1 3984 * chooses to inject the exception back to L2. AMD CPUs _do_ 3985 * generate "full" 32-bit error codes, so KVM allows userspace 3986 * to inject exception error codes with bits 31:16 set. 3987 */ 3988 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code; 3989 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3990 } 3991 3992 if (kvm_exception_is_soft(ex->vector)) 3993 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3994 else 3995 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3996 3997 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3998 vmx_get_nmi_mask(vcpu)) 3999 intr_info |= INTR_INFO_UNBLOCK_NMI; 4000 4001 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 4002 } 4003 4004 /* 4005 * Returns true if a debug trap is (likely) pending delivery. Infer the class 4006 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6). 4007 * Using the payload is flawed because code breakpoints (fault-like) and data 4008 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e. 4009 * this will return false positives if a to-be-injected code breakpoint #DB is 4010 * pending (from KVM's perspective, but not "pending" across an instruction 4011 * boundary). ICEBP, a.k.a. INT1, is also not reflected here even though it 4012 * too is trap-like. 4013 * 4014 * KVM "works" despite these flaws as ICEBP isn't currently supported by the 4015 * emulator, Monitor Trap Flag is not marked pending on intercepted #DBs (the 4016 * #DB has already happened), and MTF isn't marked pending on code breakpoints 4017 * from the emulator (because such #DBs are fault-like and thus don't trigger 4018 * actions that fire on instruction retire). 4019 */ 4020 static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex) 4021 { 4022 if (!ex->pending || ex->vector != DB_VECTOR) 4023 return 0; 4024 4025 /* General Detect #DBs are always fault-like. */ 4026 return ex->payload & ~DR6_BD; 4027 } 4028 4029 /* 4030 * Returns true if there's a pending #DB exception that is lower priority than 4031 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by 4032 * KVM, but could theoretically be injected by userspace. Note, this code is 4033 * imperfect, see above. 4034 */ 4035 static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex) 4036 { 4037 return vmx_get_pending_dbg_trap(ex) & ~DR6_BT; 4038 } 4039 4040 /* 4041 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 4042 * recognized #DB (data or single-step) that has yet to be delivered. 
Since KVM 4043 * represents these debug traps with a payload that is said to be compatible 4044 * with the 'pending debug exceptions' field, write the payload to the VMCS 4045 * field if a VM-exit is delivered before the debug trap. 4046 */ 4047 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 4048 { 4049 unsigned long pending_dbg; 4050 4051 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception); 4052 if (pending_dbg) 4053 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg); 4054 } 4055 4056 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 4057 { 4058 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 4059 to_vmx(vcpu)->nested.preemption_timer_expired; 4060 } 4061 4062 static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection) 4063 { 4064 struct vcpu_vmx *vmx = to_vmx(vcpu); 4065 void *vapic = vmx->nested.virtual_apic_map.hva; 4066 int max_irr, vppr; 4067 4068 if (nested_vmx_preemption_timer_pending(vcpu) || 4069 vmx->nested.mtf_pending) 4070 return true; 4071 4072 /* 4073 * Virtual Interrupt Delivery doesn't require manual injection. Either 4074 * the interrupt is already in GUEST_RVI and will be recognized by CPU 4075 * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move 4076 * the interrupt from the PIR to RVI prior to entering the guest. 4077 */ 4078 if (for_injection) 4079 return false; 4080 4081 if (!nested_cpu_has_vid(get_vmcs12(vcpu)) || 4082 __vmx_interrupt_blocked(vcpu)) 4083 return false; 4084 4085 if (!vapic) 4086 return false; 4087 4088 vppr = *((u32 *)(vapic + APIC_PROCPRI)); 4089 4090 max_irr = vmx_get_rvi(); 4091 if ((max_irr & 0xf0) > (vppr & 0xf0)) 4092 return true; 4093 4094 if (vmx->nested.pi_pending && vmx->nested.pi_desc && 4095 pi_test_on(vmx->nested.pi_desc)) { 4096 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); 4097 if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0)) 4098 return true; 4099 } 4100 4101 return false; 4102 } 4103 4104 /* 4105 * Per the Intel SDM's table "Priority Among Concurrent Events", with minor 4106 * edits to fill in missing examples, e.g. #DB due to split-lock accesses, 4107 * and less minor edits to splice in the priority of VMX Non-Root specific 4108 * events, e.g. MTF and NMI/INTR-window exiting. 
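 *
 * The list is ordered from highest to lowest priority; the bracketed notes
 * [1]-[4] after the list give the SDM's ordering rules for the VMX-specific
 * entries.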
 *
 * 1 Hardware Reset and Machine Checks
 *	- RESET
 *	- Machine Check
 *
 * 2 Trap on Task Switch
 *	- T flag in TSS is set (on task switch)
 *
 * 3 External Hardware Interventions
 *	- FLUSH
 *	- STOPCLK
 *	- SMI
 *	- INIT
 *
 * 3.5 Monitor Trap Flag (MTF) VM-exit[1]
 *
 * 4 Traps on Previous Instruction
 *	- Breakpoints
 *	- Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O
 *	  breakpoint, or #DB due to a split-lock access)
 *
 * 4.3	VMX-preemption timer expired VM-exit[2]
 *
 * 4.6	NMI-window exiting VM-exit[3]
 *
 * 5 Nonmaskable Interrupts (NMI)
 *
 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery[4]
 *
 * 6 Maskable Hardware Interrupts
 *
 * 7 Code Breakpoint Fault
 *
 * 8 Faults from Fetching Next Instruction
 *	- Code-Segment Limit Violation
 *	- Code Page Fault
 *	- Control protection exception (missing ENDBRANCH at target of indirect
 *	  call or jump)
 *
 * 9 Faults from Decoding Next Instruction
 *	- Instruction length > 15 bytes
 *	- Invalid Opcode
 *	- Coprocessor Not Available
 *
 *10 Faults on Executing Instruction
 *	- Overflow
 *	- Bound error
 *	- Invalid TSS
 *	- Segment Not Present
 *	- Stack fault
 *	- General Protection
 *	- Data Page Fault
 *	- Alignment Check
 *	- x86 FPU Floating-point exception
 *	- SIMD floating-point exception
 *	- Virtualization exception
 *	- Control protection exception
 *
 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs),
 *     INIT signals, and higher priority events take priority over MTF VM exits.
 *     MTF VM exits take priority over debug-trap exceptions and lower priority
 *     events.
 *
 * [2] Debug-trap exceptions and higher priority events take priority over VM exits
 *     caused by the VMX-preemption timer.  VM exits caused by the VMX-preemption
 *     timer take priority over VM exits caused by the "NMI-window exiting"
 *     VM-execution control and lower priority events.
 *
 * [3] Debug-trap exceptions and higher priority events take priority over VM exits
 *     caused by "NMI-window exiting".  VM exits caused by this control take
 *     priority over non-maskable interrupts (NMIs) and lower priority events.
 *
 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to
 *     the 1-setting of the "interrupt-window exiting" VM-execution control.  Thus,
 *     non-maskable interrupts (NMIs) and higher priority events take priority over
 *     delivery of a virtual interrupt; delivery of a virtual interrupt takes
 *     priority over external interrupts and lower priority events.
 */
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	/*
	 * Only a pending nested run blocks a pending exception.  If there is a
	 * previously injected event, the pending exception occurred while said
	 * event was being delivered and thus needs to be handled.
	 */
	bool block_nested_exceptions = vmx->nested.nested_run_pending;
	/*
	 * New events (not exceptions) are only recognized at instruction
	 * boundaries.
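	 * ("Events" here means IRQs, NMIs, INIT/SIPI, SMI, MTF and the
	 * preemption timer, as opposed to exceptions.)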
If an event needs reinjection, then KVM is handling a 4200 * VM-Exit that occurred _during_ instruction execution; new events are 4201 * blocked until the instruction completes. 4202 */ 4203 bool block_nested_events = block_nested_exceptions || 4204 kvm_event_needs_reinjection(vcpu); 4205 4206 if (lapic_in_kernel(vcpu) && 4207 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 4208 if (block_nested_events) 4209 return -EBUSY; 4210 nested_vmx_update_pending_dbg(vcpu); 4211 clear_bit(KVM_APIC_INIT, &apic->pending_events); 4212 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 4213 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 4214 4215 /* MTF is discarded if the vCPU is in WFS. */ 4216 vmx->nested.mtf_pending = false; 4217 return 0; 4218 } 4219 4220 if (lapic_in_kernel(vcpu) && 4221 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 4222 if (block_nested_events) 4223 return -EBUSY; 4224 4225 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 4226 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 4227 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 4228 apic->sipi_vector & 0xFFUL); 4229 return 0; 4230 } 4231 /* Fallthrough, the SIPI is completely ignored. */ 4232 } 4233 4234 /* 4235 * Process exceptions that are higher priority than Monitor Trap Flag: 4236 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but 4237 * could theoretically come in from userspace), and ICEBP (INT1). 4238 * 4239 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except 4240 * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF 4241 * across SMI/RSM as it should; that needs to be addressed in order to 4242 * prioritize SMI over MTF and trap-like #DBs. 4243 */ 4244 if (vcpu->arch.exception_vmexit.pending && 4245 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) { 4246 if (block_nested_exceptions) 4247 return -EBUSY; 4248 4249 nested_vmx_inject_exception_vmexit(vcpu); 4250 return 0; 4251 } 4252 4253 if (vcpu->arch.exception.pending && 4254 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) { 4255 if (block_nested_exceptions) 4256 return -EBUSY; 4257 goto no_vmexit; 4258 } 4259 4260 if (vmx->nested.mtf_pending) { 4261 if (block_nested_events) 4262 return -EBUSY; 4263 nested_vmx_update_pending_dbg(vcpu); 4264 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 4265 return 0; 4266 } 4267 4268 if (vcpu->arch.exception_vmexit.pending) { 4269 if (block_nested_exceptions) 4270 return -EBUSY; 4271 4272 nested_vmx_inject_exception_vmexit(vcpu); 4273 return 0; 4274 } 4275 4276 if (vcpu->arch.exception.pending) { 4277 if (block_nested_exceptions) 4278 return -EBUSY; 4279 goto no_vmexit; 4280 } 4281 4282 if (nested_vmx_preemption_timer_pending(vcpu)) { 4283 if (block_nested_events) 4284 return -EBUSY; 4285 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 4286 return 0; 4287 } 4288 4289 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 4290 if (block_nested_events) 4291 return -EBUSY; 4292 goto no_vmexit; 4293 } 4294 4295 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 4296 if (block_nested_events) 4297 return -EBUSY; 4298 if (!nested_exit_on_nmi(vcpu)) 4299 goto no_vmexit; 4300 4301 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 4302 NMI_VECTOR | INTR_TYPE_NMI_INTR | 4303 INTR_INFO_VALID_MASK, 0); 4304 /* 4305 * The NMI-triggered VM exit counts as injection: 4306 * clear this one and block further NMIs. 
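		 *
		 * That is what the two statements below model: nmi_pending = 0
		 * consumes the NMI, and vmx_set_nmi_mask(vcpu, true) mirrors
		 * the NMI blocking that hardware establishes when an NMI
		 * causes a VM-Exit.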
4307 */ 4308 vcpu->arch.nmi_pending = 0; 4309 vmx_set_nmi_mask(vcpu, true); 4310 return 0; 4311 } 4312 4313 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 4314 int irq; 4315 4316 if (block_nested_events) 4317 return -EBUSY; 4318 if (!nested_exit_on_intr(vcpu)) 4319 goto no_vmexit; 4320 4321 if (!nested_exit_intr_ack_set(vcpu)) { 4322 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 4323 return 0; 4324 } 4325 4326 irq = kvm_cpu_get_extint(vcpu); 4327 if (irq != -1) { 4328 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 4329 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); 4330 return 0; 4331 } 4332 4333 irq = kvm_apic_has_interrupt(vcpu); 4334 if (WARN_ON_ONCE(irq < 0)) 4335 goto no_vmexit; 4336 4337 /* 4338 * If the IRQ is L2's PI notification vector, process posted 4339 * interrupts for L2 instead of injecting VM-Exit, as the 4340 * detection/morphing architecturally occurs when the IRQ is 4341 * delivered to the CPU. Note, only interrupts that are routed 4342 * through the local APIC trigger posted interrupt processing, 4343 * and enabling posted interrupts requires ACK-on-exit. 4344 */ 4345 if (irq == vmx->nested.posted_intr_nv) { 4346 vmx->nested.pi_pending = true; 4347 kvm_apic_clear_irr(vcpu, irq); 4348 goto no_vmexit; 4349 } 4350 4351 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 4352 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); 4353 4354 /* 4355 * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must 4356 * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI 4357 * if APICv is active. 4358 */ 4359 kvm_apic_ack_interrupt(vcpu, irq); 4360 return 0; 4361 } 4362 4363 no_vmexit: 4364 return vmx_complete_nested_posted_interrupt(vcpu); 4365 } 4366 4367 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 4368 { 4369 ktime_t remaining = 4370 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 4371 u64 value; 4372 4373 if (ktime_to_ns(remaining) <= 0) 4374 return 0; 4375 4376 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 4377 do_div(value, 1000000); 4378 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 4379 } 4380 4381 static bool is_vmcs12_ext_field(unsigned long field) 4382 { 4383 switch (field) { 4384 case GUEST_ES_SELECTOR: 4385 case GUEST_CS_SELECTOR: 4386 case GUEST_SS_SELECTOR: 4387 case GUEST_DS_SELECTOR: 4388 case GUEST_FS_SELECTOR: 4389 case GUEST_GS_SELECTOR: 4390 case GUEST_LDTR_SELECTOR: 4391 case GUEST_TR_SELECTOR: 4392 case GUEST_ES_LIMIT: 4393 case GUEST_CS_LIMIT: 4394 case GUEST_SS_LIMIT: 4395 case GUEST_DS_LIMIT: 4396 case GUEST_FS_LIMIT: 4397 case GUEST_GS_LIMIT: 4398 case GUEST_LDTR_LIMIT: 4399 case GUEST_TR_LIMIT: 4400 case GUEST_GDTR_LIMIT: 4401 case GUEST_IDTR_LIMIT: 4402 case GUEST_ES_AR_BYTES: 4403 case GUEST_DS_AR_BYTES: 4404 case GUEST_FS_AR_BYTES: 4405 case GUEST_GS_AR_BYTES: 4406 case GUEST_LDTR_AR_BYTES: 4407 case GUEST_TR_AR_BYTES: 4408 case GUEST_ES_BASE: 4409 case GUEST_CS_BASE: 4410 case GUEST_SS_BASE: 4411 case GUEST_DS_BASE: 4412 case GUEST_FS_BASE: 4413 case GUEST_GS_BASE: 4414 case GUEST_LDTR_BASE: 4415 case GUEST_TR_BASE: 4416 case GUEST_GDTR_BASE: 4417 case GUEST_IDTR_BASE: 4418 case GUEST_PENDING_DBG_EXCEPTIONS: 4419 case GUEST_BNDCFGS: 4420 return true; 4421 default: 4422 break; 4423 } 4424 4425 return false; 4426 } 4427 4428 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4429 struct vmcs12 *vmcs12) 4430 { 4431 struct vcpu_vmx *vmx = to_vmx(vcpu); 4432 4433 vmcs12->guest_es_selector = 
vmcs_read16(GUEST_ES_SELECTOR); 4434 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 4435 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 4436 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 4437 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 4438 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 4439 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 4440 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 4441 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 4442 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 4443 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 4444 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 4445 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 4446 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 4447 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 4448 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 4449 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 4450 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 4451 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 4452 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 4453 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 4454 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 4455 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 4456 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 4457 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 4458 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 4459 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 4460 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 4461 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 4462 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 4463 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 4464 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 4465 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 4466 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 4467 vmcs12->guest_pending_dbg_exceptions = 4468 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 4469 4470 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 4471 } 4472 4473 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4474 struct vmcs12 *vmcs12) 4475 { 4476 struct vcpu_vmx *vmx = to_vmx(vcpu); 4477 int cpu; 4478 4479 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4480 return; 4481 4482 4483 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4484 4485 cpu = get_cpu(); 4486 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4487 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4488 4489 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4490 4491 vmx->loaded_vmcs = &vmx->vmcs01; 4492 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4493 put_cpu(); 4494 } 4495 4496 /* 4497 * Update the guest state fields of vmcs12 to reflect changes that 4498 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4499 * VM-entry controls is also updated, since this is really a guest 4500 * state bit.) 
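 *
 * Fields that are expensive to read and rarely consumed by L1 (segment
 * registers, descriptor-table bases/limits, etc.) are split out into
 * sync_vmcs02_to_vmcs12_rare() and, unless an enlightened VMCS is in use,
 * are only read back from vmcs02 on demand via copy_vmcs02_to_vmcs12_rare()
 * (gated by need_sync_vmcs02_to_vmcs12_rare).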
4501 */ 4502 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4503 { 4504 struct vcpu_vmx *vmx = to_vmx(vcpu); 4505 4506 if (nested_vmx_is_evmptr12_valid(vmx)) 4507 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4508 4509 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = 4510 !nested_vmx_is_evmptr12_valid(vmx); 4511 4512 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4513 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4514 4515 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4516 vmcs12->guest_rip = kvm_rip_read(vcpu); 4517 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4518 4519 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4520 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4521 4522 vmcs12->guest_interruptibility_info = 4523 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4524 4525 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4526 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4527 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4528 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4529 else 4530 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4531 4532 if (nested_cpu_has_preemption_timer(vmcs12) && 4533 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4534 !vmx->nested.nested_run_pending) 4535 vmcs12->vmx_preemption_timer_value = 4536 vmx_get_preemption_timer_value(vcpu); 4537 4538 /* 4539 * In some cases (usually, nested EPT), L2 is allowed to change its 4540 * own CR3 without exiting. If it has changed it, we must keep it. 4541 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4542 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 4543 * 4544 * Additionally, restore L2's PDPTR to vmcs12. 4545 */ 4546 if (enable_ept) { 4547 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4548 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4549 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4550 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4551 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4552 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4553 } 4554 } 4555 4556 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4557 4558 if (nested_cpu_has_vid(vmcs12)) 4559 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4560 4561 vmcs12->vm_entry_controls = 4562 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4563 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4564 4565 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4566 vmcs12->guest_dr7 = vcpu->arch.dr7; 4567 4568 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4569 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4570 } 4571 4572 /* 4573 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4574 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4575 * and this function updates it to reflect the changes to the guest state while 4576 * L2 was running (and perhaps made some exits which were handled directly by L0 4577 * without going back to L1), and to reflect the exit reason. 4578 * Note that we do not have to copy here all VMCS fields, just those that 4579 * could have changed by the L2 guest or the exit - i.e., the guest-state and 4580 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4581 * which already writes to vmcs12 directly. 
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 vm_exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update exit information fields: */
	vmcs12->vm_exit_reason = vm_exit_reason;
	if (to_vmx(vcpu)->exit_reason.enclave_mode)
		vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
	vmcs12->exit_qualification = exit_qualification;

	/*
	 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
	 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
	 * exit info fields are unmodified.
	 */
	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12,
					  vm_exit_reason, exit_intr_info);

		vmcs12->vm_exit_intr_info = exit_intr_info;
		vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

		/*
		 * According to spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on a normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	enum vm_entry_failure_code ignored;
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_rsp_write(vcpu, vmcs12->host_rsp);
	kvm_rip_write(vcpu, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to the EFER value set
	 * above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it).
	 */
	vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask().
*/ 4669 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4670 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4671 4672 nested_ept_uninit_mmu_context(vcpu); 4673 4674 /* 4675 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4676 * couldn't have changed. 4677 */ 4678 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) 4679 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4680 4681 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4682 4683 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4684 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4685 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4686 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4687 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4688 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4689 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4690 4691 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 4692 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4693 vmcs_write64(GUEST_BNDCFGS, 0); 4694 4695 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4696 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4697 vcpu->arch.pat = vmcs12->host_ia32_pat; 4698 } 4699 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 4700 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu))) 4701 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4702 vmcs12->host_ia32_perf_global_ctrl)); 4703 4704 /* Set L1 segment info according to Intel SDM 4705 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4706 seg = (struct kvm_segment) { 4707 .base = 0, 4708 .limit = 0xFFFFFFFF, 4709 .selector = vmcs12->host_cs_selector, 4710 .type = 11, 4711 .present = 1, 4712 .s = 1, 4713 .g = 1 4714 }; 4715 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4716 seg.l = 1; 4717 else 4718 seg.db = 1; 4719 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4720 seg = (struct kvm_segment) { 4721 .base = 0, 4722 .limit = 0xFFFFFFFF, 4723 .type = 3, 4724 .present = 1, 4725 .s = 1, 4726 .db = 1, 4727 .g = 1 4728 }; 4729 seg.selector = vmcs12->host_ds_selector; 4730 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4731 seg.selector = vmcs12->host_es_selector; 4732 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4733 seg.selector = vmcs12->host_ss_selector; 4734 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4735 seg.selector = vmcs12->host_fs_selector; 4736 seg.base = vmcs12->host_fs_base; 4737 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4738 seg.selector = vmcs12->host_gs_selector; 4739 seg.base = vmcs12->host_gs_base; 4740 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4741 seg = (struct kvm_segment) { 4742 .base = vmcs12->host_tr_base, 4743 .limit = 0x67, 4744 .selector = vmcs12->host_tr_selector, 4745 .type = 11, 4746 .present = 1 4747 }; 4748 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4749 4750 memset(&seg, 0, sizeof(seg)); 4751 seg.unusable = 1; 4752 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR); 4753 4754 kvm_set_dr(vcpu, 7, 0x400); 4755 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4756 4757 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4758 vmcs12->vm_exit_msr_load_count)) 4759 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4760 4761 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 4762 } 4763 4764 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4765 { 4766 struct vmx_uret_msr *efer_msr; 4767 unsigned int i; 4768 4769 if (vm_entry_controls_get(vmx) & 
VM_ENTRY_LOAD_IA32_EFER) 4770 return vmcs_read64(GUEST_IA32_EFER); 4771 4772 if (cpu_has_load_ia32_efer()) 4773 return kvm_host.efer; 4774 4775 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4776 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4777 return vmx->msr_autoload.guest.val[i].value; 4778 } 4779 4780 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4781 if (efer_msr) 4782 return efer_msr->data; 4783 4784 return kvm_host.efer; 4785 } 4786 4787 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4788 { 4789 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4790 struct vcpu_vmx *vmx = to_vmx(vcpu); 4791 struct vmx_msr_entry g, h; 4792 gpa_t gpa; 4793 u32 i, j; 4794 4795 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4796 4797 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4798 /* 4799 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4800 * as vmcs01.GUEST_DR7 contains a userspace defined value 4801 * and vcpu->arch.dr7 is not squirreled away before the 4802 * nested VMENTER (not worth adding a variable in nested_vmx). 4803 */ 4804 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4805 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4806 else 4807 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4808 } 4809 4810 /* 4811 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4812 * handle a variety of side effects to KVM's software model. 4813 */ 4814 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4815 4816 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); 4817 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4818 4819 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4820 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4821 4822 nested_ept_uninit_mmu_context(vcpu); 4823 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4824 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4825 4826 /* 4827 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4828 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4829 * VMFail, like everything else we just need to ensure our 4830 * software model is up-to-date. 4831 */ 4832 if (enable_ept && is_pae_paging(vcpu)) 4833 ept_save_pdptrs(vcpu); 4834 4835 kvm_mmu_reset_context(vcpu); 4836 4837 /* 4838 * This nasty bit of open coding is a compromise between blindly 4839 * loading L1's MSRs using the exit load lists (incorrect emulation 4840 * of VMFail), leaving the nested VM's MSRs in the software model 4841 * (incorrect behavior) and snapshotting the modified MSRs (too 4842 * expensive since the lists are unbound by hardware). For each 4843 * MSR that was (prematurely) loaded from the nested VMEntry load 4844 * list, reload it from the exit load list if it exists and differs 4845 * from the guest value. The intent is to stuff host state as 4846 * silently as possible, not to fully process the exit load list. 
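	 *
	 * In pseudo-code, with 'g' an entry from the VM-Entry (guest) load
	 * list and 'h' an entry from the VM-Exit (host) load list:
	 *
	 *   for each g in vm_entry_msr_load_addr[0..vm_entry_msr_load_count):
	 *       for each h in vm_exit_msr_load_addr[0..vm_exit_msr_load_count):
	 *           if (h.index == g.index && h.value != g.value)
	 *               WRMSR(h.index, h.value)    // restore L1's value
	 *
	 * Any failure to read, validate or write an entry aborts with
	 * VMX_ABORT_LOAD_HOST_MSR_FAIL.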
4847 */ 4848 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4849 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4850 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4851 pr_debug_ratelimited( 4852 "%s read MSR index failed (%u, 0x%08llx)\n", 4853 __func__, i, gpa); 4854 goto vmabort; 4855 } 4856 4857 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4858 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4859 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4860 pr_debug_ratelimited( 4861 "%s read MSR failed (%u, 0x%08llx)\n", 4862 __func__, j, gpa); 4863 goto vmabort; 4864 } 4865 if (h.index != g.index) 4866 continue; 4867 if (h.value == g.value) 4868 break; 4869 4870 if (nested_vmx_load_msr_check(vcpu, &h)) { 4871 pr_debug_ratelimited( 4872 "%s check failed (%u, 0x%x, 0x%x)\n", 4873 __func__, j, h.index, h.reserved); 4874 goto vmabort; 4875 } 4876 4877 if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) { 4878 pr_debug_ratelimited( 4879 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4880 __func__, j, h.index, h.value); 4881 goto vmabort; 4882 } 4883 } 4884 } 4885 4886 return; 4887 4888 vmabort: 4889 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4890 } 4891 4892 /* 4893 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4894 * and modify vmcs12 to make it see what it would expect to see there if 4895 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 4896 */ 4897 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4898 u32 exit_intr_info, unsigned long exit_qualification) 4899 { 4900 struct vcpu_vmx *vmx = to_vmx(vcpu); 4901 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4902 4903 /* Pending MTF traps are discarded on VM-Exit. */ 4904 vmx->nested.mtf_pending = false; 4905 4906 /* trying to cancel vmlaunch/vmresume is a bug */ 4907 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4908 4909 #ifdef CONFIG_KVM_HYPERV 4910 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 4911 /* 4912 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map 4913 * Enlightened VMCS after migration and we still need to 4914 * do that when something is forcing L2->L1 exit prior to 4915 * the first L2 run. 4916 */ 4917 (void)nested_get_evmcs_page(vcpu); 4918 } 4919 #endif 4920 4921 /* Service pending TLB flush requests for L2 before switching to L1. */ 4922 kvm_service_local_tlb_flush_requests(vcpu); 4923 4924 /* 4925 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4926 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4927 * up-to-date before switching to L1. 4928 */ 4929 if (enable_ept && is_pae_paging(vcpu)) 4930 vmx_ept_load_pdptrs(vcpu); 4931 4932 leave_guest_mode(vcpu); 4933 4934 if (nested_cpu_has_preemption_timer(vmcs12)) 4935 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4936 4937 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) { 4938 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; 4939 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 4940 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; 4941 } 4942 4943 if (likely(!vmx->fail)) { 4944 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4945 4946 if (vm_exit_reason != -1) 4947 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4948 exit_intr_info, exit_qualification); 4949 4950 /* 4951 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4952 * also be used to capture vmcs12 cache as part of 4953 * capturing nVMX state for snapshot (migration). 
4954 * 4955 * Otherwise, this flush will dirty guest memory at a 4956 * point it is already assumed by user-space to be 4957 * immutable. 4958 */ 4959 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4960 } else { 4961 /* 4962 * The only expected VM-instruction error is "VM entry with 4963 * invalid control field(s)." Anything else indicates a 4964 * problem with L0. And we should never get here with a 4965 * VMFail of any type if early consistency checks are enabled. 4966 */ 4967 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4968 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4969 WARN_ON_ONCE(nested_early_check); 4970 } 4971 4972 /* 4973 * Drop events/exceptions that were queued for re-injection to L2 4974 * (picked up via vmx_complete_interrupts()), as well as exceptions 4975 * that were pending for L2. Note, this must NOT be hoisted above 4976 * prepare_vmcs12(), events/exceptions queued for re-injection need to 4977 * be captured in vmcs12 (see vmcs12_save_pending_event()). 4978 */ 4979 vcpu->arch.nmi_injected = false; 4980 kvm_clear_exception_queue(vcpu); 4981 kvm_clear_interrupt_queue(vcpu); 4982 4983 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4984 4985 /* 4986 * If IBRS is advertised to the vCPU, KVM must flush the indirect 4987 * branch predictors when transitioning from L2 to L1, as L1 expects 4988 * hardware (KVM in this case) to provide separate predictor modes. 4989 * Bare metal isolates VMX root (host) from VMX non-root (guest), but 4990 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide 4991 * separate modes for L2 vs L1. 4992 */ 4993 if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 4994 indirect_branch_prediction_barrier(); 4995 4996 /* Update any VMCS fields that might have changed while L2 ran */ 4997 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4998 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4999 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 5000 if (kvm_caps.has_tsc_control) 5001 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 5002 5003 if (vmx->nested.l1_tpr_threshold != -1) 5004 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 5005 5006 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 5007 vmx->nested.change_vmcs01_virtual_apic_mode = false; 5008 vmx_set_virtual_apic_mode(vcpu); 5009 } 5010 5011 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 5012 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 5013 vmx_update_cpu_dirty_logging(vcpu); 5014 } 5015 5016 /* Unpin physical memory we referred to in vmcs02 */ 5017 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); 5018 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 5019 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 5020 vmx->nested.pi_desc = NULL; 5021 5022 if (vmx->nested.reload_vmcs01_apic_access_page) { 5023 vmx->nested.reload_vmcs01_apic_access_page = false; 5024 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 5025 } 5026 5027 if (vmx->nested.update_vmcs01_apicv_status) { 5028 vmx->nested.update_vmcs01_apicv_status = false; 5029 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 5030 } 5031 5032 if ((vm_exit_reason != -1) && 5033 (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx))) 5034 vmx->nested.need_vmcs12_to_shadow_sync = true; 5035 5036 /* in case we halted in L2 */ 5037 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 5038 5039 if (likely(!vmx->fail)) { 5040 if (vm_exit_reason != -1) 5041 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 5042 vmcs12->exit_qualification, 5043 
vmcs12->idt_vectoring_info_field, 5044 vmcs12->vm_exit_intr_info, 5045 vmcs12->vm_exit_intr_error_code, 5046 KVM_ISA_VMX); 5047 5048 load_vmcs12_host_state(vcpu, vmcs12); 5049 5050 return; 5051 } 5052 5053 /* 5054 * After an early L2 VM-entry failure, we're now back 5055 * in L1 which thinks it just finished a VMLAUNCH or 5056 * VMRESUME instruction, so we need to set the failure 5057 * flag and the VM-instruction error field of the VMCS 5058 * accordingly, and skip the emulated instruction. 5059 */ 5060 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 5061 5062 /* 5063 * Restore L1's host state to KVM's software model. We're here 5064 * because a consistency check was caught by hardware, which 5065 * means some amount of guest state has been propagated to KVM's 5066 * model and needs to be unwound to the host's state. 5067 */ 5068 nested_vmx_restore_host_state(vcpu); 5069 5070 vmx->fail = 0; 5071 } 5072 5073 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) 5074 { 5075 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5076 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 5077 } 5078 5079 /* 5080 * Decode the memory-address operand of a vmx instruction, as recorded on an 5081 * exit caused by such an instruction (run by a guest hypervisor). 5082 * On success, returns 0. When the operand is invalid, returns 1 and throws 5083 * #UD, #GP, or #SS. 5084 */ 5085 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 5086 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 5087 { 5088 gva_t off; 5089 bool exn; 5090 struct kvm_segment s; 5091 5092 /* 5093 * According to Vol. 3B, "Information for VM Exits Due to Instruction 5094 * Execution", on an exit, vmx_instruction_info holds most of the 5095 * addressing components of the operand. Only the displacement part 5096 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 5097 * For how an actual address is calculated from all these components, 5098 * refer to Vol. 1, "Operand Addressing". 5099 */ 5100 int scaling = vmx_instruction_info & 3; 5101 int addr_size = (vmx_instruction_info >> 7) & 7; 5102 bool is_reg = vmx_instruction_info & (1u << 10); 5103 int seg_reg = (vmx_instruction_info >> 15) & 7; 5104 int index_reg = (vmx_instruction_info >> 18) & 0xf; 5105 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 5106 int base_reg = (vmx_instruction_info >> 23) & 0xf; 5107 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 5108 5109 if (is_reg) { 5110 kvm_queue_exception(vcpu, UD_VECTOR); 5111 return 1; 5112 } 5113 5114 /* Addr = segment_base + offset */ 5115 /* offset = base + [index * scale] + displacement */ 5116 off = exit_qualification; /* holds the displacement */ 5117 if (addr_size == 1) 5118 off = (gva_t)sign_extend64(off, 31); 5119 else if (addr_size == 0) 5120 off = (gva_t)sign_extend64(off, 15); 5121 if (base_is_valid) 5122 off += kvm_register_read(vcpu, base_reg); 5123 if (index_is_valid) 5124 off += kvm_register_read(vcpu, index_reg) << scaling; 5125 vmx_get_segment(vcpu, &s, seg_reg); 5126 5127 /* 5128 * The effective address, i.e. @off, of a memory operand is truncated 5129 * based on the address size of the instruction. Note that this is 5130 * the *effective address*, i.e. the address prior to accounting for 5131 * the segment's base. 5132 */ 5133 if (addr_size == 1) /* 32 bit */ 5134 off &= 0xffffffff; 5135 else if (addr_size == 0) /* 16 bit */ 5136 off &= 0xffff; 5137 5138 /* Checks for #GP/#SS exceptions. 
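 *
 * For example (illustrative segment types): for writes, a read-only
 * data segment (s.type 0 or 1) trips the (s.type & 0xa) == 0 check
 * below and any code segment trips (s.type & 8); for reads, an
 * execute-only code segment (s.type 8 or 9) trips (s.type & 0xa) == 8,
 * while a read/write data segment (s.type 2 or 3) passes both.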
*/ 5139 exn = false; 5140 if (is_long_mode(vcpu)) { 5141 /* 5142 * The virtual/linear address is never truncated in 64-bit 5143 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 5144 * address when using FS/GS with a non-zero base. 5145 */ 5146 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 5147 *ret = s.base + off; 5148 else 5149 *ret = off; 5150 5151 *ret = vmx_get_untagged_addr(vcpu, *ret, 0); 5152 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 5153 * non-canonical form. This is the only check on the memory 5154 * destination for long mode! 5155 */ 5156 exn = is_noncanonical_address(*ret, vcpu); 5157 } else { 5158 /* 5159 * When not in long mode, the virtual/linear address is 5160 * unconditionally truncated to 32 bits regardless of the 5161 * address size. 5162 */ 5163 *ret = (s.base + off) & 0xffffffff; 5164 5165 /* Protected mode: apply checks for segment validity in the 5166 * following order: 5167 * - segment type check (#GP(0) may be thrown) 5168 * - usability check (#GP(0)/#SS(0)) 5169 * - limit check (#GP(0)/#SS(0)) 5170 */ 5171 if (wr) 5172 /* #GP(0) if the destination operand is located in a 5173 * read-only data segment or any code segment. 5174 */ 5175 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 5176 else 5177 /* #GP(0) if the source operand is located in an 5178 * execute-only code segment 5179 */ 5180 exn = ((s.type & 0xa) == 8); 5181 if (exn) { 5182 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 5183 return 1; 5184 } 5185 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 5186 */ 5187 exn = (s.unusable != 0); 5188 5189 /* 5190 * Protected mode: #GP(0)/#SS(0) if the memory operand is 5191 * outside the segment limit. All CPUs that support VMX ignore 5192 * limit checks for flat segments, i.e. segments with base==0, 5193 * limit==0xffffffff and of type expand-up data or code. 5194 */ 5195 if (!(s.base == 0 && s.limit == 0xffffffff && 5196 ((s.type & 8) || !(s.type & 4)))) 5197 exn = exn || ((u64)off + len - 1 > s.limit); 5198 } 5199 if (exn) { 5200 kvm_queue_exception_e(vcpu, 5201 seg_reg == VCPU_SREG_SS ? 5202 SS_VECTOR : GP_VECTOR, 5203 0); 5204 return 1; 5205 } 5206 5207 return 0; 5208 } 5209 5210 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 5211 int *ret) 5212 { 5213 gva_t gva; 5214 struct x86_exception e; 5215 int r; 5216 5217 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5218 vmcs_read32(VMX_INSTRUCTION_INFO), false, 5219 sizeof(*vmpointer), &gva)) { 5220 *ret = 1; 5221 return -EINVAL; 5222 } 5223 5224 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 5225 if (r != X86EMUL_CONTINUE) { 5226 *ret = kvm_handle_memory_failure(vcpu, r, &e); 5227 return -EINVAL; 5228 } 5229 5230 return 0; 5231 } 5232 5233 /* 5234 * Allocate a shadow VMCS and associate it with the currently loaded 5235 * VMCS, unless such a shadow VMCS already exists. The newly allocated 5236 * VMCS is also VMCLEARed, so that it is ready for use. 5237 */ 5238 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 5239 { 5240 struct vcpu_vmx *vmx = to_vmx(vcpu); 5241 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 5242 5243 /* 5244 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it 5245 * when L1 executes VMXOFF or the vCPU is forced out of nested 5246 * operation. VMXON faults if the CPU is already post-VMXON, so it 5247 * should be impossible to already have an allocated shadow VMCS. 
KVM 5248 * doesn't support virtualization of VMCS shadowing, so vmcs01 should 5249 * always be the loaded VMCS. 5250 */ 5251 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) 5252 return loaded_vmcs->shadow_vmcs; 5253 5254 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 5255 if (loaded_vmcs->shadow_vmcs) 5256 vmcs_clear(loaded_vmcs->shadow_vmcs); 5257 5258 return loaded_vmcs->shadow_vmcs; 5259 } 5260 5261 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 5262 { 5263 struct vcpu_vmx *vmx = to_vmx(vcpu); 5264 int r; 5265 5266 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 5267 if (r < 0) 5268 goto out_vmcs02; 5269 5270 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 5271 if (!vmx->nested.cached_vmcs12) 5272 goto out_cached_vmcs12; 5273 5274 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; 5275 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 5276 if (!vmx->nested.cached_shadow_vmcs12) 5277 goto out_cached_shadow_vmcs12; 5278 5279 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 5280 goto out_shadow_vmcs; 5281 5282 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 5283 HRTIMER_MODE_ABS_PINNED); 5284 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 5285 5286 vmx->nested.vpid02 = allocate_vpid(); 5287 5288 vmx->nested.vmcs02_initialized = false; 5289 vmx->nested.vmxon = true; 5290 5291 if (vmx_pt_mode_is_host_guest()) { 5292 vmx->pt_desc.guest.ctl = 0; 5293 pt_update_intercept_for_msr(vcpu); 5294 } 5295 5296 return 0; 5297 5298 out_shadow_vmcs: 5299 kfree(vmx->nested.cached_shadow_vmcs12); 5300 5301 out_cached_shadow_vmcs12: 5302 kfree(vmx->nested.cached_vmcs12); 5303 5304 out_cached_vmcs12: 5305 free_loaded_vmcs(&vmx->nested.vmcs02); 5306 5307 out_vmcs02: 5308 return -ENOMEM; 5309 } 5310 5311 /* Emulate the VMXON instruction. */ 5312 static int handle_vmxon(struct kvm_vcpu *vcpu) 5313 { 5314 int ret; 5315 gpa_t vmptr; 5316 uint32_t revision; 5317 struct vcpu_vmx *vmx = to_vmx(vcpu); 5318 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 5319 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 5320 5321 /* 5322 * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter 5323 * the guest and so cannot rely on hardware to perform the check, 5324 * which has higher priority than VM-Exit (see Intel SDM's pseudocode 5325 * for VMXON). 5326 * 5327 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 5328 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't 5329 * force any of the relevant guest state. For a restricted guest, KVM 5330 * does force CR0.PE=1, but only to also force VM86 in order to emulate 5331 * Real Mode, and so there's no need to check CR0.PE manually. 5332 */ 5333 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) { 5334 kvm_queue_exception(vcpu, UD_VECTOR); 5335 return 1; 5336 } 5337 5338 /* 5339 * The CPL is checked for "not in VMX operation" and for "in VMX root", 5340 * and has higher priority than the VM-Fail due to being post-VMXON, 5341 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, 5342 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits 5343 * from L2 to L1, i.e. there's no need to check for the vCPU being in 5344 * VMX non-root. 5345 * 5346 * Forwarding the VM-Exit unconditionally, i.e. without performing the 5347 * #UD checks (see above), is functionally ok because KVM doesn't allow 5348 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's 5349 * CR0 or CR4, i.e. 
it's L2's responsibility to emulate #UDs that are 5350 * missed by hardware due to shadowing CR0 and/or CR4. 5351 */ 5352 if (vmx_get_cpl(vcpu)) { 5353 kvm_inject_gp(vcpu, 0); 5354 return 1; 5355 } 5356 5357 if (vmx->nested.vmxon) 5358 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 5359 5360 /* 5361 * Invalid CR0/CR4 generates #GP. These checks are performed if and 5362 * only if the vCPU isn't already in VMX operation, i.e. effectively 5363 * have lower priority than the VM-Fail above. 5364 */ 5365 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || 5366 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { 5367 kvm_inject_gp(vcpu, 0); 5368 return 1; 5369 } 5370 5371 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 5372 != VMXON_NEEDED_FEATURES) { 5373 kvm_inject_gp(vcpu, 0); 5374 return 1; 5375 } 5376 5377 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 5378 return ret; 5379 5380 /* 5381 * SDM 3: 24.11.5 5382 * The first 4 bytes of VMXON region contain the supported 5383 * VMCS revision identifier 5384 * 5385 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 5386 * which replaces physical address width with 32 5387 */ 5388 if (!page_address_valid(vcpu, vmptr)) 5389 return nested_vmx_failInvalid(vcpu); 5390 5391 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 5392 revision != VMCS12_REVISION) 5393 return nested_vmx_failInvalid(vcpu); 5394 5395 vmx->nested.vmxon_ptr = vmptr; 5396 ret = enter_vmx_operation(vcpu); 5397 if (ret) 5398 return ret; 5399 5400 return nested_vmx_succeed(vcpu); 5401 } 5402 5403 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 5404 { 5405 struct vcpu_vmx *vmx = to_vmx(vcpu); 5406 5407 if (vmx->nested.current_vmptr == INVALID_GPA) 5408 return; 5409 5410 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 5411 5412 if (enable_shadow_vmcs) { 5413 /* copy to memory all shadowed fields in case 5414 they were modified */ 5415 copy_shadow_to_vmcs12(vmx); 5416 vmx_disable_shadow_vmcs(vmx); 5417 } 5418 vmx->nested.posted_intr_nv = -1; 5419 5420 /* Flush VMCS12 to guest memory */ 5421 kvm_vcpu_write_guest_page(vcpu, 5422 vmx->nested.current_vmptr >> PAGE_SHIFT, 5423 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 5424 5425 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 5426 5427 vmx->nested.current_vmptr = INVALID_GPA; 5428 } 5429 5430 /* Emulate the VMXOFF instruction */ 5431 static int handle_vmxoff(struct kvm_vcpu *vcpu) 5432 { 5433 if (!nested_vmx_check_permission(vcpu)) 5434 return 1; 5435 5436 free_nested(vcpu); 5437 5438 if (kvm_apic_has_pending_init_or_sipi(vcpu)) 5439 kvm_make_request(KVM_REQ_EVENT, vcpu); 5440 5441 return nested_vmx_succeed(vcpu); 5442 } 5443 5444 /* Emulate the VMCLEAR instruction */ 5445 static int handle_vmclear(struct kvm_vcpu *vcpu) 5446 { 5447 struct vcpu_vmx *vmx = to_vmx(vcpu); 5448 u32 zero = 0; 5449 gpa_t vmptr; 5450 int r; 5451 5452 if (!nested_vmx_check_permission(vcpu)) 5453 return 1; 5454 5455 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5456 return r; 5457 5458 if (!page_address_valid(vcpu, vmptr)) 5459 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 5460 5461 if (vmptr == vmx->nested.vmxon_ptr) 5462 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 5463 5464 if (likely(!nested_evmcs_handle_vmclear(vcpu, vmptr))) { 5465 if (vmptr == vmx->nested.current_vmptr) 5466 nested_release_vmcs12(vcpu); 5467 5468 /* 5469 * Silently ignore memory errors on VMCLEAR, Intel's pseudocode 5470 * for VMCLEAR includes a "ensure 
that data for VMCS referenced 5471 * by the operand is in memory" clause that guards writes to 5472 * memory, i.e. doing nothing for I/O is architecturally valid. 5473 * 5474 * FIXME: Suppress failures if and only if no memslot is found, 5475 * i.e. exit to userspace if __copy_to_user() fails. 5476 */ 5477 (void)kvm_vcpu_write_guest(vcpu, 5478 vmptr + offsetof(struct vmcs12, 5479 launch_state), 5480 &zero, sizeof(zero)); 5481 } 5482 5483 return nested_vmx_succeed(vcpu); 5484 } 5485 5486 /* Emulate the VMLAUNCH instruction */ 5487 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 5488 { 5489 return nested_vmx_run(vcpu, true); 5490 } 5491 5492 /* Emulate the VMRESUME instruction */ 5493 static int handle_vmresume(struct kvm_vcpu *vcpu) 5494 { 5495 5496 return nested_vmx_run(vcpu, false); 5497 } 5498 5499 static int handle_vmread(struct kvm_vcpu *vcpu) 5500 { 5501 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5502 : get_vmcs12(vcpu); 5503 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5504 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5505 struct vcpu_vmx *vmx = to_vmx(vcpu); 5506 struct x86_exception e; 5507 unsigned long field; 5508 u64 value; 5509 gva_t gva = 0; 5510 short offset; 5511 int len, r; 5512 5513 if (!nested_vmx_check_permission(vcpu)) 5514 return 1; 5515 5516 /* Decode instruction info and find the field to read */ 5517 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5518 5519 if (!nested_vmx_is_evmptr12_valid(vmx)) { 5520 /* 5521 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5522 * any VMREAD sets the ALU flags for VMfailInvalid. 5523 */ 5524 if (vmx->nested.current_vmptr == INVALID_GPA || 5525 (is_guest_mode(vcpu) && 5526 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5527 return nested_vmx_failInvalid(vcpu); 5528 5529 offset = get_vmcs12_field_offset(field); 5530 if (offset < 0) 5531 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5532 5533 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5534 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5535 5536 /* Read the field, zero-extended to a u64 value */ 5537 value = vmcs12_read_any(vmcs12, field, offset); 5538 } else { 5539 /* 5540 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an 5541 * enlightened VMCS is active VMREAD/VMWRITE instructions are 5542 * unsupported. Unfortunately, certain versions of Windows 11 5543 * don't comply with this requirement which is not enforced in 5544 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a 5545 * workaround, as misbehaving guests will panic on VM-Fail. 5546 * Note, enlightened VMCS is incompatible with shadow VMCS so 5547 * all VMREADs from L2 should go to L1. 5548 */ 5549 if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5550 return nested_vmx_failInvalid(vcpu); 5551 5552 offset = evmcs_field_offset(field, NULL); 5553 if (offset < 0) 5554 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5555 5556 /* Read the field, zero-extended to a u64 value */ 5557 value = evmcs_read_any(nested_vmx_evmcs(vmx), field, offset); 5558 } 5559 5560 /* 5561 * Now copy part of this value to register or memory, as requested. 5562 * Note that the number of bits actually copied is 32 or 64 depending 5563 * on the guest's mode (32 or 64 bit), not on the given field's length. 5564 */ 5565 if (instr_info & BIT(10)) { 5566 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); 5567 } else { 5568 len = is_64_bit_mode(vcpu) ? 
8 : 4; 5569 if (get_vmx_mem_address(vcpu, exit_qualification, 5570 instr_info, true, len, &gva)) 5571 return 1; 5572 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5573 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5574 if (r != X86EMUL_CONTINUE) 5575 return kvm_handle_memory_failure(vcpu, r, &e); 5576 } 5577 5578 return nested_vmx_succeed(vcpu); 5579 } 5580 5581 static bool is_shadow_field_rw(unsigned long field) 5582 { 5583 switch (field) { 5584 #define SHADOW_FIELD_RW(x, y) case x: 5585 #include "vmcs_shadow_fields.h" 5586 return true; 5587 default: 5588 break; 5589 } 5590 return false; 5591 } 5592 5593 static bool is_shadow_field_ro(unsigned long field) 5594 { 5595 switch (field) { 5596 #define SHADOW_FIELD_RO(x, y) case x: 5597 #include "vmcs_shadow_fields.h" 5598 return true; 5599 default: 5600 break; 5601 } 5602 return false; 5603 } 5604 5605 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5606 { 5607 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5608 : get_vmcs12(vcpu); 5609 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5610 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5611 struct vcpu_vmx *vmx = to_vmx(vcpu); 5612 struct x86_exception e; 5613 unsigned long field; 5614 short offset; 5615 gva_t gva; 5616 int len, r; 5617 5618 /* 5619 * The value to write might be 32 or 64 bits, depending on L1's long 5620 * mode, and eventually we need to write that into a field of several 5621 * possible lengths. The code below first zero-extends the value to 64 5622 * bit (value), and then copies only the appropriate number of 5623 * bits into the vmcs12 field. 5624 */ 5625 u64 value = 0; 5626 5627 if (!nested_vmx_check_permission(vcpu)) 5628 return 1; 5629 5630 /* 5631 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5632 * any VMWRITE sets the ALU flags for VMfailInvalid. 5633 */ 5634 if (vmx->nested.current_vmptr == INVALID_GPA || 5635 (is_guest_mode(vcpu) && 5636 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5637 return nested_vmx_failInvalid(vcpu); 5638 5639 if (instr_info & BIT(10)) 5640 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); 5641 else { 5642 len = is_64_bit_mode(vcpu) ? 8 : 4; 5643 if (get_vmx_mem_address(vcpu, exit_qualification, 5644 instr_info, false, len, &gva)) 5645 return 1; 5646 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5647 if (r != X86EMUL_CONTINUE) 5648 return kvm_handle_memory_failure(vcpu, r, &e); 5649 } 5650 5651 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5652 5653 offset = get_vmcs12_field_offset(field); 5654 if (offset < 0) 5655 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5656 5657 /* 5658 * If the vCPU supports "VMWRITE to any supported field in the 5659 * VMCS," then the "read-only" fields are actually read/write. 5660 */ 5661 if (vmcs_field_readonly(field) && 5662 !nested_cpu_has_vmwrite_any_field(vcpu)) 5663 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5664 5665 /* 5666 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5667 * vmcs12, else we may crush a field or consume a stale value. 5668 */ 5669 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5670 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5671 5672 /* 5673 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5674 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5675 * behavior regardless of the underlying hardware, e.g. 
if an AR_BYTE 5676 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5677 * from L1 will return a different value than VMREAD from L2 (L1 sees 5678 * the stripped down value, L2 sees the full value as stored by KVM). 5679 */ 5680 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5681 value &= 0x1f0ff; 5682 5683 vmcs12_write_any(vmcs12, field, offset, value); 5684 5685 /* 5686 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5687 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5688 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5689 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5690 */ 5691 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5692 /* 5693 * L1 can read these fields without exiting, ensure the 5694 * shadow VMCS is up-to-date. 5695 */ 5696 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5697 preempt_disable(); 5698 vmcs_load(vmx->vmcs01.shadow_vmcs); 5699 5700 __vmcs_writel(field, value); 5701 5702 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5703 vmcs_load(vmx->loaded_vmcs->vmcs); 5704 preempt_enable(); 5705 } 5706 vmx->nested.dirty_vmcs12 = true; 5707 } 5708 5709 return nested_vmx_succeed(vcpu); 5710 } 5711 5712 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5713 { 5714 vmx->nested.current_vmptr = vmptr; 5715 if (enable_shadow_vmcs) { 5716 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5717 vmcs_write64(VMCS_LINK_POINTER, 5718 __pa(vmx->vmcs01.shadow_vmcs)); 5719 vmx->nested.need_vmcs12_to_shadow_sync = true; 5720 } 5721 vmx->nested.dirty_vmcs12 = true; 5722 vmx->nested.force_msr_bitmap_recalc = true; 5723 } 5724 5725 /* Emulate the VMPTRLD instruction */ 5726 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5727 { 5728 struct vcpu_vmx *vmx = to_vmx(vcpu); 5729 gpa_t vmptr; 5730 int r; 5731 5732 if (!nested_vmx_check_permission(vcpu)) 5733 return 1; 5734 5735 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5736 return r; 5737 5738 if (!page_address_valid(vcpu, vmptr)) 5739 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5740 5741 if (vmptr == vmx->nested.vmxon_ptr) 5742 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5743 5744 /* Forbid normal VMPTRLD if Enlightened version was used */ 5745 if (nested_vmx_is_evmptr12_valid(vmx)) 5746 return 1; 5747 5748 if (vmx->nested.current_vmptr != vmptr) { 5749 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; 5750 struct vmcs_hdr hdr; 5751 5752 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { 5753 /* 5754 * Reads from an unbacked page return all 1s, 5755 * which means that the 32 bits located at the 5756 * given physical address won't match the required 5757 * VMCS12_REVISION identifier. 5758 */ 5759 return nested_vmx_fail(vcpu, 5760 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5761 } 5762 5763 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 5764 offsetof(struct vmcs12, hdr), 5765 sizeof(hdr))) { 5766 return nested_vmx_fail(vcpu, 5767 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5768 } 5769 5770 if (hdr.revision_id != VMCS12_REVISION || 5771 (hdr.shadow_vmcs && 5772 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5773 return nested_vmx_fail(vcpu, 5774 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5775 } 5776 5777 nested_release_vmcs12(vcpu); 5778 5779 /* 5780 * Load VMCS12 from guest memory since it is not already 5781 * cached. 
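 *
 * The cached copy is what the VMREAD/VMWRITE emulation above operates
 * on; it is written back to guest memory by nested_release_vmcs12(),
 * e.g. on VMCLEAR of this pointer or on a later VMPTRLD of a
 * different one.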
5782 */ 5783 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, 5784 VMCS12_SIZE)) { 5785 return nested_vmx_fail(vcpu, 5786 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5787 } 5788 5789 set_current_vmptr(vmx, vmptr); 5790 } 5791 5792 return nested_vmx_succeed(vcpu); 5793 } 5794 5795 /* Emulate the VMPTRST instruction */ 5796 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5797 { 5798 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5799 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5800 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5801 struct x86_exception e; 5802 gva_t gva; 5803 int r; 5804 5805 if (!nested_vmx_check_permission(vcpu)) 5806 return 1; 5807 5808 if (unlikely(nested_vmx_is_evmptr12_valid(to_vmx(vcpu)))) 5809 return 1; 5810 5811 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5812 true, sizeof(gpa_t), &gva)) 5813 return 1; 5814 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5815 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 5816 sizeof(gpa_t), &e); 5817 if (r != X86EMUL_CONTINUE) 5818 return kvm_handle_memory_failure(vcpu, r, &e); 5819 5820 return nested_vmx_succeed(vcpu); 5821 } 5822 5823 /* Emulate the INVEPT instruction */ 5824 static int handle_invept(struct kvm_vcpu *vcpu) 5825 { 5826 struct vcpu_vmx *vmx = to_vmx(vcpu); 5827 u32 vmx_instruction_info, types; 5828 unsigned long type, roots_to_free; 5829 struct kvm_mmu *mmu; 5830 gva_t gva; 5831 struct x86_exception e; 5832 struct { 5833 u64 eptp, gpa; 5834 } operand; 5835 int i, r, gpr_index; 5836 5837 if (!(vmx->nested.msrs.secondary_ctls_high & 5838 SECONDARY_EXEC_ENABLE_EPT) || 5839 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5840 kvm_queue_exception(vcpu, UD_VECTOR); 5841 return 1; 5842 } 5843 5844 if (!nested_vmx_check_permission(vcpu)) 5845 return 1; 5846 5847 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5848 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5849 type = kvm_register_read(vcpu, gpr_index); 5850 5851 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5852 5853 if (type >= 32 || !(types & (1 << type))) 5854 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5855 5856 /* According to the Intel VMX instruction reference, the memory 5857 * operand is read even if it isn't needed (e.g., for type==global) 5858 */ 5859 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5860 vmx_instruction_info, false, sizeof(operand), &gva)) 5861 return 1; 5862 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5863 if (r != X86EMUL_CONTINUE) 5864 return kvm_handle_memory_failure(vcpu, r, &e); 5865 5866 /* 5867 * Nested EPT roots are always held through guest_mmu, 5868 * not root_mmu. 
5869 */ 5870 mmu = &vcpu->arch.guest_mmu; 5871 5872 switch (type) { 5873 case VMX_EPT_EXTENT_CONTEXT: 5874 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5875 return nested_vmx_fail(vcpu, 5876 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5877 5878 roots_to_free = 0; 5879 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd, 5880 operand.eptp)) 5881 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5882 5883 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5884 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5885 mmu->prev_roots[i].pgd, 5886 operand.eptp)) 5887 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5888 } 5889 break; 5890 case VMX_EPT_EXTENT_GLOBAL: 5891 roots_to_free = KVM_MMU_ROOTS_ALL; 5892 break; 5893 default: 5894 BUG(); 5895 break; 5896 } 5897 5898 if (roots_to_free) 5899 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); 5900 5901 return nested_vmx_succeed(vcpu); 5902 } 5903 5904 static int handle_invvpid(struct kvm_vcpu *vcpu) 5905 { 5906 struct vcpu_vmx *vmx = to_vmx(vcpu); 5907 u32 vmx_instruction_info; 5908 unsigned long type, types; 5909 gva_t gva; 5910 struct x86_exception e; 5911 struct { 5912 u64 vpid; 5913 u64 gla; 5914 } operand; 5915 u16 vpid02; 5916 int r, gpr_index; 5917 5918 if (!(vmx->nested.msrs.secondary_ctls_high & 5919 SECONDARY_EXEC_ENABLE_VPID) || 5920 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5921 kvm_queue_exception(vcpu, UD_VECTOR); 5922 return 1; 5923 } 5924 5925 if (!nested_vmx_check_permission(vcpu)) 5926 return 1; 5927 5928 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5929 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5930 type = kvm_register_read(vcpu, gpr_index); 5931 5932 types = (vmx->nested.msrs.vpid_caps & 5933 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5934 5935 if (type >= 32 || !(types & (1 << type))) 5936 return nested_vmx_fail(vcpu, 5937 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5938 5939 /* according to the intel vmx instruction reference, the memory 5940 * operand is read even if it isn't needed (e.g., for type==global) 5941 */ 5942 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5943 vmx_instruction_info, false, sizeof(operand), &gva)) 5944 return 1; 5945 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5946 if (r != X86EMUL_CONTINUE) 5947 return kvm_handle_memory_failure(vcpu, r, &e); 5948 5949 if (operand.vpid >> 16) 5950 return nested_vmx_fail(vcpu, 5951 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5952 5953 vpid02 = nested_get_vpid02(vcpu); 5954 switch (type) { 5955 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5956 /* 5957 * LAM doesn't apply to addresses that are inputs to TLB 5958 * invalidation. 5959 */ 5960 if (!operand.vpid || 5961 is_noncanonical_address(operand.gla, vcpu)) 5962 return nested_vmx_fail(vcpu, 5963 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5964 vpid_sync_vcpu_addr(vpid02, operand.gla); 5965 break; 5966 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5967 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5968 if (!operand.vpid) 5969 return nested_vmx_fail(vcpu, 5970 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5971 vpid_sync_context(vpid02); 5972 break; 5973 case VMX_VPID_EXTENT_ALL_CONTEXT: 5974 vpid_sync_context(vpid02); 5975 break; 5976 default: 5977 WARN_ON_ONCE(1); 5978 return kvm_skip_emulated_instruction(vcpu); 5979 } 5980 5981 /* 5982 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5983 * linear mappings for L2 (tagged with L2's VPID). Free all guest 5984 * roots as VPIDs are not tracked in the MMU role. 
5985 * 5986 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5987 * an MMU when EPT is disabled. 5988 * 5989 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 5990 */ 5991 if (!enable_ept) 5992 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu); 5993 5994 return nested_vmx_succeed(vcpu); 5995 } 5996 5997 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5998 struct vmcs12 *vmcs12) 5999 { 6000 u32 index = kvm_rcx_read(vcpu); 6001 u64 new_eptp; 6002 6003 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12))) 6004 return 1; 6005 if (index >= VMFUNC_EPTP_ENTRIES) 6006 return 1; 6007 6008 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 6009 &new_eptp, index * 8, 8)) 6010 return 1; 6011 6012 /* 6013 * If the (L2) guest does a vmfunc to the currently 6014 * active ept pointer, we don't have to do anything else 6015 */ 6016 if (vmcs12->ept_pointer != new_eptp) { 6017 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 6018 return 1; 6019 6020 vmcs12->ept_pointer = new_eptp; 6021 nested_ept_new_eptp(vcpu); 6022 6023 if (!nested_cpu_has_vpid(vmcs12)) 6024 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 6025 } 6026 6027 return 0; 6028 } 6029 6030 static int handle_vmfunc(struct kvm_vcpu *vcpu) 6031 { 6032 struct vcpu_vmx *vmx = to_vmx(vcpu); 6033 struct vmcs12 *vmcs12; 6034 u32 function = kvm_rax_read(vcpu); 6035 6036 /* 6037 * VMFUNC should never execute cleanly while L1 is active; KVM supports 6038 * VMFUNC for nested VMs, but not for L1. 6039 */ 6040 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) { 6041 kvm_queue_exception(vcpu, UD_VECTOR); 6042 return 1; 6043 } 6044 6045 vmcs12 = get_vmcs12(vcpu); 6046 6047 /* 6048 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC 6049 * is enabled in vmcs02 if and only if it's enabled in vmcs12. 6050 */ 6051 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) { 6052 kvm_queue_exception(vcpu, UD_VECTOR); 6053 return 1; 6054 } 6055 6056 if (!(vmcs12->vm_function_control & BIT_ULL(function))) 6057 goto fail; 6058 6059 switch (function) { 6060 case 0: 6061 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 6062 goto fail; 6063 break; 6064 default: 6065 goto fail; 6066 } 6067 return kvm_skip_emulated_instruction(vcpu); 6068 6069 fail: 6070 /* 6071 * This is effectively a reflected VM-Exit, as opposed to a synthesized 6072 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 6073 * EXIT_REASON_VMFUNC as the exit reason. 6074 */ 6075 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 6076 vmx_get_intr_info(vcpu), 6077 vmx_get_exit_qual(vcpu)); 6078 return 1; 6079 } 6080 6081 /* 6082 * Return true if an IO instruction with the specified port and size should cause 6083 * a VM-exit into L1. 
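 *
 * For example (illustrative values), a one-byte access to port 0x3f8
 * consults bit (0x3f8 & 7) == 0 of byte (0x3f8 & 0x7fff) / 8 == 127 in
 * io_bitmap_a; ports 0x8000-0xffff are looked up in io_bitmap_b, and
 * an access that runs past port 0xffff always exits.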
6084 */ 6085 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 6086 int size) 6087 { 6088 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6089 gpa_t bitmap, last_bitmap; 6090 u8 b; 6091 6092 last_bitmap = INVALID_GPA; 6093 b = -1; 6094 6095 while (size > 0) { 6096 if (port < 0x8000) 6097 bitmap = vmcs12->io_bitmap_a; 6098 else if (port < 0x10000) 6099 bitmap = vmcs12->io_bitmap_b; 6100 else 6101 return true; 6102 bitmap += (port & 0x7fff) / 8; 6103 6104 if (last_bitmap != bitmap) 6105 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 6106 return true; 6107 if (b & (1 << (port & 7))) 6108 return true; 6109 6110 port++; 6111 size--; 6112 last_bitmap = bitmap; 6113 } 6114 6115 return false; 6116 } 6117 6118 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 6119 struct vmcs12 *vmcs12) 6120 { 6121 unsigned long exit_qualification; 6122 unsigned short port; 6123 int size; 6124 6125 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 6126 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 6127 6128 exit_qualification = vmx_get_exit_qual(vcpu); 6129 6130 port = exit_qualification >> 16; 6131 size = (exit_qualification & 7) + 1; 6132 6133 return nested_vmx_check_io_bitmaps(vcpu, port, size); 6134 } 6135 6136 /* 6137 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 6138 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 6139 * disinterest in the current event (read or write a specific MSR) by using an 6140 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 6141 */ 6142 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 6143 struct vmcs12 *vmcs12, 6144 union vmx_exit_reason exit_reason) 6145 { 6146 u32 msr_index = kvm_rcx_read(vcpu); 6147 gpa_t bitmap; 6148 6149 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 6150 return true; 6151 6152 /* 6153 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 6154 * for the four combinations of read/write and low/high MSR numbers. 6155 * First we need to figure out which of the four to use: 6156 */ 6157 bitmap = vmcs12->msr_bitmap; 6158 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6159 bitmap += 2048; 6160 if (msr_index >= 0xc0000000) { 6161 msr_index -= 0xc0000000; 6162 bitmap += 1024; 6163 } 6164 6165 /* Then read the msr_index'th bit from this bitmap: */ 6166 if (msr_index < 1024*8) { 6167 unsigned char b; 6168 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 6169 return true; 6170 return 1 & (b >> (msr_index & 7)); 6171 } else 6172 return true; /* let L1 handle the wrong parameter */ 6173 } 6174 6175 /* 6176 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 6177 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 6178 * intercept (via guest_host_mask etc.) the current event. 
6179 */ 6180 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 6181 struct vmcs12 *vmcs12) 6182 { 6183 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 6184 int cr = exit_qualification & 15; 6185 int reg; 6186 unsigned long val; 6187 6188 switch ((exit_qualification >> 4) & 3) { 6189 case 0: /* mov to cr */ 6190 reg = (exit_qualification >> 8) & 15; 6191 val = kvm_register_read(vcpu, reg); 6192 switch (cr) { 6193 case 0: 6194 if (vmcs12->cr0_guest_host_mask & 6195 (val ^ vmcs12->cr0_read_shadow)) 6196 return true; 6197 break; 6198 case 3: 6199 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 6200 return true; 6201 break; 6202 case 4: 6203 if (vmcs12->cr4_guest_host_mask & 6204 (vmcs12->cr4_read_shadow ^ val)) 6205 return true; 6206 break; 6207 case 8: 6208 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 6209 return true; 6210 break; 6211 } 6212 break; 6213 case 2: /* clts */ 6214 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 6215 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 6216 return true; 6217 break; 6218 case 1: /* mov from cr */ 6219 switch (cr) { 6220 case 3: 6221 if (vmcs12->cpu_based_vm_exec_control & 6222 CPU_BASED_CR3_STORE_EXITING) 6223 return true; 6224 break; 6225 case 8: 6226 if (vmcs12->cpu_based_vm_exec_control & 6227 CPU_BASED_CR8_STORE_EXITING) 6228 return true; 6229 break; 6230 } 6231 break; 6232 case 3: /* lmsw */ 6233 /* 6234 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 6235 * cr0. Other attempted changes are ignored, with no exit. 6236 */ 6237 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 6238 if (vmcs12->cr0_guest_host_mask & 0xe & 6239 (val ^ vmcs12->cr0_read_shadow)) 6240 return true; 6241 if ((vmcs12->cr0_guest_host_mask & 0x1) && 6242 !(vmcs12->cr0_read_shadow & 0x1) && 6243 (val & 0x1)) 6244 return true; 6245 break; 6246 } 6247 return false; 6248 } 6249 6250 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, 6251 struct vmcs12 *vmcs12) 6252 { 6253 u32 encls_leaf; 6254 6255 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || 6256 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) 6257 return false; 6258 6259 encls_leaf = kvm_rax_read(vcpu); 6260 if (encls_leaf > 62) 6261 encls_leaf = 63; 6262 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); 6263 } 6264 6265 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 6266 struct vmcs12 *vmcs12, gpa_t bitmap) 6267 { 6268 u32 vmx_instruction_info; 6269 unsigned long field; 6270 u8 b; 6271 6272 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 6273 return true; 6274 6275 /* Decode instruction info and find the field to access */ 6276 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 6277 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 6278 6279 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 6280 if (field >> 15) 6281 return true; 6282 6283 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 6284 return true; 6285 6286 return 1 & (b >> (field & 7)); 6287 } 6288 6289 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 6290 { 6291 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 6292 6293 if (nested_cpu_has_mtf(vmcs12)) 6294 return true; 6295 6296 /* 6297 * An MTF VM-exit may be injected into the guest by setting the 6298 * interruption-type to 7 (other event) and the vector field to 0. Such 6299 * is the case regardless of the 'monitor trap flag' VM-execution 6300 * control. 
6301 */ 6302 return entry_intr_info == (INTR_INFO_VALID_MASK 6303 | INTR_TYPE_OTHER_EVENT); 6304 } 6305 6306 /* 6307 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 6308 * L1 wants the exit. Only call this when in is_guest_mode (L2). 6309 */ 6310 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 6311 union vmx_exit_reason exit_reason) 6312 { 6313 u32 intr_info; 6314 6315 switch ((u16)exit_reason.basic) { 6316 case EXIT_REASON_EXCEPTION_NMI: 6317 intr_info = vmx_get_intr_info(vcpu); 6318 if (is_nmi(intr_info)) 6319 return true; 6320 else if (is_page_fault(intr_info)) 6321 return vcpu->arch.apf.host_apf_flags || 6322 vmx_need_pf_intercept(vcpu); 6323 else if (is_debug(intr_info) && 6324 vcpu->guest_debug & 6325 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 6326 return true; 6327 else if (is_breakpoint(intr_info) && 6328 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 6329 return true; 6330 else if (is_alignment_check(intr_info) && 6331 !vmx_guest_inject_ac(vcpu)) 6332 return true; 6333 else if (is_ve_fault(intr_info)) 6334 return true; 6335 return false; 6336 case EXIT_REASON_EXTERNAL_INTERRUPT: 6337 return true; 6338 case EXIT_REASON_MCE_DURING_VMENTRY: 6339 return true; 6340 case EXIT_REASON_EPT_VIOLATION: 6341 /* 6342 * L0 always deals with the EPT violation. If nested EPT is 6343 * used, and the nested mmu code discovers that the address is 6344 * missing in the guest EPT table (EPT12), the EPT violation 6345 * will be injected with nested_ept_inject_page_fault() 6346 */ 6347 return true; 6348 case EXIT_REASON_EPT_MISCONFIG: 6349 /* 6350 * L2 never uses directly L1's EPT, but rather L0's own EPT 6351 * table (shadow on EPT) or a merged EPT table that L0 built 6352 * (EPT on EPT). So any problems with the structure of the 6353 * table is L0's fault. 6354 */ 6355 return true; 6356 case EXIT_REASON_PREEMPTION_TIMER: 6357 return true; 6358 case EXIT_REASON_PML_FULL: 6359 /* 6360 * PML is emulated for an L1 VMM and should never be enabled in 6361 * vmcs02, always "handle" PML_FULL by exiting to userspace. 6362 */ 6363 return true; 6364 case EXIT_REASON_VMFUNC: 6365 /* VM functions are emulated through L2->L0 vmexits. */ 6366 return true; 6367 case EXIT_REASON_BUS_LOCK: 6368 /* 6369 * At present, bus lock VM exit is never exposed to L1. 6370 * Handle L2's bus locks in L0 directly. 6371 */ 6372 return true; 6373 #ifdef CONFIG_KVM_HYPERV 6374 case EXIT_REASON_VMCALL: 6375 /* Hyper-V L2 TLB flush hypercall is handled by L0 */ 6376 return guest_hv_cpuid_has_l2_tlb_flush(vcpu) && 6377 nested_evmcs_l2_tlb_flush_enabled(vcpu) && 6378 kvm_hv_is_tlb_flush_hcall(vcpu); 6379 #endif 6380 default: 6381 break; 6382 } 6383 return false; 6384 } 6385 6386 /* 6387 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 6388 * is_guest_mode (L2). 
6389 */ 6390 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 6391 union vmx_exit_reason exit_reason) 6392 { 6393 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6394 u32 intr_info; 6395 6396 switch ((u16)exit_reason.basic) { 6397 case EXIT_REASON_EXCEPTION_NMI: 6398 intr_info = vmx_get_intr_info(vcpu); 6399 if (is_nmi(intr_info)) 6400 return true; 6401 else if (is_page_fault(intr_info)) 6402 return true; 6403 return vmcs12->exception_bitmap & 6404 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 6405 case EXIT_REASON_EXTERNAL_INTERRUPT: 6406 return nested_exit_on_intr(vcpu); 6407 case EXIT_REASON_TRIPLE_FAULT: 6408 return true; 6409 case EXIT_REASON_INTERRUPT_WINDOW: 6410 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 6411 case EXIT_REASON_NMI_WINDOW: 6412 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 6413 case EXIT_REASON_TASK_SWITCH: 6414 return true; 6415 case EXIT_REASON_CPUID: 6416 return true; 6417 case EXIT_REASON_HLT: 6418 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 6419 case EXIT_REASON_INVD: 6420 return true; 6421 case EXIT_REASON_INVLPG: 6422 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6423 case EXIT_REASON_RDPMC: 6424 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 6425 case EXIT_REASON_RDRAND: 6426 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 6427 case EXIT_REASON_RDSEED: 6428 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 6429 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 6430 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 6431 case EXIT_REASON_VMREAD: 6432 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6433 vmcs12->vmread_bitmap); 6434 case EXIT_REASON_VMWRITE: 6435 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6436 vmcs12->vmwrite_bitmap); 6437 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 6438 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 6439 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 6440 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 6441 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 6442 /* 6443 * VMX instructions trap unconditionally. This allows L1 to 6444 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
6445 */ 6446 return true; 6447 case EXIT_REASON_CR_ACCESS: 6448 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 6449 case EXIT_REASON_DR_ACCESS: 6450 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 6451 case EXIT_REASON_IO_INSTRUCTION: 6452 return nested_vmx_exit_handled_io(vcpu, vmcs12); 6453 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 6454 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 6455 case EXIT_REASON_MSR_READ: 6456 case EXIT_REASON_MSR_WRITE: 6457 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 6458 case EXIT_REASON_INVALID_STATE: 6459 return true; 6460 case EXIT_REASON_MWAIT_INSTRUCTION: 6461 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 6462 case EXIT_REASON_MONITOR_TRAP_FLAG: 6463 return nested_vmx_exit_handled_mtf(vmcs12); 6464 case EXIT_REASON_MONITOR_INSTRUCTION: 6465 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 6466 case EXIT_REASON_PAUSE_INSTRUCTION: 6467 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 6468 nested_cpu_has2(vmcs12, 6469 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 6470 case EXIT_REASON_MCE_DURING_VMENTRY: 6471 return true; 6472 case EXIT_REASON_TPR_BELOW_THRESHOLD: 6473 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 6474 case EXIT_REASON_APIC_ACCESS: 6475 case EXIT_REASON_APIC_WRITE: 6476 case EXIT_REASON_EOI_INDUCED: 6477 /* 6478 * The controls for "virtualize APIC accesses," "APIC- 6479 * register virtualization," and "virtual-interrupt 6480 * delivery" only come from vmcs12. 6481 */ 6482 return true; 6483 case EXIT_REASON_INVPCID: 6484 return 6485 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 6486 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6487 case EXIT_REASON_WBINVD: 6488 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 6489 case EXIT_REASON_XSETBV: 6490 return true; 6491 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 6492 /* 6493 * This should never happen, since it is not possible to 6494 * set XSS to a non-zero value---neither in L1 nor in L2. 6495 * If if it were, XSS would have to be checked against 6496 * the XSS exit bitmap in vmcs12. 6497 */ 6498 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES); 6499 case EXIT_REASON_UMWAIT: 6500 case EXIT_REASON_TPAUSE: 6501 return nested_cpu_has2(vmcs12, 6502 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 6503 case EXIT_REASON_ENCLS: 6504 return nested_vmx_exit_handled_encls(vcpu, vmcs12); 6505 case EXIT_REASON_NOTIFY: 6506 /* Notify VM exit is not exposed to L1 */ 6507 return false; 6508 default: 6509 return true; 6510 } 6511 } 6512 6513 /* 6514 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 6515 * reflected into L1. 6516 */ 6517 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 6518 { 6519 struct vcpu_vmx *vmx = to_vmx(vcpu); 6520 union vmx_exit_reason exit_reason = vmx->exit_reason; 6521 unsigned long exit_qual; 6522 u32 exit_intr_info; 6523 6524 WARN_ON_ONCE(vmx->nested.nested_run_pending); 6525 6526 /* 6527 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 6528 * has already loaded L2's state. 6529 */ 6530 if (unlikely(vmx->fail)) { 6531 trace_kvm_nested_vmenter_failed( 6532 "hardware VM-instruction error: ", 6533 vmcs_read32(VM_INSTRUCTION_ERROR)); 6534 exit_intr_info = 0; 6535 exit_qual = 0; 6536 goto reflect_vmexit; 6537 } 6538 6539 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX); 6540 6541 /* If L0 (KVM) wants the exit, it trumps L1's desires. 
*/ 6542 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 6543 return false; 6544 6545 /* If L1 doesn't want the exit, handle it in L0. */ 6546 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 6547 return false; 6548 6549 /* 6550 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 6551 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 6552 * need to be synthesized by querying the in-kernel LAPIC, but external 6553 * interrupts are never reflected to L1 so it's a non-issue. 6554 */ 6555 exit_intr_info = vmx_get_intr_info(vcpu); 6556 if (is_exception_with_error_code(exit_intr_info)) { 6557 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6558 6559 vmcs12->vm_exit_intr_error_code = 6560 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 6561 } 6562 exit_qual = vmx_get_exit_qual(vcpu); 6563 6564 reflect_vmexit: 6565 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 6566 return true; 6567 } 6568 6569 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 6570 struct kvm_nested_state __user *user_kvm_nested_state, 6571 u32 user_data_size) 6572 { 6573 struct vcpu_vmx *vmx; 6574 struct vmcs12 *vmcs12; 6575 struct kvm_nested_state kvm_state = { 6576 .flags = 0, 6577 .format = KVM_STATE_NESTED_FORMAT_VMX, 6578 .size = sizeof(kvm_state), 6579 .hdr.vmx.flags = 0, 6580 .hdr.vmx.vmxon_pa = INVALID_GPA, 6581 .hdr.vmx.vmcs12_pa = INVALID_GPA, 6582 .hdr.vmx.preemption_timer_deadline = 0, 6583 }; 6584 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6585 &user_kvm_nested_state->data.vmx[0]; 6586 6587 if (!vcpu) 6588 return kvm_state.size + sizeof(*user_vmx_nested_state); 6589 6590 vmx = to_vmx(vcpu); 6591 vmcs12 = get_vmcs12(vcpu); 6592 6593 if (guest_can_use(vcpu, X86_FEATURE_VMX) && 6594 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6595 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6596 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6597 6598 if (vmx_has_valid_vmcs12(vcpu)) { 6599 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6600 6601 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */ 6602 if (nested_vmx_is_evmptr12_set(vmx)) 6603 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6604 6605 if (is_guest_mode(vcpu) && 6606 nested_cpu_has_shadow_vmcs(vmcs12) && 6607 vmcs12->vmcs_link_pointer != INVALID_GPA) 6608 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6609 } 6610 6611 if (vmx->nested.smm.vmxon) 6612 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6613 6614 if (vmx->nested.smm.guest_mode) 6615 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6616 6617 if (is_guest_mode(vcpu)) { 6618 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6619 6620 if (vmx->nested.nested_run_pending) 6621 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6622 6623 if (vmx->nested.mtf_pending) 6624 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6625 6626 if (nested_cpu_has_preemption_timer(vmcs12) && 6627 vmx->nested.has_preemption_timer_deadline) { 6628 kvm_state.hdr.vmx.flags |= 6629 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6630 kvm_state.hdr.vmx.preemption_timer_deadline = 6631 vmx->nested.preemption_timer_deadline; 6632 } 6633 } 6634 } 6635 6636 if (user_data_size < kvm_state.size) 6637 goto out; 6638 6639 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6640 return -EFAULT; 6641 6642 if (!vmx_has_valid_vmcs12(vcpu)) 6643 goto out; 6644 6645 /* 6646 * When running L2, the authoritative vmcs12 state is in the 6647 * vmcs02. 
When running L1, the authoritative vmcs12 state is 6648 * in the shadow or enlightened vmcs linked to vmcs01, unless 6649 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 6650 * vmcs12 state is in the vmcs12 already. 6651 */ 6652 if (is_guest_mode(vcpu)) { 6653 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6654 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6655 } else { 6656 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6657 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6658 if (nested_vmx_is_evmptr12_valid(vmx)) 6659 /* 6660 * L1 hypervisor is not obliged to keep eVMCS 6661 * clean fields data always up-to-date while 6662 * not in guest mode, 'hv_clean_fields' is only 6663 * supposed to be actual upon vmentry so we need 6664 * to ignore it here and do full copy. 6665 */ 6666 copy_enlightened_to_vmcs12(vmx, 0); 6667 else if (enable_shadow_vmcs) 6668 copy_shadow_to_vmcs12(vmx); 6669 } 6670 } 6671 6672 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6673 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6674 6675 /* 6676 * Copy over the full allocated size of vmcs12 rather than just the size 6677 * of the struct. 6678 */ 6679 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6680 return -EFAULT; 6681 6682 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6683 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6684 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6685 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6686 return -EFAULT; 6687 } 6688 out: 6689 return kvm_state.size; 6690 } 6691 6692 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6693 { 6694 if (is_guest_mode(vcpu)) { 6695 to_vmx(vcpu)->nested.nested_run_pending = 0; 6696 nested_vmx_vmexit(vcpu, -1, 0, 0); 6697 } 6698 free_nested(vcpu); 6699 } 6700 6701 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6702 struct kvm_nested_state __user *user_kvm_nested_state, 6703 struct kvm_nested_state *kvm_state) 6704 { 6705 struct vcpu_vmx *vmx = to_vmx(vcpu); 6706 struct vmcs12 *vmcs12; 6707 enum vm_entry_failure_code ignored; 6708 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6709 &user_kvm_nested_state->data.vmx[0]; 6710 int ret; 6711 6712 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6713 return -EINVAL; 6714 6715 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { 6716 if (kvm_state->hdr.vmx.smm.flags) 6717 return -EINVAL; 6718 6719 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) 6720 return -EINVAL; 6721 6722 /* 6723 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6724 * enable eVMCS capability on vCPU. However, since then 6725 * code was changed such that flag signals vmcs12 should 6726 * be copied into eVMCS in guest memory. 6727 * 6728 * To preserve backwards compatibility, allow user 6729 * to set this flag even when there is no VMXON region. 
6730 */ 6731 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6732 return -EINVAL; 6733 } else { 6734 if (!guest_can_use(vcpu, X86_FEATURE_VMX)) 6735 return -EINVAL; 6736 6737 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6738 return -EINVAL; 6739 } 6740 6741 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6742 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6743 return -EINVAL; 6744 6745 if (kvm_state->hdr.vmx.smm.flags & 6746 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6747 return -EINVAL; 6748 6749 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6750 return -EINVAL; 6751 6752 /* 6753 * SMM temporarily disables VMX, so we cannot be in guest mode, 6754 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6755 * must be zero. 6756 */ 6757 if (is_smm(vcpu) ? 6758 (kvm_state->flags & 6759 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6760 : kvm_state->hdr.vmx.smm.flags) 6761 return -EINVAL; 6762 6763 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6764 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6765 return -EINVAL; 6766 6767 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6768 (!guest_can_use(vcpu, X86_FEATURE_VMX) || 6769 !vmx->nested.enlightened_vmcs_enabled)) 6770 return -EINVAL; 6771 6772 vmx_leave_nested(vcpu); 6773 6774 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) 6775 return 0; 6776 6777 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6778 ret = enter_vmx_operation(vcpu); 6779 if (ret) 6780 return ret; 6781 6782 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6783 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6784 /* See vmx_has_valid_vmcs12. */ 6785 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6786 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6787 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) 6788 return -EINVAL; 6789 else 6790 return 0; 6791 } 6792 6793 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { 6794 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6795 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6796 return -EINVAL; 6797 6798 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6799 #ifdef CONFIG_KVM_HYPERV 6800 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6801 /* 6802 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6803 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6804 * restored yet. EVMCS will be mapped from 6805 * nested_get_vmcs12_pages(). 
6806 */ 6807 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; 6808 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6809 #endif 6810 } else { 6811 return -EINVAL; 6812 } 6813 6814 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6815 vmx->nested.smm.vmxon = true; 6816 vmx->nested.vmxon = false; 6817 6818 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6819 vmx->nested.smm.guest_mode = true; 6820 } 6821 6822 vmcs12 = get_vmcs12(vcpu); 6823 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6824 return -EFAULT; 6825 6826 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6827 return -EINVAL; 6828 6829 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6830 return 0; 6831 6832 vmx->nested.nested_run_pending = 6833 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6834 6835 vmx->nested.mtf_pending = 6836 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6837 6838 ret = -EINVAL; 6839 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6840 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6841 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6842 6843 if (kvm_state->size < 6844 sizeof(*kvm_state) + 6845 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6846 goto error_guest_mode; 6847 6848 if (copy_from_user(shadow_vmcs12, 6849 user_vmx_nested_state->shadow_vmcs12, 6850 sizeof(*shadow_vmcs12))) { 6851 ret = -EFAULT; 6852 goto error_guest_mode; 6853 } 6854 6855 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6856 !shadow_vmcs12->hdr.shadow_vmcs) 6857 goto error_guest_mode; 6858 } 6859 6860 vmx->nested.has_preemption_timer_deadline = false; 6861 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6862 vmx->nested.has_preemption_timer_deadline = true; 6863 vmx->nested.preemption_timer_deadline = 6864 kvm_state->hdr.vmx.preemption_timer_deadline; 6865 } 6866 6867 if (nested_vmx_check_controls(vcpu, vmcs12) || 6868 nested_vmx_check_host_state(vcpu, vmcs12) || 6869 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6870 goto error_guest_mode; 6871 6872 vmx->nested.dirty_vmcs12 = true; 6873 vmx->nested.force_msr_bitmap_recalc = true; 6874 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6875 if (ret) 6876 goto error_guest_mode; 6877 6878 if (vmx->nested.mtf_pending) 6879 kvm_make_request(KVM_REQ_EVENT, vcpu); 6880 6881 return 0; 6882 6883 error_guest_mode: 6884 vmx->nested.nested_run_pending = 0; 6885 return ret; 6886 } 6887 6888 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6889 { 6890 if (enable_shadow_vmcs) { 6891 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6892 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6893 } 6894 } 6895 6896 /* 6897 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo 6898 * that madness to get the encoding for comparison. 6899 */ 6900 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10))) 6901 6902 static u64 nested_vmx_calc_vmcs_enum_msr(void) 6903 { 6904 /* 6905 * Note these are the so called "index" of the VMCS field encoding, not 6906 * the index into vmcs12. 6907 */ 6908 unsigned int max_idx, idx; 6909 int i; 6910 6911 /* 6912 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in 6913 * vmcs12, regardless of whether or not the associated feature is 6914 * exposed to L1. Simply find the field with the highest index. 6915 */ 6916 max_idx = 0; 6917 for (i = 0; i < nr_vmcs12_fields; i++) { 6918 /* The vmcs12 table is very, very sparsely populated. 
*/ 6919 if (!vmcs12_field_offsets[i]) 6920 continue; 6921 6922 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i)); 6923 if (idx > max_idx) 6924 max_idx = idx; 6925 } 6926 6927 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT; 6928 } 6929 6930 static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf, 6931 struct nested_vmx_msrs *msrs) 6932 { 6933 msrs->pinbased_ctls_low = 6934 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6935 6936 msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl; 6937 msrs->pinbased_ctls_high &= 6938 PIN_BASED_EXT_INTR_MASK | 6939 PIN_BASED_NMI_EXITING | 6940 PIN_BASED_VIRTUAL_NMIS | 6941 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6942 msrs->pinbased_ctls_high |= 6943 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6944 PIN_BASED_VMX_PREEMPTION_TIMER; 6945 } 6946 6947 static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf, 6948 struct nested_vmx_msrs *msrs) 6949 { 6950 msrs->exit_ctls_low = 6951 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6952 6953 msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl; 6954 msrs->exit_ctls_high &= 6955 #ifdef CONFIG_X86_64 6956 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6957 #endif 6958 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6959 VM_EXIT_CLEAR_BNDCFGS; 6960 msrs->exit_ctls_high |= 6961 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6962 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6963 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT | 6964 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6965 6966 /* We support free control of debug control saving. */ 6967 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6968 } 6969 6970 static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf, 6971 struct nested_vmx_msrs *msrs) 6972 { 6973 msrs->entry_ctls_low = 6974 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6975 6976 msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl; 6977 msrs->entry_ctls_high &= 6978 #ifdef CONFIG_X86_64 6979 VM_ENTRY_IA32E_MODE | 6980 #endif 6981 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; 6982 msrs->entry_ctls_high |= 6983 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER | 6984 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); 6985 6986 /* We support free control of debug control loading. */ 6987 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6988 } 6989 6990 static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf, 6991 struct nested_vmx_msrs *msrs) 6992 { 6993 msrs->procbased_ctls_low = 6994 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6995 6996 msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl; 6997 msrs->procbased_ctls_high &= 6998 CPU_BASED_INTR_WINDOW_EXITING | 6999 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 7000 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 7001 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 7002 CPU_BASED_CR3_STORE_EXITING | 7003 #ifdef CONFIG_X86_64 7004 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 7005 #endif 7006 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 7007 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 7008 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 7009 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 7010 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 7011 /* 7012 * We can allow some features even when not supported by the 7013 * hardware. For example, L1 can specify an MSR bitmap - and we 7014 * can use it to avoid exits to L1 - even when L0 runs L2 7015 * without MSR bitmaps. 
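 * Hence CPU_BASED_USE_MSR_BITMAPS is OR'd in unconditionally just below,
 * and SECONDARY_EXEC_SHADOW_VMCS is advertised further down on the same
 * basis, regardless of hardware support.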
7016 */ 7017 msrs->procbased_ctls_high |= 7018 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 7019 CPU_BASED_USE_MSR_BITMAPS; 7020 7021 /* We support free control of CR3 access interception. */ 7022 msrs->procbased_ctls_low &= 7023 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 7024 } 7025 7026 static void nested_vmx_setup_secondary_ctls(u32 ept_caps, 7027 struct vmcs_config *vmcs_conf, 7028 struct nested_vmx_msrs *msrs) 7029 { 7030 msrs->secondary_ctls_low = 0; 7031 7032 msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; 7033 msrs->secondary_ctls_high &= 7034 SECONDARY_EXEC_DESC | 7035 SECONDARY_EXEC_ENABLE_RDTSCP | 7036 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 7037 SECONDARY_EXEC_WBINVD_EXITING | 7038 SECONDARY_EXEC_APIC_REGISTER_VIRT | 7039 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 7040 SECONDARY_EXEC_RDRAND_EXITING | 7041 SECONDARY_EXEC_ENABLE_INVPCID | 7042 SECONDARY_EXEC_ENABLE_VMFUNC | 7043 SECONDARY_EXEC_RDSEED_EXITING | 7044 SECONDARY_EXEC_ENABLE_XSAVES | 7045 SECONDARY_EXEC_TSC_SCALING | 7046 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 7047 7048 /* 7049 * We can emulate "VMCS shadowing," even if the hardware 7050 * doesn't support it. 7051 */ 7052 msrs->secondary_ctls_high |= 7053 SECONDARY_EXEC_SHADOW_VMCS; 7054 7055 if (enable_ept) { 7056 /* nested EPT: emulate EPT also to L1 */ 7057 msrs->secondary_ctls_high |= 7058 SECONDARY_EXEC_ENABLE_EPT; 7059 msrs->ept_caps = 7060 VMX_EPT_PAGE_WALK_4_BIT | 7061 VMX_EPT_PAGE_WALK_5_BIT | 7062 VMX_EPTP_WB_BIT | 7063 VMX_EPT_INVEPT_BIT | 7064 VMX_EPT_EXECUTE_ONLY_BIT; 7065 7066 msrs->ept_caps &= ept_caps; 7067 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 7068 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 7069 VMX_EPT_1GB_PAGE_BIT; 7070 if (enable_ept_ad_bits) { 7071 msrs->secondary_ctls_high |= 7072 SECONDARY_EXEC_ENABLE_PML; 7073 msrs->ept_caps |= VMX_EPT_AD_BIT; 7074 } 7075 7076 /* 7077 * Advertise EPTP switching irrespective of hardware support, 7078 * KVM emulates it in software so long as VMFUNC is supported. 7079 */ 7080 if (cpu_has_vmx_vmfunc()) 7081 msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; 7082 } 7083 7084 /* 7085 * Old versions of KVM use the single-context version without 7086 * checking for support, so declare that it is supported even 7087 * though it is treated as global context. The alternative is 7088 * not failing the single-context invvpid, and it is worse. 7089 */ 7090 if (enable_vpid) { 7091 msrs->secondary_ctls_high |= 7092 SECONDARY_EXEC_ENABLE_VPID; 7093 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 7094 VMX_VPID_EXTENT_SUPPORTED_MASK; 7095 } 7096 7097 if (enable_unrestricted_guest) 7098 msrs->secondary_ctls_high |= 7099 SECONDARY_EXEC_UNRESTRICTED_GUEST; 7100 7101 if (flexpriority_enabled) 7102 msrs->secondary_ctls_high |= 7103 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 7104 7105 if (enable_sgx) 7106 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; 7107 } 7108 7109 static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, 7110 struct nested_vmx_msrs *msrs) 7111 { 7112 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; 7113 msrs->misc_low |= 7114 VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 7115 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 7116 VMX_MISC_ACTIVITY_HLT | 7117 VMX_MISC_ACTIVITY_WAIT_SIPI; 7118 msrs->misc_high = 0; 7119 } 7120 7121 static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs) 7122 { 7123 /* 7124 * This MSR reports some information about VMX support. 
We 7125 * should return information about the VMX we emulate for the 7126 * guest, and the VMCS structure we give it - not about the 7127 * VMX support of the underlying hardware. 7128 */ 7129 msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE, 7130 X86_MEMTYPE_WB); 7131 7132 msrs->basic |= VMX_BASIC_TRUE_CTLS; 7133 if (cpu_has_vmx_basic_inout()) 7134 msrs->basic |= VMX_BASIC_INOUT; 7135 } 7136 7137 static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs) 7138 { 7139 /* 7140 * These MSRs specify bits which the guest must keep fixed on 7141 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 7142 * We picked the standard core2 setting. 7143 */ 7144 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 7145 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 7146 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 7147 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 7148 7149 /* These MSRs specify bits which the guest must keep fixed off. */ 7150 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 7151 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 7152 7153 if (vmx_umip_emulated()) 7154 msrs->cr4_fixed1 |= X86_CR4_UMIP; 7155 } 7156 7157 /* 7158 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 7159 * returned for the various VMX controls MSRs when nested VMX is enabled. 7160 * The same values should also be used to verify that vmcs12 control fields are 7161 * valid during nested entry from L1 to L2. 7162 * Each of these control msrs has a low and high 32-bit half: A low bit is on 7163 * if the corresponding bit in the (32-bit) control field *must* be on, and a 7164 * bit in the high half is on if the corresponding bit in the control field 7165 * may be on. See also vmx_control_verify(). 7166 */ 7167 void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) 7168 { 7169 struct nested_vmx_msrs *msrs = &vmcs_conf->nested; 7170 7171 /* 7172 * Note that as a general rule, the high half of the MSRs (bits in 7173 * the control fields which may be 1) should be initialized by the 7174 * intersection of the underlying hardware's MSR (i.e., features which 7175 * can be supported) and the list of features we want to expose - 7176 * because they are known to be properly supported in our code. 7177 * Also, usually, the low half of the MSRs (bits which must be 1) can 7178 * be set to 0, meaning that L1 may turn off any of these bits. The 7179 * reason is that if one of these bits is necessary, it will appear 7180 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 7181 * fields of vmcs01 and vmcs02, will turn these bits off - and 7182 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 7183 * These rules have exceptions below. 
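 *
 * For reference, the resulting low/high pair encodes the validity check
 * (cf. vmx_control_verify()): a 32-bit value 'ctl' that L1 writes to a
 * control field is acceptable iff
 *
 *	(ctl & low)  == low	(every must-be-1 bit is set) and
 *	(ctl & high) == ctl	(no bit outside the may-be-1 mask is set).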
7184 */ 7185 nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs); 7186 7187 nested_vmx_setup_exit_ctls(vmcs_conf, msrs); 7188 7189 nested_vmx_setup_entry_ctls(vmcs_conf, msrs); 7190 7191 nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs); 7192 7193 nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs); 7194 7195 nested_vmx_setup_misc_data(vmcs_conf, msrs); 7196 7197 nested_vmx_setup_basic(msrs); 7198 7199 nested_vmx_setup_cr_fixed(msrs); 7200 7201 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); 7202 } 7203 7204 void nested_vmx_hardware_unsetup(void) 7205 { 7206 int i; 7207 7208 if (enable_shadow_vmcs) { 7209 for (i = 0; i < VMX_BITMAP_NR; i++) 7210 free_page((unsigned long)vmx_bitmap[i]); 7211 } 7212 } 7213 7214 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 7215 { 7216 int i; 7217 7218 if (!cpu_has_vmx_shadow_vmcs()) 7219 enable_shadow_vmcs = 0; 7220 if (enable_shadow_vmcs) { 7221 for (i = 0; i < VMX_BITMAP_NR; i++) { 7222 /* 7223 * The vmx_bitmap is not tied to a VM and so should 7224 * not be charged to a memcg. 7225 */ 7226 vmx_bitmap[i] = (unsigned long *) 7227 __get_free_page(GFP_KERNEL); 7228 if (!vmx_bitmap[i]) { 7229 nested_vmx_hardware_unsetup(); 7230 return -ENOMEM; 7231 } 7232 } 7233 7234 init_vmcs_shadow_fields(); 7235 } 7236 7237 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 7238 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 7239 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 7240 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 7241 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 7242 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 7243 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 7244 exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff; 7245 exit_handlers[EXIT_REASON_VMON] = handle_vmxon; 7246 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 7247 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 7248 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 7249 7250 return 0; 7251 } 7252 7253 struct kvm_x86_nested_ops vmx_nested_ops = { 7254 .leave_nested = vmx_leave_nested, 7255 .is_exception_vmexit = nested_vmx_is_exception_vmexit, 7256 .check_events = vmx_check_nested_events, 7257 .has_events = vmx_has_nested_events, 7258 .triple_fault = nested_vmx_triple_fault, 7259 .get_state = vmx_get_nested_state, 7260 .set_state = vmx_set_nested_state, 7261 .get_nested_state_pages = vmx_get_nested_state_pages, 7262 .write_log_dirty = nested_vmx_write_pml_buffer, 7263 #ifdef CONFIG_KVM_HYPERV 7264 .enable_evmcs = nested_enable_evmcs, 7265 .get_evmcs_version = nested_get_evmcs_version, 7266 .hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush, 7267 #endif 7268 }; 7269