Lines Matching +full:guest-index-bits

1 // SPDX-License-Identifier: GPL-2.0
32 * Hyper-V requires all of these, so mark them as supported even though
33 * they are just treated the same as all-context.
111 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES"); in init_vmcs_shadow_fields()
178 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; in nested_vmx_failValid()
185 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_failValid()
198 if (vmx->nested.current_vmptr == INVALID_GPA && in nested_vmx_fail()
207 	/* TODO: don't simply reset the guest here. */ in nested_vmx_abort()
226 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
235 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map); in nested_release_evmcs()
236 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
237 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; in nested_release_evmcs()
240 hv_vcpu->nested.pa_page_gpa = INVALID_GPA; in nested_release_evmcs()
241 hv_vcpu->nested.vm_id = 0; in nested_release_evmcs()
242 hv_vcpu->nested.vp_id = 0; in nested_release_evmcs()
255 * writing to the non-existent 'launch_state' field. The area doesn't in nested_evmcs_handle_vmclear()
257 * nothing KVM has to do to transition it from 'active' to 'non-active' in nested_evmcs_handle_vmclear()
259 * vmx->nested.hv_evmcs but this shouldn't be a problem. in nested_evmcs_handle_vmclear()
265 if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr) in nested_evmcs_handle_vmclear()
279 if (unlikely(!vmx->vt.guest_state_loaded)) in vmx_sync_vmcs_host_state()
282 src = &prev->host_state; in vmx_sync_vmcs_host_state()
283 dest = &vmx->loaded_vmcs->host_state; in vmx_sync_vmcs_host_state()
285 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); in vmx_sync_vmcs_host_state()
286 dest->ldt_sel = src->ldt_sel; in vmx_sync_vmcs_host_state()
288 dest->ds_sel = src->ds_sel; in vmx_sync_vmcs_host_state()
289 dest->es_sel = src->es_sel; in vmx_sync_vmcs_host_state()
299 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) in vmx_switch_vmcs()
303 prev = vmx->loaded_vmcs; in vmx_switch_vmcs()
304 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
309 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET; in vmx_switch_vmcs()
315 vcpu->arch.regs_dirty = 0; in vmx_switch_vmcs()
322 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map); in nested_put_vmcs12_pages()
323 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map); in nested_put_vmcs12_pages()
324 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map); in nested_put_vmcs12_pages()
325 vmx->nested.pi_desc = NULL; in nested_put_vmcs12_pages()
329 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
336 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) in free_nested()
337 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in free_nested()
339 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
344 vmx->nested.vmxon = false; in free_nested()
345 vmx->nested.smm.vmxon = false; in free_nested()
346 vmx->nested.vmxon_ptr = INVALID_GPA; in free_nested()
347 free_vpid(vmx->nested.vpid02); in free_nested()
348 vmx->nested.posted_intr_nv = -1; in free_nested()
349 vmx->nested.current_vmptr = INVALID_GPA; in free_nested()
352 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
353 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
354 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
356 kfree(vmx->nested.cached_vmcs12); in free_nested()
357 vmx->nested.cached_vmcs12 = NULL; in free_nested()
358 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
359 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
363 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in free_nested()
367 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
399 cached_root = &vcpu->arch.mmu->prev_roots[i]; in nested_ept_invalidate_addr()
401 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, in nested_ept_invalidate_addr()
406 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots); in nested_ept_invalidate_addr()
417 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
419 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
422 * It should be impossible to trigger a nested PML Full VM-Exit in nested_ept_inject_page_fault()
427 * VM-Exits as writes. in nested_ept_inject_page_fault()
429 WARN_ON_ONCE(vmx->vt.exit_reason.basic != EXIT_REASON_EPT_VIOLATION); in nested_ept_inject_page_fault()
432 * PML Full and EPT Violation VM-Exits both use bit 12 to report in nested_ept_inject_page_fault()
434 * as-is from the original EXIT_QUALIFICATION. in nested_ept_inject_page_fault()
438 if (fault->error_code & PFERR_RSVD_MASK) { in nested_ept_inject_page_fault()
442 exit_qualification = fault->exit_qualification; in nested_ept_inject_page_fault()
456 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer, in nested_ept_inject_page_fault()
457 fault->address); in nested_ept_inject_page_fault()
461 vmcs12->guest_physical_address = fault->address; in nested_ept_inject_page_fault()
467 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; in nested_ept_new_eptp()
468 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); in nested_ept_new_eptp()
479 vcpu->arch.mmu = &vcpu->arch.guest_mmu; in nested_ept_init_mmu_context()
481 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp; in nested_ept_init_mmu_context()
482 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
483 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; in nested_ept_init_mmu_context()
485 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
490 vcpu->arch.mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
491 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
499 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
501 (error_code & vmcs12->page_fault_error_code_mask) != in nested_vmx_is_page_fault_vmexit()
502 vmcs12->page_fault_error_code_match; in nested_vmx_is_page_fault_vmexit()
512 * Drop bits 31:16 of the error code when performing the #PF mask+match in nested_vmx_is_exception_vmexit()
513 * check. All VMCS fields involved are 32 bits, but Intel CPUs never in nested_vmx_is_exception_vmexit()
514 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected in nested_vmx_is_exception_vmexit()
515 * error code. Including the to-be-dropped bits in the check might in nested_vmx_is_exception_vmexit()
521 return (vmcs12->exception_bitmap & (1u << vector)); in nested_vmx_is_exception_vmexit()
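
The mask+match rule described in the comment above condenses to a small truth table; the sketch below restates it as a standalone C helper (hypothetical name and parameters mirroring the vmcs12 fields, not the kernel's nested_vmx_is_page_fault_vmexit() itself, though it follows the same logic as the lines at 499-502).

#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14

/*
 * Standalone restatement of the #PF mask+match rule. Only error-code bits
 * 15:0 participate, matching the comment about dropping bits 31:16.
 */
static bool pf_causes_vmexit_to_l1(uint32_t exception_bitmap,
				   uint32_t pf_ec_mask, uint32_t pf_ec_match,
				   uint32_t error_code)
{
	bool pf_intercepted = exception_bitmap & (1u << PF_VECTOR);
	bool match = ((error_code & 0xffff) & pf_ec_mask) == pf_ec_match;

	/*
	 * If L1 intercepts #PF, a matching error code exits to L1; if L1
	 * does not intercept #PF, only a non-matching error code does.
	 */
	return match ? pf_intercepted : !pf_intercepted;
}
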
530 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || in nested_vmx_check_io_bitmap_controls()
531 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) in nested_vmx_check_io_bitmap_controls()
532 return -EINVAL; in nested_vmx_check_io_bitmap_controls()
543 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) in nested_vmx_check_msr_bitmap_controls()
544 return -EINVAL; in nested_vmx_check_msr_bitmap_controls()
555 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) in nested_vmx_check_tpr_shadow_controls()
556 return -EINVAL; in nested_vmx_check_tpr_shadow_controls()
595 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
627 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
637 * - MSR bitmap for L1 hasn't changed. in nested_vmx_prepare_msr_bitmap()
638 * - Nested hypervisor (L1) is attempting to launch the same L2 as in nested_vmx_prepare_msr_bitmap()
640 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature in nested_vmx_prepare_msr_bitmap()
643 if (!vmx->nested.force_msr_bitmap_recalc) { in nested_vmx_prepare_msr_bitmap()
646 if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap && in nested_vmx_prepare_msr_bitmap()
647 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP) in nested_vmx_prepare_msr_bitmap()
651 if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map)) in nested_vmx_prepare_msr_bitmap()
657 * To keep the control flow simple, pay eight 8-byte writes (sixteen in nested_vmx_prepare_msr_bitmap()
658 * 4-byte writes on 32-bit systems) up front to enable intercepts for in nested_vmx_prepare_msr_bitmap()
668 * from the virtual-APIC page; take those 256 bits in nested_vmx_prepare_msr_bitmap()
697 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through. in nested_vmx_prepare_msr_bitmap()
744 vmx->nested.force_msr_bitmap_recalc = false; in nested_vmx_prepare_msr_bitmap()
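
The intercept-merging policy hinted at by the helper macro near line 595 can be summarised with a small standalone sketch; the function below is illustrative only, with hypothetical parameter names, and deliberately ignores the special-cased x2APIC range discussed in the comments above.

#include <stddef.h>

/*
 * Conceptual sketch, assuming 1 == "intercept": for the MSRs covered by the
 * helper macro, L2 gets direct access only when both KVM's own (vmcs01)
 * bitmap and L1's (vmcs12) bitmap pass the MSR through, i.e. the effective
 * vmcs02 bit is the OR of the two. Illustration only, not the kernel's
 * implementation.
 */
static void merge_msr_bitmaps(const unsigned long *vmcs01_bm,
			      const unsigned long *vmcs12_bm,
			      unsigned long *vmcs02_bm, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		vmcs02_bm[i] = vmcs01_bm[i] | vmcs12_bm[i];
}
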
753 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_cache_shadow_vmcs12()
756 vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_cache_shadow_vmcs12()
759 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_cache_shadow_vmcs12()
760 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_cache_shadow_vmcs12()
761 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) in nested_cache_shadow_vmcs12()
764 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_cache_shadow_vmcs12()
772 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_flush_cached_shadow_vmcs12()
775 vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_flush_cached_shadow_vmcs12()
778 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_flush_cached_shadow_vmcs12()
779 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_flush_cached_shadow_vmcs12()
780 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) in nested_flush_cached_shadow_vmcs12()
783 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_flush_cached_shadow_vmcs12()
793 return get_vmcs12(vcpu)->vm_exit_controls & in nested_exit_intr_ack_set()
801 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) in nested_vmx_check_apic_access_controls()
802 return -EINVAL; in nested_vmx_check_apic_access_controls()
822 return -EINVAL; in nested_vmx_check_apicv_controls()
829 return -EINVAL; in nested_vmx_check_apicv_controls()
832 * bits 15:8 should be zero in posted_intr_nv, in nested_vmx_check_apicv_controls()
836 * bits 5:0 of posted_intr_desc_addr should be zero. in nested_vmx_check_apicv_controls()
841 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
842 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) in nested_vmx_check_apicv_controls()
843 return -EINVAL; in nested_vmx_check_apicv_controls()
847 return -EINVAL; in nested_vmx_check_apicv_controls()
855 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
856 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
874 return -EINVAL; in nested_vmx_check_msr_switch()
877 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) in nested_vmx_check_msr_switch()
878 return -EINVAL; in nested_vmx_check_msr_switch()
887 vmcs12->vm_exit_msr_load_count, in nested_vmx_check_exit_msr_switch_controls()
888 vmcs12->vm_exit_msr_load_addr)) || in nested_vmx_check_exit_msr_switch_controls()
890 vmcs12->vm_exit_msr_store_count, in nested_vmx_check_exit_msr_switch_controls()
891 vmcs12->vm_exit_msr_store_addr))) in nested_vmx_check_exit_msr_switch_controls()
892 return -EINVAL; in nested_vmx_check_exit_msr_switch_controls()
901 vmcs12->vm_entry_msr_load_count, in nested_vmx_check_entry_msr_switch_controls()
902 vmcs12->vm_entry_msr_load_addr))) in nested_vmx_check_entry_msr_switch_controls()
903 return -EINVAL; in nested_vmx_check_entry_msr_switch_controls()
915 CC(!page_address_valid(vcpu, vmcs12->pml_address))) in nested_vmx_check_pml_controls()
916 return -EINVAL; in nested_vmx_check_pml_controls()
926 return -EINVAL; in nested_vmx_check_unrestricted_guest_controls()
935 return -EINVAL; in nested_vmx_check_mode_based_ept_exec_controls()
945 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || in nested_vmx_check_shadow_vmcs_controls()
946 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) in nested_vmx_check_shadow_vmcs_controls()
947 return -EINVAL; in nested_vmx_check_shadow_vmcs_controls()
956 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
957 return -EINVAL; in nested_vmx_msr_check_common()
958 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ in nested_vmx_msr_check_common()
959 CC(e->index == MSR_IA32_UCODE_REV)) in nested_vmx_msr_check_common()
960 return -EINVAL; in nested_vmx_msr_check_common()
961 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
962 return -EINVAL; in nested_vmx_msr_check_common()
969 if (CC(e->index == MSR_FS_BASE) || in nested_vmx_load_msr_check()
970 CC(e->index == MSR_GS_BASE) || in nested_vmx_load_msr_check()
971 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ in nested_vmx_load_msr_check()
973 return -EINVAL; in nested_vmx_load_msr_check()
980 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */ in nested_vmx_store_msr_check()
982 return -EINVAL; in nested_vmx_store_msr_check()
 987  * Load the guest's/host's MSRs at nested entry/exit.
 988  * Return 0 on success, or the index of the failing entry.
1015 __func__, i, e.index, e.reserved); in nested_vmx_load_msr()
1018 if (kvm_emulate_msr_write(vcpu, e.index, e.value)) { in nested_vmx_load_msr()
1021 __func__, i, e.index, e.value); in nested_vmx_load_msr()
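
The comment at lines 987-988 implies a simple loop shape; here is a hedged standalone sketch of it, with hypothetical callback helpers standing in for the guest-memory read, the validity check and the emulated WRMSR.

#include <stdbool.h>
#include <stdint.h>

struct msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

/*
 * Walk the guest-supplied list, validate each entry, attempt the write, and
 * report the index of the first failing entry so the caller can surface it.
 */
static uint32_t load_msr_list(const struct msr_entry *list, uint32_t count,
			      bool (*check)(const struct msr_entry *),
			      bool (*wrmsr)(uint32_t index, uint64_t value))
{
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (!check(&list[i]) || !wrmsr(list[i].index, list[i].value))
			return i + 1;	/* failing entry */
	}
	return 0;	/* success */
}
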
1039 * does not include the time taken for emulation of the L2->L1 in nested_vmx_get_vmexit_msr_value()
1040 * VM-exit in L0, use the more accurate value. in nested_vmx_get_vmexit_msr_value()
1043 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, in nested_vmx_get_vmexit_msr_value()
1047 u64 val = vmx->msr_autostore.guest.val[i].value; in nested_vmx_get_vmexit_msr_value()
1076 __func__, i, e->index, e->reserved); in read_and_check_msr_entry()
1091 return -EINVAL; in nested_vmx_store_msr()
1094 return -EINVAL; in nested_vmx_store_msr()
1096 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) in nested_vmx_store_msr()
1097 return -EINVAL; in nested_vmx_store_msr()
1105 __func__, i, e.index, data); in nested_vmx_store_msr()
1106 return -EINVAL; in nested_vmx_store_msr()
1115 u32 count = vmcs12->vm_exit_msr_store_count; in nested_msr_store_list_has_msr()
1116 u64 gpa = vmcs12->vm_exit_msr_store_addr; in nested_msr_store_list_has_msr()
1124 if (e.index == msr_index) in nested_msr_store_list_has_msr()
1134 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; in prepare_vmx_msr_autostore_list()
1145 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) { in prepare_vmx_msr_autostore_list()
1151 * the vmcs02 VMExit MSR-store area. in prepare_vmx_msr_autostore_list()
1158 last = autostore->nr++; in prepare_vmx_msr_autostore_list()
1159 autostore->val[last].index = msr_index; in prepare_vmx_msr_autostore_list()
1161 last = --autostore->nr; in prepare_vmx_msr_autostore_list()
1162 autostore->val[msr_autostore_slot] = autostore->val[last]; in prepare_vmx_msr_autostore_list()
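
The two fragments above (appending at autostore->nr, and removing by copying the last entry into the vacated slot) follow a common dense-array pattern; a standalone restatement, with a hypothetical structure in place of vmx_msrs:

#include <stdint.h>

struct autostore {
	unsigned int nr;
	struct { uint32_t index; uint64_t value; } val[8];
};

static void autostore_add(struct autostore *a, uint32_t msr_index)
{
	unsigned int last = a->nr++;	/* caller checks capacity first */

	a->val[last].index = msr_index;
}

static void autostore_remove(struct autostore *a, unsigned int slot)
{
	unsigned int last = --a->nr;	/* swap last entry into the gap */

	a->val[slot] = a->val[last];
}
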
1167 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1168 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1169 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1178 return -EINVAL; in nested_vmx_load_cr3()
1188 return -EINVAL; in nested_vmx_load_cr3()
1191 vcpu->arch.cr3 = cr3; in nested_vmx_load_cr3()
1194 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */ in nested_vmx_load_cr3()
1213 * with different VPID (L1 entries are tagged with vmx->vpid
1214 * while L2 entries are tagged with vmx->nested.vpid02).
1221 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); in nested_has_guest_tlb_tag()
1230 /* Handle pending Hyper-V TLB flush requests */ in nested_vmx_transition_tlb_flush()
1234 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the in nested_vmx_transition_tlb_flush()
1236 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM in nested_vmx_transition_tlb_flush()
1241 * synchronize the MMU in response to the guest TLB flush. in nested_vmx_transition_tlb_flush()
1244 * EPT is a special snowflake, as guest-physical mappings aren't in nested_vmx_transition_tlb_flush()
1245 * flushed on VPID invalidations, including VM-Enter or VM-Exit with in nested_vmx_transition_tlb_flush()
1247 * entries on VM-Enter because L1 can't rely on VM-Enter to flush in nested_vmx_transition_tlb_flush()
1260 * emulate a guest TLB flush as KVM does not track vpid12 history nor in nested_vmx_transition_tlb_flush()
1263 * guest ASID that cannot have entries in the TLB. in nested_vmx_transition_tlb_flush()
1265 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1266 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
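
For the VM-Enter direction, the policy laid out in the comments above reduces to a single predicate; the sketch below is a condensation under that reading, not the kernel's actual control flow.

#include <stdbool.h>
#include <stdint.h>

/*
 * Flush the guest TLB when L2 shares L1's tag (no dedicated vpid02), or when
 * L1 moved to a vpid12 that KVM has no history for.
 */
static bool need_guest_tlb_flush_on_vmenter(bool has_guest_tlb_tag,
					    uint16_t vpid12,
					    uint16_t last_seen_vpid12)
{
	return !has_guest_tlb_tag || vpid12 != last_seen_vpid12;
}
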
1305 * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has in vmx_restore_vmx_basic()
1306 * inverted polarity), the incoming value must not set feature bits or in vmx_restore_vmx_basic()
1307 * reserved bits that aren't allowed/supported by KVM. Fields, i.e. in vmx_restore_vmx_basic()
1308 * multi-bit values, are explicitly checked below. in vmx_restore_vmx_basic()
1311 return -EINVAL; in vmx_restore_vmx_basic()
1315 * addresses of VMX structures (e.g. VMCS) to 32-bits. in vmx_restore_vmx_basic()
1318 return -EINVAL; in vmx_restore_vmx_basic()
1322 return -EINVAL; in vmx_restore_vmx_basic()
1325 return -EINVAL; in vmx_restore_vmx_basic()
1327 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1336 *low = &msrs->pinbased_ctls_low; in vmx_get_control_msr()
1337 *high = &msrs->pinbased_ctls_high; in vmx_get_control_msr()
1340 *low = &msrs->procbased_ctls_low; in vmx_get_control_msr()
1341 *high = &msrs->procbased_ctls_high; in vmx_get_control_msr()
1344 *low = &msrs->exit_ctls_low; in vmx_get_control_msr()
1345 *high = &msrs->exit_ctls_high; in vmx_get_control_msr()
1348 *low = &msrs->entry_ctls_low; in vmx_get_control_msr()
1349 *high = &msrs->entry_ctls_high; in vmx_get_control_msr()
1352 *low = &msrs->secondary_ctls_low; in vmx_get_control_msr()
1353 *high = &msrs->secondary_ctls_high; in vmx_get_control_msr()
1370 /* Check must-be-1 bits are still 1. */ in vmx_restore_control_msr()
1372 return -EINVAL; in vmx_restore_control_msr()
1374 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1376 return -EINVAL; in vmx_restore_control_msr()
1378 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
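
The two "must-be-1" / "must-be-0" checks referenced above both reduce to a masked subset test; below is a standalone sketch of that test and of how the two halves of a VMX control capability MSR are checked (helper names are stand-ins; the layout assumption is the usual split of allowed-0 settings in the low word and allowed-1 settings in the high word).

#include <stdbool.h>
#include <stdint.h>

/* subset is contained in superset, restricted to the bits selected by mask */
static bool is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
	superset &= mask;
	subset &= mask;
	return (superset | subset) == superset;
}

/*
 * The incoming value may neither clear a must-be-1 bit (low 32 bits of the
 * supported value) nor set a bit KVM does not allow (high 32 bits).
 */
static bool control_msr_restore_ok(uint64_t supported, uint64_t data)
{
	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, 0xffffffffULL))
		return false;
	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, 0xffffffffULL << 32))
		return false;
	return true;
}
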
1404 * The incoming value must not set feature bits or reserved bits that in vmx_restore_vmx_misc()
1405 * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are in vmx_restore_vmx_misc()
1409 return -EINVAL; in vmx_restore_vmx_misc()
1411 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1415 return -EINVAL; in vmx_restore_vmx_misc()
1418 return -EINVAL; in vmx_restore_vmx_misc()
1421 return -EINVAL; in vmx_restore_vmx_misc()
1424 return -EINVAL; in vmx_restore_vmx_misc()
1426 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1427 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1438 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) in vmx_restore_vmx_ept_vpid_cap()
1439 return -EINVAL; in vmx_restore_vmx_ept_vpid_cap()
1441 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1442 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1450 return &msrs->cr0_fixed0; in vmx_get_fixed0_msr()
1452 return &msrs->cr4_fixed0; in vmx_get_fixed0_msr()
1463 	 * 1 bits (which indicate bits that "must-be-1" during VMX operation) in vmx_restore_fixed0_msr()
1466 if (!is_bitwise_subset(data, *msr, -1ULL)) in vmx_restore_fixed0_msr()
1467 return -EINVAL; in vmx_restore_fixed0_msr()
1469 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1476 * Returns 0 on success, non-0 otherwise.
1486 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1487 return -EBUSY; in vmx_set_vmx_msr()
1497 * The "non-true" VMX capability MSRs are generated from the in vmx_set_vmx_msr()
1501 * should restore the "true" MSRs with the must-be-1 bits in vmx_set_vmx_msr()
1505 return -EINVAL; in vmx_set_vmx_msr()
1523 return -EINVAL; in vmx_set_vmx_msr()
1527 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1531 return -EINVAL; in vmx_set_vmx_msr()
1532 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1538 return -EINVAL; in vmx_set_vmx_msr()
1542 /* Returns 0 on success, non-0 otherwise. */
1547 *pdata = msrs->basic; in vmx_get_vmx_msr()
1552 msrs->pinbased_ctls_low, in vmx_get_vmx_msr()
1553 msrs->pinbased_ctls_high); in vmx_get_vmx_msr()
1560 msrs->procbased_ctls_low, in vmx_get_vmx_msr()
1561 msrs->procbased_ctls_high); in vmx_get_vmx_msr()
1568 msrs->exit_ctls_low, in vmx_get_vmx_msr()
1569 msrs->exit_ctls_high); in vmx_get_vmx_msr()
1576 msrs->entry_ctls_low, in vmx_get_vmx_msr()
1577 msrs->entry_ctls_high); in vmx_get_vmx_msr()
1583 msrs->misc_low, in vmx_get_vmx_msr()
1584 msrs->misc_high); in vmx_get_vmx_msr()
1587 *pdata = msrs->cr0_fixed0; in vmx_get_vmx_msr()
1590 *pdata = msrs->cr0_fixed1; in vmx_get_vmx_msr()
1593 *pdata = msrs->cr4_fixed0; in vmx_get_vmx_msr()
1596 *pdata = msrs->cr4_fixed1; in vmx_get_vmx_msr()
1599 *pdata = msrs->vmcs_enum; in vmx_get_vmx_msr()
1603 msrs->secondary_ctls_low, in vmx_get_vmx_msr()
1604 msrs->secondary_ctls_high); in vmx_get_vmx_msr()
1607 *pdata = msrs->ept_caps | in vmx_get_vmx_msr()
1608 ((u64)msrs->vpid_caps << 32); in vmx_get_vmx_msr()
1611 *pdata = msrs->vmfunc_controls; in vmx_get_vmx_msr()
1622 * been modified by the L1 guest. Note, "writable" in this context means
1623 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1624 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1625 * VM-exit information fields (which are actually writable if the vCPU is
1630 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
1631 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_shadow_to_vmcs12()
1650 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
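
The copy direction described by the comment at lines 1622-1625 amounts to walking a table of guest-writable fields; the sketch below illustrates that walk with hypothetical types, and ignores per-field widths, which the kernel's vmcs12_write_any() handles.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct shadow_field { unsigned long encoding; size_t offset; };

/*
 * Each guest-writable shadow field is read out of the shadow VMCS (VMREAD)
 * and stored into the software vmcs12 image at its recorded offset.
 */
static void copy_rw_shadow_fields_to_vmcs12(const struct shadow_field *fields,
					    size_t nfields, void *vmcs12,
					    uint64_t (*vmread)(unsigned long))
{
	size_t i;

	for (i = 0; i < nfields; i++) {
		uint64_t val = vmread(fields[i].encoding);

		memcpy((char *)vmcs12 + fields[i].offset, &val, sizeof(val));
	}
}
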
1665 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
1666 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_vmcs12_to_shadow()
1686 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
1692 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1694 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); in copy_enlightened_to_vmcs12()
1697 vmcs12->tpr_threshold = evmcs->tpr_threshold; in copy_enlightened_to_vmcs12()
1698 vmcs12->guest_rip = evmcs->guest_rip; in copy_enlightened_to_vmcs12()
1702 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page; in copy_enlightened_to_vmcs12()
1703 hv_vcpu->nested.vm_id = evmcs->hv_vm_id; in copy_enlightened_to_vmcs12()
1704 hv_vcpu->nested.vp_id = evmcs->hv_vp_id; in copy_enlightened_to_vmcs12()
1709 vmcs12->guest_rsp = evmcs->guest_rsp; in copy_enlightened_to_vmcs12()
1710 vmcs12->guest_rflags = evmcs->guest_rflags; in copy_enlightened_to_vmcs12()
1711 vmcs12->guest_interruptibility_info = in copy_enlightened_to_vmcs12()
1712 evmcs->guest_interruptibility_info; in copy_enlightened_to_vmcs12()
1715 * vmcs12->guest_ssp = evmcs->guest_ssp; in copy_enlightened_to_vmcs12()
1721 vmcs12->cpu_based_vm_exec_control = in copy_enlightened_to_vmcs12()
1722 evmcs->cpu_based_vm_exec_control; in copy_enlightened_to_vmcs12()
1727 vmcs12->exception_bitmap = evmcs->exception_bitmap; in copy_enlightened_to_vmcs12()
1732 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; in copy_enlightened_to_vmcs12()
1737 vmcs12->vm_entry_intr_info_field = in copy_enlightened_to_vmcs12()
1738 evmcs->vm_entry_intr_info_field; in copy_enlightened_to_vmcs12()
1739 vmcs12->vm_entry_exception_error_code = in copy_enlightened_to_vmcs12()
1740 evmcs->vm_entry_exception_error_code; in copy_enlightened_to_vmcs12()
1741 vmcs12->vm_entry_instruction_len = in copy_enlightened_to_vmcs12()
1742 evmcs->vm_entry_instruction_len; in copy_enlightened_to_vmcs12()
1747 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; in copy_enlightened_to_vmcs12()
1748 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; in copy_enlightened_to_vmcs12()
1749 vmcs12->host_cr0 = evmcs->host_cr0; in copy_enlightened_to_vmcs12()
1750 vmcs12->host_cr3 = evmcs->host_cr3; in copy_enlightened_to_vmcs12()
1751 vmcs12->host_cr4 = evmcs->host_cr4; in copy_enlightened_to_vmcs12()
1752 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; in copy_enlightened_to_vmcs12()
1753 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; in copy_enlightened_to_vmcs12()
1754 vmcs12->host_rip = evmcs->host_rip; in copy_enlightened_to_vmcs12()
1755 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; in copy_enlightened_to_vmcs12()
1756 vmcs12->host_es_selector = evmcs->host_es_selector; in copy_enlightened_to_vmcs12()
1757 vmcs12->host_cs_selector = evmcs->host_cs_selector; in copy_enlightened_to_vmcs12()
1758 vmcs12->host_ss_selector = evmcs->host_ss_selector; in copy_enlightened_to_vmcs12()
1759 vmcs12->host_ds_selector = evmcs->host_ds_selector; in copy_enlightened_to_vmcs12()
1760 vmcs12->host_fs_selector = evmcs->host_fs_selector; in copy_enlightened_to_vmcs12()
1761 vmcs12->host_gs_selector = evmcs->host_gs_selector; in copy_enlightened_to_vmcs12()
1762 vmcs12->host_tr_selector = evmcs->host_tr_selector; in copy_enlightened_to_vmcs12()
1763 vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl; in copy_enlightened_to_vmcs12()
1766 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet; in copy_enlightened_to_vmcs12()
1767 * vmcs12->host_ssp = evmcs->host_ssp; in copy_enlightened_to_vmcs12()
1768 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr; in copy_enlightened_to_vmcs12()
1774 vmcs12->pin_based_vm_exec_control = in copy_enlightened_to_vmcs12()
1775 evmcs->pin_based_vm_exec_control; in copy_enlightened_to_vmcs12()
1776 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; in copy_enlightened_to_vmcs12()
1777 vmcs12->secondary_vm_exec_control = in copy_enlightened_to_vmcs12()
1778 evmcs->secondary_vm_exec_control; in copy_enlightened_to_vmcs12()
1783 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; in copy_enlightened_to_vmcs12()
1784 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; in copy_enlightened_to_vmcs12()
1789 vmcs12->msr_bitmap = evmcs->msr_bitmap; in copy_enlightened_to_vmcs12()
1794 vmcs12->guest_es_base = evmcs->guest_es_base; in copy_enlightened_to_vmcs12()
1795 vmcs12->guest_cs_base = evmcs->guest_cs_base; in copy_enlightened_to_vmcs12()
1796 vmcs12->guest_ss_base = evmcs->guest_ss_base; in copy_enlightened_to_vmcs12()
1797 vmcs12->guest_ds_base = evmcs->guest_ds_base; in copy_enlightened_to_vmcs12()
1798 vmcs12->guest_fs_base = evmcs->guest_fs_base; in copy_enlightened_to_vmcs12()
1799 vmcs12->guest_gs_base = evmcs->guest_gs_base; in copy_enlightened_to_vmcs12()
1800 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; in copy_enlightened_to_vmcs12()
1801 vmcs12->guest_tr_base = evmcs->guest_tr_base; in copy_enlightened_to_vmcs12()
1802 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; in copy_enlightened_to_vmcs12()
1803 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; in copy_enlightened_to_vmcs12()
1804 vmcs12->guest_es_limit = evmcs->guest_es_limit; in copy_enlightened_to_vmcs12()
1805 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; in copy_enlightened_to_vmcs12()
1806 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; in copy_enlightened_to_vmcs12()
1807 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; in copy_enlightened_to_vmcs12()
1808 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; in copy_enlightened_to_vmcs12()
1809 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; in copy_enlightened_to_vmcs12()
1810 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; in copy_enlightened_to_vmcs12()
1811 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; in copy_enlightened_to_vmcs12()
1812 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; in copy_enlightened_to_vmcs12()
1813 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; in copy_enlightened_to_vmcs12()
1814 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; in copy_enlightened_to_vmcs12()
1815 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; in copy_enlightened_to_vmcs12()
1816 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; in copy_enlightened_to_vmcs12()
1817 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; in copy_enlightened_to_vmcs12()
1818 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; in copy_enlightened_to_vmcs12()
1819 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; in copy_enlightened_to_vmcs12()
1820 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; in copy_enlightened_to_vmcs12()
1821 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; in copy_enlightened_to_vmcs12()
1822 vmcs12->guest_es_selector = evmcs->guest_es_selector; in copy_enlightened_to_vmcs12()
1823 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; in copy_enlightened_to_vmcs12()
1824 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; in copy_enlightened_to_vmcs12()
1825 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; in copy_enlightened_to_vmcs12()
1826 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; in copy_enlightened_to_vmcs12()
1827 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; in copy_enlightened_to_vmcs12()
1828 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; in copy_enlightened_to_vmcs12()
1829 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; in copy_enlightened_to_vmcs12()
1834 vmcs12->tsc_offset = evmcs->tsc_offset; in copy_enlightened_to_vmcs12()
1835 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; in copy_enlightened_to_vmcs12()
1836 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; in copy_enlightened_to_vmcs12()
1837 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap; in copy_enlightened_to_vmcs12()
1838 vmcs12->tsc_multiplier = evmcs->tsc_multiplier; in copy_enlightened_to_vmcs12()
1843 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; in copy_enlightened_to_vmcs12()
1844 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; in copy_enlightened_to_vmcs12()
1845 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; in copy_enlightened_to_vmcs12()
1846 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; in copy_enlightened_to_vmcs12()
1847 vmcs12->guest_cr0 = evmcs->guest_cr0; in copy_enlightened_to_vmcs12()
1848 vmcs12->guest_cr3 = evmcs->guest_cr3; in copy_enlightened_to_vmcs12()
1849 vmcs12->guest_cr4 = evmcs->guest_cr4; in copy_enlightened_to_vmcs12()
1850 vmcs12->guest_dr7 = evmcs->guest_dr7; in copy_enlightened_to_vmcs12()
1855 vmcs12->host_fs_base = evmcs->host_fs_base; in copy_enlightened_to_vmcs12()
1856 vmcs12->host_gs_base = evmcs->host_gs_base; in copy_enlightened_to_vmcs12()
1857 vmcs12->host_tr_base = evmcs->host_tr_base; in copy_enlightened_to_vmcs12()
1858 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; in copy_enlightened_to_vmcs12()
1859 vmcs12->host_idtr_base = evmcs->host_idtr_base; in copy_enlightened_to_vmcs12()
1860 vmcs12->host_rsp = evmcs->host_rsp; in copy_enlightened_to_vmcs12()
1865 vmcs12->ept_pointer = evmcs->ept_pointer; in copy_enlightened_to_vmcs12()
1866 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; in copy_enlightened_to_vmcs12()
1871 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; in copy_enlightened_to_vmcs12()
1872 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; in copy_enlightened_to_vmcs12()
1873 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; in copy_enlightened_to_vmcs12()
1874 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; in copy_enlightened_to_vmcs12()
1875 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; in copy_enlightened_to_vmcs12()
1876 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; in copy_enlightened_to_vmcs12()
1877 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; in copy_enlightened_to_vmcs12()
1878 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; in copy_enlightened_to_vmcs12()
1879 vmcs12->guest_pending_dbg_exceptions = in copy_enlightened_to_vmcs12()
1880 evmcs->guest_pending_dbg_exceptions; in copy_enlightened_to_vmcs12()
1881 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; in copy_enlightened_to_vmcs12()
1882 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; in copy_enlightened_to_vmcs12()
1883 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; in copy_enlightened_to_vmcs12()
1884 vmcs12->guest_activity_state = evmcs->guest_activity_state; in copy_enlightened_to_vmcs12()
1885 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; in copy_enlightened_to_vmcs12()
1886 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl; in copy_enlightened_to_vmcs12()
1889 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet; in copy_enlightened_to_vmcs12()
1890 * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl; in copy_enlightened_to_vmcs12()
1891 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr; in copy_enlightened_to_vmcs12()
1897 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; in copy_enlightened_to_vmcs12()
1898 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; in copy_enlightened_to_vmcs12()
1899 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; in copy_enlightened_to_vmcs12()
1900 * vmcs12->page_fault_error_code_mask = in copy_enlightened_to_vmcs12()
1901 * evmcs->page_fault_error_code_mask; in copy_enlightened_to_vmcs12()
1902 * vmcs12->page_fault_error_code_match = in copy_enlightened_to_vmcs12()
1903 * evmcs->page_fault_error_code_match; in copy_enlightened_to_vmcs12()
1904 * vmcs12->cr3_target_count = evmcs->cr3_target_count; in copy_enlightened_to_vmcs12()
1905 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; in copy_enlightened_to_vmcs12()
1906 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; in copy_enlightened_to_vmcs12()
1907 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; in copy_enlightened_to_vmcs12()
1912 * vmcs12->guest_physical_address = evmcs->guest_physical_address; in copy_enlightened_to_vmcs12()
1913 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; in copy_enlightened_to_vmcs12()
1914 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; in copy_enlightened_to_vmcs12()
1915 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; in copy_enlightened_to_vmcs12()
1916 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; in copy_enlightened_to_vmcs12()
1917 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; in copy_enlightened_to_vmcs12()
1918 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; in copy_enlightened_to_vmcs12()
1919 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; in copy_enlightened_to_vmcs12()
1920 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; in copy_enlightened_to_vmcs12()
1921 * vmcs12->exit_qualification = evmcs->exit_qualification; in copy_enlightened_to_vmcs12()
1922 * vmcs12->guest_linear_address = evmcs->guest_linear_address; in copy_enlightened_to_vmcs12()
1925 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; in copy_enlightened_to_vmcs12()
1926 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; in copy_enlightened_to_vmcs12()
1927 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; in copy_enlightened_to_vmcs12()
1928 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; in copy_enlightened_to_vmcs12()
1933 KVM_BUG_ON(1, vmx->vcpu.kvm); in copy_enlightened_to_vmcs12()
1940 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1946 * evmcs->host_es_selector = vmcs12->host_es_selector; in copy_vmcs12_to_enlightened()
1947 * evmcs->host_cs_selector = vmcs12->host_cs_selector; in copy_vmcs12_to_enlightened()
1948 * evmcs->host_ss_selector = vmcs12->host_ss_selector; in copy_vmcs12_to_enlightened()
1949 * evmcs->host_ds_selector = vmcs12->host_ds_selector; in copy_vmcs12_to_enlightened()
1950 * evmcs->host_fs_selector = vmcs12->host_fs_selector; in copy_vmcs12_to_enlightened()
1951 * evmcs->host_gs_selector = vmcs12->host_gs_selector; in copy_vmcs12_to_enlightened()
1952 * evmcs->host_tr_selector = vmcs12->host_tr_selector; in copy_vmcs12_to_enlightened()
1953 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; in copy_vmcs12_to_enlightened()
1954 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; in copy_vmcs12_to_enlightened()
1955 * evmcs->host_cr0 = vmcs12->host_cr0; in copy_vmcs12_to_enlightened()
1956 * evmcs->host_cr3 = vmcs12->host_cr3; in copy_vmcs12_to_enlightened()
1957 * evmcs->host_cr4 = vmcs12->host_cr4; in copy_vmcs12_to_enlightened()
1958 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; in copy_vmcs12_to_enlightened()
1959 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; in copy_vmcs12_to_enlightened()
1960 * evmcs->host_rip = vmcs12->host_rip; in copy_vmcs12_to_enlightened()
1961 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; in copy_vmcs12_to_enlightened()
1962 * evmcs->host_fs_base = vmcs12->host_fs_base; in copy_vmcs12_to_enlightened()
1963 * evmcs->host_gs_base = vmcs12->host_gs_base; in copy_vmcs12_to_enlightened()
1964 * evmcs->host_tr_base = vmcs12->host_tr_base; in copy_vmcs12_to_enlightened()
1965 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; in copy_vmcs12_to_enlightened()
1966 * evmcs->host_idtr_base = vmcs12->host_idtr_base; in copy_vmcs12_to_enlightened()
1967 * evmcs->host_rsp = vmcs12->host_rsp; in copy_vmcs12_to_enlightened()
1969 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; in copy_vmcs12_to_enlightened()
1970 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; in copy_vmcs12_to_enlightened()
1971 * evmcs->msr_bitmap = vmcs12->msr_bitmap; in copy_vmcs12_to_enlightened()
1972 * evmcs->ept_pointer = vmcs12->ept_pointer; in copy_vmcs12_to_enlightened()
1973 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; in copy_vmcs12_to_enlightened()
1974 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; in copy_vmcs12_to_enlightened()
1975 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; in copy_vmcs12_to_enlightened()
1976 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; in copy_vmcs12_to_enlightened()
1977 * evmcs->tpr_threshold = vmcs12->tpr_threshold; in copy_vmcs12_to_enlightened()
1978 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; in copy_vmcs12_to_enlightened()
1979 * evmcs->exception_bitmap = vmcs12->exception_bitmap; in copy_vmcs12_to_enlightened()
1980 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; in copy_vmcs12_to_enlightened()
1981 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; in copy_vmcs12_to_enlightened()
1982 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; in copy_vmcs12_to_enlightened()
1983 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; in copy_vmcs12_to_enlightened()
1984 * evmcs->page_fault_error_code_mask = in copy_vmcs12_to_enlightened()
1985 * vmcs12->page_fault_error_code_mask; in copy_vmcs12_to_enlightened()
1986 * evmcs->page_fault_error_code_match = in copy_vmcs12_to_enlightened()
1987 * vmcs12->page_fault_error_code_match; in copy_vmcs12_to_enlightened()
1988 * evmcs->cr3_target_count = vmcs12->cr3_target_count; in copy_vmcs12_to_enlightened()
1989 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; in copy_vmcs12_to_enlightened()
1990 * evmcs->tsc_offset = vmcs12->tsc_offset; in copy_vmcs12_to_enlightened()
1991 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; in copy_vmcs12_to_enlightened()
1992 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; in copy_vmcs12_to_enlightened()
1993 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; in copy_vmcs12_to_enlightened()
1994 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; in copy_vmcs12_to_enlightened()
1995 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; in copy_vmcs12_to_enlightened()
1996 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; in copy_vmcs12_to_enlightened()
1997 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; in copy_vmcs12_to_enlightened()
1998 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; in copy_vmcs12_to_enlightened()
1999 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl; in copy_vmcs12_to_enlightened()
2000 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl; in copy_vmcs12_to_enlightened()
2001 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap; in copy_vmcs12_to_enlightened()
2002 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier; in copy_vmcs12_to_enlightened()
2005 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; in copy_vmcs12_to_enlightened()
2006 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; in copy_vmcs12_to_enlightened()
2007 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; in copy_vmcs12_to_enlightened()
2008 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; in copy_vmcs12_to_enlightened()
2009 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet; in copy_vmcs12_to_enlightened()
2010 * evmcs->host_ssp = vmcs12->host_ssp; in copy_vmcs12_to_enlightened()
2011 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr; in copy_vmcs12_to_enlightened()
2012 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet; in copy_vmcs12_to_enlightened()
2013 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl; in copy_vmcs12_to_enlightened()
2014 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr; in copy_vmcs12_to_enlightened()
2015 * evmcs->guest_ssp = vmcs12->guest_ssp; in copy_vmcs12_to_enlightened()
2018 evmcs->guest_es_selector = vmcs12->guest_es_selector; in copy_vmcs12_to_enlightened()
2019 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; in copy_vmcs12_to_enlightened()
2020 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; in copy_vmcs12_to_enlightened()
2021 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; in copy_vmcs12_to_enlightened()
2022 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; in copy_vmcs12_to_enlightened()
2023 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; in copy_vmcs12_to_enlightened()
2024 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; in copy_vmcs12_to_enlightened()
2025 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; in copy_vmcs12_to_enlightened()
2027 evmcs->guest_es_limit = vmcs12->guest_es_limit; in copy_vmcs12_to_enlightened()
2028 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; in copy_vmcs12_to_enlightened()
2029 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; in copy_vmcs12_to_enlightened()
2030 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; in copy_vmcs12_to_enlightened()
2031 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; in copy_vmcs12_to_enlightened()
2032 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; in copy_vmcs12_to_enlightened()
2033 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; in copy_vmcs12_to_enlightened()
2034 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; in copy_vmcs12_to_enlightened()
2035 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; in copy_vmcs12_to_enlightened()
2036 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; in copy_vmcs12_to_enlightened()
2038 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; in copy_vmcs12_to_enlightened()
2039 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; in copy_vmcs12_to_enlightened()
2040 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; in copy_vmcs12_to_enlightened()
2041 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; in copy_vmcs12_to_enlightened()
2042 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; in copy_vmcs12_to_enlightened()
2043 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; in copy_vmcs12_to_enlightened()
2044 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; in copy_vmcs12_to_enlightened()
2045 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; in copy_vmcs12_to_enlightened()
2047 evmcs->guest_es_base = vmcs12->guest_es_base; in copy_vmcs12_to_enlightened()
2048 evmcs->guest_cs_base = vmcs12->guest_cs_base; in copy_vmcs12_to_enlightened()
2049 evmcs->guest_ss_base = vmcs12->guest_ss_base; in copy_vmcs12_to_enlightened()
2050 evmcs->guest_ds_base = vmcs12->guest_ds_base; in copy_vmcs12_to_enlightened()
2051 evmcs->guest_fs_base = vmcs12->guest_fs_base; in copy_vmcs12_to_enlightened()
2052 evmcs->guest_gs_base = vmcs12->guest_gs_base; in copy_vmcs12_to_enlightened()
2053 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; in copy_vmcs12_to_enlightened()
2054 evmcs->guest_tr_base = vmcs12->guest_tr_base; in copy_vmcs12_to_enlightened()
2055 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; in copy_vmcs12_to_enlightened()
2056 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; in copy_vmcs12_to_enlightened()
2058 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; in copy_vmcs12_to_enlightened()
2059 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; in copy_vmcs12_to_enlightened()
2061 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; in copy_vmcs12_to_enlightened()
2062 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; in copy_vmcs12_to_enlightened()
2063 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; in copy_vmcs12_to_enlightened()
2064 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; in copy_vmcs12_to_enlightened()
2066 evmcs->guest_pending_dbg_exceptions = in copy_vmcs12_to_enlightened()
2067 vmcs12->guest_pending_dbg_exceptions; in copy_vmcs12_to_enlightened()
2068 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; in copy_vmcs12_to_enlightened()
2069 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; in copy_vmcs12_to_enlightened()
2071 evmcs->guest_activity_state = vmcs12->guest_activity_state; in copy_vmcs12_to_enlightened()
2072 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; in copy_vmcs12_to_enlightened()
2074 evmcs->guest_cr0 = vmcs12->guest_cr0; in copy_vmcs12_to_enlightened()
2075 evmcs->guest_cr3 = vmcs12->guest_cr3; in copy_vmcs12_to_enlightened()
2076 evmcs->guest_cr4 = vmcs12->guest_cr4; in copy_vmcs12_to_enlightened()
2077 evmcs->guest_dr7 = vmcs12->guest_dr7; in copy_vmcs12_to_enlightened()
2079 evmcs->guest_physical_address = vmcs12->guest_physical_address; in copy_vmcs12_to_enlightened()
2081 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; in copy_vmcs12_to_enlightened()
2082 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; in copy_vmcs12_to_enlightened()
2083 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; in copy_vmcs12_to_enlightened()
2084 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; in copy_vmcs12_to_enlightened()
2085 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; in copy_vmcs12_to_enlightened()
2086 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; in copy_vmcs12_to_enlightened()
2087 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; in copy_vmcs12_to_enlightened()
2088 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; in copy_vmcs12_to_enlightened()
2090 evmcs->exit_qualification = vmcs12->exit_qualification; in copy_vmcs12_to_enlightened()
2092 evmcs->guest_linear_address = vmcs12->guest_linear_address; in copy_vmcs12_to_enlightened()
2093 evmcs->guest_rsp = vmcs12->guest_rsp; in copy_vmcs12_to_enlightened()
2094 evmcs->guest_rflags = vmcs12->guest_rflags; in copy_vmcs12_to_enlightened()
2096 evmcs->guest_interruptibility_info = in copy_vmcs12_to_enlightened()
2097 vmcs12->guest_interruptibility_info; in copy_vmcs12_to_enlightened()
2098 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; in copy_vmcs12_to_enlightened()
2099 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; in copy_vmcs12_to_enlightened()
2100 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; in copy_vmcs12_to_enlightened()
2101 evmcs->vm_entry_exception_error_code = in copy_vmcs12_to_enlightened()
2102 vmcs12->vm_entry_exception_error_code; in copy_vmcs12_to_enlightened()
2103 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; in copy_vmcs12_to_enlightened()
2105 evmcs->guest_rip = vmcs12->guest_rip; in copy_vmcs12_to_enlightened()
2107 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; in copy_vmcs12_to_enlightened()
2111 KVM_BUG_ON(1, vmx->vcpu.kvm); in copy_vmcs12_to_enlightened()
2136 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2137 vmx->nested.current_vmptr = INVALID_GPA; in nested_vmx_handle_enlightened_vmptrld()
2142 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2145 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2149 	 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this in nested_vmx_handle_enlightened_vmptrld()
2153 	 * The guest should learn which eVMCS versions the host supports by in nested_vmx_handle_enlightened_vmptrld()
2158 * However, it turns out that Microsoft Hyper-V fails to comply in nested_vmx_handle_enlightened_vmptrld()
2159 	 * with its own invented interface: when Hyper-V uses eVMCS, it in nested_vmx_handle_enlightened_vmptrld()
2165 * To overcome Hyper-V bug, we accept here either a supported in nested_vmx_handle_enlightened_vmptrld()
2169 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2170 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2175 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2180 * reloaded from guest's memory (read only fields, fields not in nested_vmx_handle_enlightened_vmptrld()
2187 vmcs12->hdr.revision_id = VMCS12_REVISION; in nested_vmx_handle_enlightened_vmptrld()
2197 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2200 vmx->nested.force_msr_bitmap_recalc = true; in nested_vmx_handle_enlightened_vmptrld()
2218 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2226 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2227 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
2228 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
2241 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2242 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2243 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2244 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2246 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2256 * a VMExit prior to executing any instructions in the guest. in vmx_start_preemption_timer()
2259 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2263 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
2268 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
2269 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
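
The timer arithmetic around the lines above converts emulated preemption-timer ticks into an hrtimer deadline; as a rough standalone sketch (assuming one tick per 2^rate TSC cycles and a TSC frequency in kHz; the intermediate scaling to nanoseconds is simply not among the matched lines shown here):

#include <stdint.h>

/* ns = ticks * 2^rate * 1e6 / tsc_khz */
static uint64_t preemption_timer_ticks_to_ns(uint64_t ticks, unsigned int rate,
					     uint32_t tsc_khz)
{
	return (ticks << rate) * 1000000ull / tsc_khz;
}
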
2276 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2277 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) in nested_vmx_calc_efer()
2278 return vmcs12->guest_ia32_efer; in nested_vmx_calc_efer()
2279 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) in nested_vmx_calc_efer()
2280 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2282 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2287 struct kvm *kvm = vmx->vcpu.kvm; in prepare_vmcs02_constant_state()
2295 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2297 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2306 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2308 if (vmx->ve_info) in prepare_vmcs02_constant_state()
2309 vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); in prepare_vmcs02_constant_state()
2319 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2328 vmcs_write16(GUEST_PML_INDEX, -1); in prepare_vmcs02_constant_state()
2335 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); in prepare_vmcs02_constant_state()
2342 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); in prepare_vmcs02_constant_state()
2343 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_constant_state()
2344 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_constant_state()
2357 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the in prepare_vmcs02_early_rare()
2359 * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter in prepare_vmcs02_early_rare()
2360 * and VM-Exit are architecturally required to flush VPID=0, but *only* in prepare_vmcs02_early_rare()
2362 * required flushes), but doing so would cause KVM to over-flush. E.g. in prepare_vmcs02_early_rare()
2368 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2369 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2371 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_early_rare()
2381 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) in prepare_vmcs02_early()
2388 exec_control |= (vmcs12->pin_based_vm_exec_control & in prepare_vmcs02_early()
2392 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2394 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2396 vmx->nested.posted_intr_nv = -1; in prepare_vmcs02_early()
2408 exec_control |= vmcs12->cpu_based_vm_exec_control; in prepare_vmcs02_early()
2410 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2412 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); in prepare_vmcs02_early()
2457 exec_control |= vmcs12->secondary_vm_exec_control; in prepare_vmcs02_early()
2469 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP)) in prepare_vmcs02_early()
2474 vmcs12->guest_intr_status); in prepare_vmcs02_early()
2480 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); in prepare_vmcs02_early()
2490 * on the related bits (if supported by the CPU) in the hope that in prepare_vmcs02_early()
2498 exec_control |= (vmcs12->vm_entry_controls & in prepare_vmcs02_early()
2512 * L2->L1 exit controls are emulated - the hardware exit is to L0 so in prepare_vmcs02_early()
2514 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). in prepare_vmcs02_early()
2526 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2528 vmcs12->vm_entry_intr_info_field); in prepare_vmcs02_early()
2530 vmcs12->vm_entry_exception_error_code); in prepare_vmcs02_early()
2532 vmcs12->vm_entry_instruction_len); in prepare_vmcs02_early()
2534 vmcs12->guest_interruptibility_info); in prepare_vmcs02_early()
2535 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02_early()
2536 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); in prepare_vmcs02_early()
2572 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & in prepare_vmcs02_rare()
2575 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); in prepare_vmcs02_rare()
2576 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); in prepare_vmcs02_rare()
2577 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); in prepare_vmcs02_rare()
2578 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); in prepare_vmcs02_rare()
2579 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); in prepare_vmcs02_rare()
2580 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); in prepare_vmcs02_rare()
2581 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); in prepare_vmcs02_rare()
2582 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); in prepare_vmcs02_rare()
2583 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); in prepare_vmcs02_rare()
2584 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); in prepare_vmcs02_rare()
2585 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); in prepare_vmcs02_rare()
2586 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); in prepare_vmcs02_rare()
2587 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); in prepare_vmcs02_rare()
2588 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); in prepare_vmcs02_rare()
2589 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); in prepare_vmcs02_rare()
2590 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); in prepare_vmcs02_rare()
2591 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); in prepare_vmcs02_rare()
2592 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); in prepare_vmcs02_rare()
2593 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); in prepare_vmcs02_rare()
2594 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); in prepare_vmcs02_rare()
2595 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); in prepare_vmcs02_rare()
2596 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); in prepare_vmcs02_rare()
2597 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); in prepare_vmcs02_rare()
2598 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); in prepare_vmcs02_rare()
2599 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); in prepare_vmcs02_rare()
2600 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); in prepare_vmcs02_rare()
2601 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); in prepare_vmcs02_rare()
2602 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); in prepare_vmcs02_rare()
2603 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); in prepare_vmcs02_rare()
2604 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); in prepare_vmcs02_rare()
2605 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); in prepare_vmcs02_rare()
2606 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); in prepare_vmcs02_rare()
2607 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); in prepare_vmcs02_rare()
2608 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); in prepare_vmcs02_rare()
2609 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); in prepare_vmcs02_rare()
2610 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); in prepare_vmcs02_rare()
2615 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & in prepare_vmcs02_rare()
2617 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); in prepare_vmcs02_rare()
2619 vmcs12->guest_pending_dbg_exceptions); in prepare_vmcs02_rare()
2620 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); in prepare_vmcs02_rare()
2621 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); in prepare_vmcs02_rare()
2628 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); in prepare_vmcs02_rare()
2629 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); in prepare_vmcs02_rare()
2630 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); in prepare_vmcs02_rare()
2631 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); in prepare_vmcs02_rare()
2634 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2635 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) in prepare_vmcs02_rare()
2636 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); in prepare_vmcs02_rare()
2640 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); in prepare_vmcs02_rare()
2643 * Whether page-faults are trapped is determined by a combination of in prepare_vmcs02_rare()
2655 if (vmx_need_pf_intercept(&vmx->vcpu)) { in prepare_vmcs02_rare()
2663 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); in prepare_vmcs02_rare()
2664 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); in prepare_vmcs02_rare()
2668 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); in prepare_vmcs02_rare()
2669 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); in prepare_vmcs02_rare()
2670 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); in prepare_vmcs02_rare()
2671 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); in prepare_vmcs02_rare()
2678 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); in prepare_vmcs02_rare()
2680 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); in prepare_vmcs02_rare()
2681 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_rare()
2682 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_rare()
2684 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) in prepare_vmcs02_rare()
2685 vmcs_write_cet_state(&vmx->vcpu, vmcs12->guest_s_cet, in prepare_vmcs02_rare()
2686 vmcs12->guest_ssp, vmcs12->guest_ssp_tbl); in prepare_vmcs02_rare()
2692 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2693 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2694 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2695 * guest in a way that will both be appropriate to L1's requests, and our
2697 * function also has additional necessary side-effects, like setting various
2698 * vcpu->arch fields.
2710 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) { in prepare_vmcs02()
2712 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2715 !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); in prepare_vmcs02()
2718 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2719 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { in prepare_vmcs02()
2720 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); in prepare_vmcs02()
2721 vmx_guest_debugctl_write(vcpu, vmcs12->guest_ia32_debugctl & in prepare_vmcs02()
2724 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
2725 vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl); in prepare_vmcs02()
2728 if (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2729 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE)) in prepare_vmcs02()
2730 vmcs_write_cet_state(vcpu, vmx->nested.pre_vmenter_s_cet, in prepare_vmcs02()
2731 vmx->nested.pre_vmenter_ssp, in prepare_vmcs02()
2732 vmx->nested.pre_vmenter_ssp_tbl); in prepare_vmcs02()
2734 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2735 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) in prepare_vmcs02()
2736 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); in prepare_vmcs02()
2737 vmx_set_rflags(vcpu, vmcs12->guest_rflags); in prepare_vmcs02()
2740 * bitwise-or of what L1 wants to trap for L2, and what we want to in prepare_vmcs02()
2741 * trap. Note that CR0.TS also needs updating - we do this later. in prepare_vmcs02()
2744 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
2745 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
2747 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2748 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { in prepare_vmcs02()
2749 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); in prepare_vmcs02()
2750 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
2752 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
2755 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in prepare_vmcs02()
2756 vcpu->arch.l1_tsc_offset, in prepare_vmcs02()
2760 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in prepare_vmcs02()
2761 vcpu->arch.l1_tsc_scaling_ratio, in prepare_vmcs02()
2764 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); in prepare_vmcs02()
2766 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); in prepare_vmcs02()
2774 * Override the CR0/CR4 read shadows after setting the effective guest in prepare_vmcs02()
2778 vmx_set_cr0(vcpu, vmcs12->guest_cr0); in prepare_vmcs02()
2781 vmx_set_cr4(vcpu, vmcs12->guest_cr4); in prepare_vmcs02()
2784 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02()
2785 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ in prepare_vmcs02()
2786 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
2789 * Guest state is invalid and unrestricted guest is disabled, in prepare_vmcs02()
2793 * However when force loading the guest state (SMM exit or in prepare_vmcs02()
2795 * have invalid guest state now, which will be later fixed by in prepare_vmcs02()
2800 return -EINVAL; in prepare_vmcs02()
2804 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), in prepare_vmcs02()
2806 return -EINVAL; in prepare_vmcs02()
2810 * on nested VM-Exit, which can occur without actually running L2 and in prepare_vmcs02()
2816 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); in prepare_vmcs02()
2821 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); in prepare_vmcs02()
2822 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); in prepare_vmcs02()
2823 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); in prepare_vmcs02()
2824 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); in prepare_vmcs02()
2827 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && in prepare_vmcs02()
2830 vmcs12->guest_ia32_perf_global_ctrl))) { in prepare_vmcs02()
2832 return -EINVAL; in prepare_vmcs02()
2835 kvm_rsp_write(vcpu, vmcs12->guest_rsp); in prepare_vmcs02()
2836 kvm_rip_write(vcpu, vmcs12->guest_rip); in prepare_vmcs02()
2839 * It was observed that genuine Hyper-V running in L1 doesn't reset in prepare_vmcs02()
2841 * bits when it changes a field in eVMCS. Mark all fields as clean in prepare_vmcs02()
2845 evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; in prepare_vmcs02()
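/*
 * Illustrative sketch (not part of nested.c): per the "bitwise-or of what L1
 * wants to trap ... and what we want to trap" comment in prepare_vmcs02(), a
 * CR0 bit is guest-owned while L2 runs only if neither L0 nor L1 claimed it.
 * A hedged userspace model of that ownership computation:
 */
#include <stdint.h>

static uint64_t vmcs02_cr0_guest_owned(uint64_t l0_guest_owned_bits,
				       uint64_t vmcs12_cr0_guest_host_mask)
{
	/* A set mask bit means "the (L1) hypervisor owns this bit". */
	return l0_guest_owned_bits & ~vmcs12_cr0_guest_host_mask;
}

static uint64_t vmcs02_cr0_guest_host_mask(uint64_t guest_owned_bits)
{
	/* vmcs02's mask is simply the complement of the guest-owned set. */
	return ~guest_owned_bits;
}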
2854 return -EINVAL; in nested_vmx_check_nmi_controls()
2858 return -EINVAL; in nested_vmx_check_nmi_controls()
2870 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2874 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2881 /* Page-walk levels validity. */ in nested_vmx_check_eptp()
2884 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2888 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2895 /* Reserved bits should not be set */ in nested_vmx_check_eptp()
2901 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
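/*
 * Illustrative sketch (not part of nested.c): the EPTP checks above operate
 * on the architectural EPTP layout - bits 2:0 memory type (0 = UC, 6 = WB),
 * bits 5:3 page-walk length minus one, bit 6 accessed/dirty enable.  A small
 * standalone decoder for those fields:
 */
#include <stdbool.h>
#include <stdint.h>

struct eptp_fields {
	unsigned int memtype;		/* bits 2:0 */
	unsigned int walk_levels;	/* (bits 5:3) + 1, i.e. 4 or 5 */
	bool ad_enabled;		/* bit 6 */
};

static struct eptp_fields decode_eptp(uint64_t eptp)
{
	struct eptp_fields f = {
		.memtype     = eptp & 0x7,
		.walk_levels = ((eptp >> 3) & 0x7) + 1,
		.ad_enabled  = eptp & (1ull << 6),
	};

	return f;
}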
2909 * Checks related to VM-Execution Control Fields
2916 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, in nested_check_vm_execution_controls()
2917 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2918 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2919 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, in nested_check_vm_execution_controls()
2920 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2921 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2922 return -EINVAL; in nested_check_vm_execution_controls()
2925 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, in nested_check_vm_execution_controls()
2926 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2927 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2928 return -EINVAL; in nested_check_vm_execution_controls()
2930 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || in nested_check_vm_execution_controls()
2941 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) in nested_check_vm_execution_controls()
2942 return -EINVAL; in nested_check_vm_execution_controls()
2946 return -EINVAL; in nested_check_vm_execution_controls()
2949 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) in nested_check_vm_execution_controls()
2950 return -EINVAL; in nested_check_vm_execution_controls()
2953 if (CC(vmcs12->vm_function_control & in nested_check_vm_execution_controls()
2954 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2955 return -EINVAL; in nested_check_vm_execution_controls()
2959 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) in nested_check_vm_execution_controls()
2960 return -EINVAL; in nested_check_vm_execution_controls()
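/*
 * Illustrative sketch (not part of nested.c): the vmx_control_verify() calls
 * above boil down to the allowed-0/allowed-1 semantics of the VMX capability
 * MSR pairs - every bit the low word reports as fixed-to-one must be set,
 * and no bit outside the high word may be set.  A hedged restatement (the
 * kernel helper itself may be written differently):
 */
#include <stdbool.h>
#include <stdint.h>

static bool control_field_valid(uint32_t control, uint32_t low, uint32_t high)
{
	return (control & low) == low &&	/* all must-be-one bits present */
	       (control & ~high) == 0;		/* no unsupported bits set      */
}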
2968 * Checks related to VM-Exit Control Fields
2975 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, in nested_check_vm_exit_controls()
2976 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2977 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2979 return -EINVAL; in nested_check_vm_exit_controls()
2985 * Checks related to VM-Entry Control Fields
2992 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, in nested_check_vm_entry_controls()
2993 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2994 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
2995 return -EINVAL; in nested_check_vm_entry_controls()
2999 * Fields relevant to VM-entry event injection must be set properly. in nested_check_vm_entry_controls()
3000 * These fields are the VM-entry interruption-information field, the in nested_check_vm_entry_controls()
3001 * VM-entry exception error code, and the VM-entry instruction length. in nested_check_vm_entry_controls()
3003 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { in nested_check_vm_entry_controls()
3004 u32 intr_info = vmcs12->vm_entry_intr_info_field; in nested_check_vm_entry_controls()
3010 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; in nested_check_vm_entry_controls()
3012 /* VM-entry interruption-info field: interruption type */ in nested_check_vm_entry_controls()
3016 return -EINVAL; in nested_check_vm_entry_controls()
3018 /* VM-entry interruption-info field: vector */ in nested_check_vm_entry_controls()
3022 return -EINVAL; in nested_check_vm_entry_controls()
3032 return -EINVAL; in nested_check_vm_entry_controls()
3035 return -EINVAL; in nested_check_vm_entry_controls()
3038 /* VM-entry exception error code */ in nested_check_vm_entry_controls()
3040 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) in nested_check_vm_entry_controls()
3041 return -EINVAL; in nested_check_vm_entry_controls()
3043 /* VM-entry interruption-info field: reserved bits */ in nested_check_vm_entry_controls()
3045 return -EINVAL; in nested_check_vm_entry_controls()
3047 /* VM-entry instruction length */ in nested_check_vm_entry_controls()
3052 if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH) || in nested_check_vm_entry_controls()
3053 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
3055 return -EINVAL; in nested_check_vm_entry_controls()
3060 return -EINVAL; in nested_check_vm_entry_controls()
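/*
 * Illustrative sketch (not part of nested.c): the event-injection checks
 * above decode the VM-entry interruption-information field, whose layout is
 * bits 7:0 vector, bits 10:8 type, bit 11 "deliver error code", bit 31
 * valid.  A standalone decoder for reference:
 */
#include <stdbool.h>
#include <stdint.h>

struct entry_intr_info {
	unsigned int vector;		/* bits 7:0  */
	unsigned int type;		/* bits 10:8 */
	bool deliver_error_code;	/* bit 11    */
	bool valid;			/* bit 31    */
};

static struct entry_intr_info decode_entry_intr_info(uint32_t info)
{
	struct entry_intr_info d = {
		.vector			= info & 0xff,
		.type			= (info >> 8) & 0x7,
		.deliver_error_code	= info & (1u << 11),
		.valid			= info & (1u << 31),
	};

	return d;
}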
3071 return -EINVAL; in nested_vmx_check_controls()
3085 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != in nested_vmx_check_address_space_size()
3086 !!(vcpu->arch.efer & EFER_LMA))) in nested_vmx_check_address_space_size()
3087 return -EINVAL; in nested_vmx_check_address_space_size()
3098 u8 l1_address_bits_on_exit = (vmcs12->host_cr4 & X86_CR4_LA57) ? 57 : 48; in is_l1_noncanonical_address_on_vmexit()
3108 return -EINVAL; in nested_vmx_check_cet_state_common()
3116 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); in nested_vmx_check_host_state()
3118 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || in nested_vmx_check_host_state()
3119 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || in nested_vmx_check_host_state()
3120 CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) in nested_vmx_check_host_state()
3121 return -EINVAL; in nested_vmx_check_host_state()
3123 if (CC(vmcs12->host_cr4 & X86_CR4_CET && !(vmcs12->host_cr0 & X86_CR0_WP))) in nested_vmx_check_host_state()
3124 return -EINVAL; in nested_vmx_check_host_state()
3126 if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || in nested_vmx_check_host_state()
3127 CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu))) in nested_vmx_check_host_state()
3128 return -EINVAL; in nested_vmx_check_host_state()
3130 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && in nested_vmx_check_host_state()
3131 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) in nested_vmx_check_host_state()
3132 return -EINVAL; in nested_vmx_check_host_state()
3134 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && in nested_vmx_check_host_state()
3136 vmcs12->host_ia32_perf_global_ctrl))) in nested_vmx_check_host_state()
3137 return -EINVAL; in nested_vmx_check_host_state()
3140 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) in nested_vmx_check_host_state()
3141 return -EINVAL; in nested_vmx_check_host_state()
3143 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || in nested_vmx_check_host_state()
3144 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || in nested_vmx_check_host_state()
3145 CC((vmcs12->host_rip) >> 32)) in nested_vmx_check_host_state()
3146 return -EINVAL; in nested_vmx_check_host_state()
3149 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3150 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3151 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3152 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3153 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3154 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3155 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3156 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
3157 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
3158 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
3159 return -EINVAL; in nested_vmx_check_host_state()
3161 if (CC(is_noncanonical_base_address(vmcs12->host_fs_base, vcpu)) || in nested_vmx_check_host_state()
3162 CC(is_noncanonical_base_address(vmcs12->host_gs_base, vcpu)) || in nested_vmx_check_host_state()
3163 CC(is_noncanonical_base_address(vmcs12->host_gdtr_base, vcpu)) || in nested_vmx_check_host_state()
3164 CC(is_noncanonical_base_address(vmcs12->host_idtr_base, vcpu)) || in nested_vmx_check_host_state()
3165 CC(is_noncanonical_base_address(vmcs12->host_tr_base, vcpu)) || in nested_vmx_check_host_state()
3166 CC(is_l1_noncanonical_address_on_vmexit(vmcs12->host_rip, vmcs12))) in nested_vmx_check_host_state()
3167 return -EINVAL; in nested_vmx_check_host_state()
3170 * If the load IA32_EFER VM-exit control is 1, bits reserved in the in nested_vmx_check_host_state()
3172 * the values of the LMA and LME bits in the field must each be that of in nested_vmx_check_host_state()
3173 * the host address-space size VM-exit control. in nested_vmx_check_host_state()
3175 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { in nested_vmx_check_host_state()
3176 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || in nested_vmx_check_host_state()
3177 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || in nested_vmx_check_host_state()
3178 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) in nested_vmx_check_host_state()
3179 return -EINVAL; in nested_vmx_check_host_state()
3182 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_CET_STATE) { in nested_vmx_check_host_state()
3183 if (nested_vmx_check_cet_state_common(vcpu, vmcs12->host_s_cet, in nested_vmx_check_host_state()
3184 vmcs12->host_ssp, in nested_vmx_check_host_state()
3185 vmcs12->host_ssp_tbl)) in nested_vmx_check_host_state()
3186 return -EINVAL; in nested_vmx_check_host_state()
3190 * enter 64-bit mode after VM-exit; otherwise, higher in nested_vmx_check_host_state()
3191 * 32-bits must be all 0s. in nested_vmx_check_host_state()
3194 if (CC(is_noncanonical_msr_address(vmcs12->host_s_cet, vcpu)) || in nested_vmx_check_host_state()
3195 CC(is_noncanonical_msr_address(vmcs12->host_ssp, vcpu))) in nested_vmx_check_host_state()
3196 return -EINVAL; in nested_vmx_check_host_state()
3198 if (CC(vmcs12->host_s_cet >> 32) || CC(vmcs12->host_ssp >> 32)) in nested_vmx_check_host_state()
3199 return -EINVAL; in nested_vmx_check_host_state()
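/*
 * Illustrative sketch (not part of nested.c): the host-state checks above
 * rely on canonical-address tests; an address is canonical for an N-bit
 * virtual address width when bits 63:N-1 are all copies of bit N-1, i.e.
 * sign-extending the low N bits reproduces the value.  HOST_RIP is checked
 * against 57 bits when host CR4.LA57 is set, else 48.
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_canonical(uint64_t addr, unsigned int va_bits)
{
	int64_t sext = (int64_t)(addr << (64 - va_bits)) >> (64 - va_bits);

	return (uint64_t)sext == addr;
}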
3210 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_vmx_check_vmcs_link_ptr()
3213 if (vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_vmx_check_vmcs_link_ptr()
3216 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) in nested_vmx_check_vmcs_link_ptr()
3217 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3219 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_vmx_check_vmcs_link_ptr()
3220 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_vmx_check_vmcs_link_ptr()
3221 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) in nested_vmx_check_vmcs_link_ptr()
3222 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3224 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, in nested_vmx_check_vmcs_link_ptr()
3227 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3231 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3237 * Checks related to Guest Non-register State
3241 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && in nested_check_guest_non_reg_state()
3242 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && in nested_check_guest_non_reg_state()
3243 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) in nested_check_guest_non_reg_state()
3244 return -EINVAL; in nested_check_guest_non_reg_state()
3253 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); in nested_vmx_check_guest_state()
3257 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || in nested_vmx_check_guest_state()
3258 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) in nested_vmx_check_guest_state()
3259 return -EINVAL; in nested_vmx_check_guest_state()
3261 if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP))) in nested_vmx_check_guest_state()
3262 return -EINVAL; in nested_vmx_check_guest_state()
3264 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && in nested_vmx_check_guest_state()
3265 (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || in nested_vmx_check_guest_state()
3266 CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false)))) in nested_vmx_check_guest_state()
3267 return -EINVAL; in nested_vmx_check_guest_state()
3269 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && in nested_vmx_check_guest_state()
3270 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) in nested_vmx_check_guest_state()
3271 return -EINVAL; in nested_vmx_check_guest_state()
3275 return -EINVAL; in nested_vmx_check_guest_state()
3278 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && in nested_vmx_check_guest_state()
3280 vmcs12->guest_ia32_perf_global_ctrl))) in nested_vmx_check_guest_state()
3281 return -EINVAL; in nested_vmx_check_guest_state()
3283 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) in nested_vmx_check_guest_state()
3284 return -EINVAL; in nested_vmx_check_guest_state()
3286 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || in nested_vmx_check_guest_state()
3287 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) in nested_vmx_check_guest_state()
3288 return -EINVAL; in nested_vmx_check_guest_state()
3291 * If the load IA32_EFER VM-entry control is 1, the following checks in nested_vmx_check_guest_state()
3293 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3294 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of in nested_vmx_check_guest_state()
3295 * the IA-32e mode guest VM-exit control. It must also be identical in nested_vmx_check_guest_state()
3299 if (to_vmx(vcpu)->nested.nested_run_pending && in nested_vmx_check_guest_state()
3300 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { in nested_vmx_check_guest_state()
3301 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || in nested_vmx_check_guest_state()
3302 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || in nested_vmx_check_guest_state()
3303 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && in nested_vmx_check_guest_state()
3304 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) in nested_vmx_check_guest_state()
3305 return -EINVAL; in nested_vmx_check_guest_state()
3308 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && in nested_vmx_check_guest_state()
3309 (CC(is_noncanonical_msr_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || in nested_vmx_check_guest_state()
3310 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) in nested_vmx_check_guest_state()
3311 return -EINVAL; in nested_vmx_check_guest_state()
3313 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) { in nested_vmx_check_guest_state()
3314 if (nested_vmx_check_cet_state_common(vcpu, vmcs12->guest_s_cet, in nested_vmx_check_guest_state()
3315 vmcs12->guest_ssp, in nested_vmx_check_guest_state()
3316 vmcs12->guest_ssp_tbl)) in nested_vmx_check_guest_state()
3317 return -EINVAL; in nested_vmx_check_guest_state()
3320 * Guest SSP must have 63:N bits identical, rather than in nested_vmx_check_guest_state()
3321 * be canonical (i.e., 63:N-1 bits identical), where N is in nested_vmx_check_guest_state()
3322 * the CPU's maximum linear-address width. Similar to in nested_vmx_check_guest_state()
3324 * linear-address width. in nested_vmx_check_guest_state()
3326 if (CC(!__is_canonical_address(vmcs12->guest_ssp, max_host_virt_addr_bits() + 1))) in nested_vmx_check_guest_state()
3327 return -EINVAL; in nested_vmx_check_guest_state()
3331 return -EINVAL; in nested_vmx_check_guest_state()
3345 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3347 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3358 * there is no need to preserve other bits or save/restore the field. in nested_vmx_check_vmentry_hw()
3363 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in nested_vmx_check_vmentry_hw()
3365 vmx->loaded_vmcs->host_state.cr3 = cr3; in nested_vmx_check_vmentry_hw()
3369 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in nested_vmx_check_vmentry_hw()
3371 vmx->loaded_vmcs->host_state.cr4 = cr4; in nested_vmx_check_vmentry_hw()
3374 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in nested_vmx_check_vmentry_hw()
3377 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3378 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_check_vmentry_hw()
3379 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3380 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_check_vmentry_hw()
3388 "early hardware check VM-instruction error: ", error); in nested_vmx_check_vmentry_hw()
3402 * A non-failing VMEntry means we somehow entered guest mode with in nested_vmx_check_vmentry_hw()
3425 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { in nested_get_evmcs_page()
3437 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_get_evmcs_page()
3450 if (!vcpu->arch.pdptrs_from_userspace && in nested_get_vmcs12_pages()
3453 * Reload the guest's PDPTRs since after a migration in nested_get_vmcs12_pages()
3454 * the guest CR3 might be restored prior to setting the nested in nested_get_vmcs12_pages()
3457 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) in nested_get_vmcs12_pages()
3463 map = &vmx->nested.apic_access_page_map; in nested_get_vmcs12_pages()
3465 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) { in nested_get_vmcs12_pages()
3466 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn)); in nested_get_vmcs12_pages()
3468 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n", in nested_get_vmcs12_pages()
3470 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in nested_get_vmcs12_pages()
3471 vcpu->run->internal.suberror = in nested_get_vmcs12_pages()
3473 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3479 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3481 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { in nested_get_vmcs12_pages()
3482 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); in nested_get_vmcs12_pages()
3498 * force VM-Entry to fail. in nested_get_vmcs12_pages()
3505 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3507 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { in nested_get_vmcs12_pages()
3508 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3509 (struct pi_desc *)(((void *)map->hva) + in nested_get_vmcs12_pages()
3510 offset_in_page(vmcs12->posted_intr_desc_addr)); in nested_get_vmcs12_pages()
3512 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); in nested_get_vmcs12_pages()
3520 vmx->nested.pi_desc = NULL; in nested_get_vmcs12_pages()
3544 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_get_nested_state_pages()
3545 vcpu->run->internal.suberror = in vmx_get_nested_state_pages()
3547 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3568 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3572 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is in nested_vmx_write_pml_buffer()
3579 if (vmcs12->guest_pml_index >= PML_LOG_NR_ENTRIES) { in nested_vmx_write_pml_buffer()
3580 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3585 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; in nested_vmx_write_pml_buffer()
3587 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, in nested_vmx_write_pml_buffer()
3591 vmcs12->guest_pml_index--; in nested_vmx_write_pml_buffer()
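/*
 * Illustrative sketch (not part of nested.c): the PML append above roughly
 * mirrors the hardware behavior - the guest PML index counts down from 511,
 * each logged GPA lands at pml_address + 8 * index, and the buffer is
 * treated as full once the index has wrapped past zero.  A hedged model,
 * assuming one 4KiB page of 512 eight-byte entries:
 */
#include <stdint.h>
#include <string.h>

#define PML_NR_ENTRIES 512

static int pml_log_gpa(uint8_t *pml_page, uint16_t *index, uint64_t gpa)
{
	if (*index >= PML_NR_ENTRIES)
		return -1;	/* index wrapped: caller should emulate a PML-full exit */

	memcpy(pml_page + sizeof(uint64_t) * *index, &gpa, sizeof(gpa));
	(*index)--;		/* 0 wraps to 0xffff, marking the buffer full */
	return 0;
}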
3605 if (!to_vmx(vcpu)->nested.vmxon) { in nested_vmx_check_permission()
3626 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3644 vmx->nested.current_vmptr, in nested_vmx_enter_non_root_mode()
3645 vmcs12->guest_rip, in nested_vmx_enter_non_root_mode()
3646 vmcs12->guest_intr_status, in nested_vmx_enter_non_root_mode()
3647 vmcs12->vm_entry_intr_info_field, in nested_vmx_enter_non_root_mode()
3648 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT, in nested_vmx_enter_non_root_mode()
3649 vmcs12->ept_pointer, in nested_vmx_enter_non_root_mode()
3650 vmcs12->guest_cr3, in nested_vmx_enter_non_root_mode()
3655 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3656 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) in nested_vmx_enter_non_root_mode()
3657 vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read(); in nested_vmx_enter_non_root_mode()
3659 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3660 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) in nested_vmx_enter_non_root_mode()
3661 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3663 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3664 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE)) in nested_vmx_enter_non_root_mode()
3665 vmcs_read_cet_state(vcpu, &vmx->nested.pre_vmenter_s_cet, in nested_vmx_enter_non_root_mode()
3666 &vmx->nested.pre_vmenter_ssp, in nested_vmx_enter_non_root_mode()
3667 &vmx->nested.pre_vmenter_ssp_tbl); in nested_vmx_enter_non_root_mode()
3671 * nested early checks are disabled. In the event of a "late" VM-Fail, in nested_vmx_enter_non_root_mode()
3672 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its in nested_vmx_enter_non_root_mode()
3673 * software model to the pre-VMEntry host state. When EPT is disabled, in nested_vmx_enter_non_root_mode()
3675 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing in nested_vmx_enter_non_root_mode()
3678 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is in nested_vmx_enter_non_root_mode()
3679 * guaranteed to be overwritten with a shadow CR3 prior to re-entering in nested_vmx_enter_non_root_mode()
3681 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks in nested_vmx_enter_non_root_mode()
3682 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail in nested_vmx_enter_non_root_mode()
3686 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); in nested_vmx_enter_non_root_mode()
3688 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3690 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); in nested_vmx_enter_non_root_mode()
3694 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3699 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3706 vmcs12->exit_qualification = entry_failure_code; in nested_vmx_enter_non_root_mode()
3715 vmcs12->exit_qualification = entry_failure_code; in nested_vmx_enter_non_root_mode()
3721 vmcs12->vm_entry_msr_load_addr, in nested_vmx_enter_non_root_mode()
3722 vmcs12->vm_entry_msr_load_count); in nested_vmx_enter_non_root_mode()
3725 vmcs12->exit_qualification = failed_index; in nested_vmx_enter_non_root_mode()
3731 * "get pages" would need to read data from the guest (i.e. we will in nested_vmx_enter_non_root_mode()
3733 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs in nested_vmx_enter_non_root_mode()
3740 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI in nested_vmx_enter_non_root_mode()
3741 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can in nested_vmx_enter_non_root_mode()
3742 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit in nested_vmx_enter_non_root_mode()
3746 if ((__exec_controls_get(&vmx->vmcs01) & (CPU_BASED_INTR_WINDOW_EXITING | in nested_vmx_enter_non_root_mode()
3757 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3774 * 26.7 "VM-entry failures during or after loading guest state". in nested_vmx_enter_non_root_mode()
3777 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) in nested_vmx_enter_non_root_mode()
3778 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; in nested_vmx_enter_non_root_mode()
3782 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3788 vmcs12->vm_exit_reason = exit_reason.full; in nested_vmx_enter_non_root_mode()
3790 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3796 * for running an L2 nested guest.
3821 vmx->nested.current_vmptr == INVALID_GPA)) in nested_vmx_run()
3830 * VM-instruction error field. in nested_vmx_run()
3832 if (CC(vmcs12->hdr.shadow_vmcs)) in nested_vmx_run()
3838 copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields); in nested_vmx_run()
3840 vmcs12->launch_state = !launch; in nested_vmx_run()
3858 if (CC(vmcs12->launch_state == launch)) in nested_vmx_run()
3876 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3877 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3882 /* Hide L1D cache contents from the nested guest. */ in nested_vmx_run()
3883 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
3892 * therefore not be read from guest memory (which may not in nested_vmx_run()
3897 switch (vmcs12->guest_activity_state) { in nested_vmx_run()
3901 * awakened by event injection or by an NMI-window VM-exit or in nested_vmx_run()
3902 * by an interrupt-window VM-exit, halt the vcpu. in nested_vmx_run()
3904 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && in nested_vmx_run()
3907 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { in nested_vmx_run()
3908 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3913 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3923 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3933 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3934 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3937 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3940 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3944 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3945 * changed these bits, and therefore they need to be updated, but L0
3946 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3947 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3953 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
3954 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | in vmcs12_guest_cr0()
3955 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | in vmcs12_guest_cr0()
3956 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
3963 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
3964 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | in vmcs12_guest_cr4()
3965 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | in vmcs12_guest_cr4()
3966 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
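/*
 * Illustrative sketch (not part of nested.c): the three-way merge documented
 * above picks, for every CR0 bit, whichever location held the authoritative
 * value while L2 ran: live GUEST_CR0 for guest-owned bits, vmcs12->guest_cr0
 * for bits L1 trapped, and vmcs02's CR0_READ_SHADOW for bits only L0
 * trapped.  The same shape applies to CR4.  A userspace model:
 */
#include <stdint.h>

static uint64_t merge_guest_cr0(uint64_t hw_guest_cr0, uint64_t hw_read_shadow,
				uint64_t vmcs12_guest_cr0,
				uint64_t l1_mask,	/* vmcs12->cr0_guest_host_mask */
				uint64_t guest_owned)	/* bits L2 owned while running */
{
	return (hw_guest_cr0 & guest_owned) |			/* case 1 */
	       (vmcs12_guest_cr0 & l1_mask) |			/* case 2 */
	       (hw_read_shadow & ~(l1_mask | guest_owned));	/* case 3 */
}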
3977 * Per the SDM, VM-Exits due to double and triple faults are never in vmcs12_save_pending_event()
3982 * event results in a double-fault exception". It's unclear why the in vmcs12_save_pending_event()
3996 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3997 } else if (vcpu->arch.exception.injected) { in vmcs12_save_pending_event()
3998 nr = vcpu->arch.exception.vector; in vmcs12_save_pending_event()
4002 vmcs12->vm_exit_instruction_len = in vmcs12_save_pending_event()
4003 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
4008 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
4010 vmcs12->idt_vectoring_error_code = in vmcs12_save_pending_event()
4011 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
4014 vmcs12->idt_vectoring_info_field = idt_vectoring; in vmcs12_save_pending_event()
4015 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
4016 vmcs12->idt_vectoring_info_field = in vmcs12_save_pending_event()
4018 } else if (vcpu->arch.interrupt.injected) { in vmcs12_save_pending_event()
4019 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
4022 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
4024 vmcs12->vm_entry_instruction_len = in vmcs12_save_pending_event()
4025 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
4029 vmcs12->idt_vectoring_info_field = idt_vectoring; in vmcs12_save_pending_event()
4031 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
4047 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; in nested_mark_vmcs12_pages_dirty()
4052 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; in nested_mark_vmcs12_pages_dirty()
4064 if (!vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
4067 if (!vmx->nested.pi_desc) in vmx_complete_nested_posted_interrupt()
4070 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
4072 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
4075 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); in vmx_complete_nested_posted_interrupt()
4077 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
4081 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
4096 return -ENXIO; in vmx_complete_nested_posted_interrupt()
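/*
 * Illustrative sketch (not part of nested.c): completing a nested posted
 * interrupt boils down to atomically draining the 256-bit PIR (eight 32-bit
 * words) into the virtual-APIC IRR.  This hedged model flattens the IRR into
 * a plain array; the real virtual-APIC page spreads it across registers.
 */
#include <stdbool.h>
#include <stdint.h>

static bool pir_to_virr(uint32_t pir[8], uint32_t virr[8])
{
	bool found = false;
	int i;

	for (i = 0; i < 8; i++) {
		/* Atomically claim this word of pending vectors. */
		uint32_t pending = __atomic_exchange_n(&pir[i], 0, __ATOMIC_ACQ_REL);

		if (pending) {
			virr[i] |= pending;
			found = true;
		}
	}

	return found;
}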
4101 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in nested_vmx_inject_exception_vmexit()
4102 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; in nested_vmx_inject_exception_vmexit()
4106 if (ex->has_payload) { in nested_vmx_inject_exception_vmexit()
4107 exit_qual = ex->payload; in nested_vmx_inject_exception_vmexit()
4108 } else if (ex->vector == PF_VECTOR) { in nested_vmx_inject_exception_vmexit()
4109 exit_qual = vcpu->arch.cr2; in nested_vmx_inject_exception_vmexit()
4110 } else if (ex->vector == DB_VECTOR) { in nested_vmx_inject_exception_vmexit()
4111 exit_qual = vcpu->arch.dr6; in nested_vmx_inject_exception_vmexit()
4120 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the in nested_vmx_inject_exception_vmexit()
4121 * "has error code" flags on VM-Exit if the CPU is in Real Mode. in nested_vmx_inject_exception_vmexit()
4123 if (ex->has_error_code && is_protmode(vcpu)) { in nested_vmx_inject_exception_vmexit()
4125 * Intel CPUs do not generate error codes with bits 31:16 set, in nested_vmx_inject_exception_vmexit()
4126 * and more importantly VMX disallows setting bits 31:16 in the in nested_vmx_inject_exception_vmexit()
4127 * injected error code for VM-Entry. Drop the bits to mimic in nested_vmx_inject_exception_vmexit()
4128 * hardware and avoid inducing failure on nested VM-Entry if L1 in nested_vmx_inject_exception_vmexit()
4130 * generate "full" 32-bit error codes, so KVM allows userspace in nested_vmx_inject_exception_vmexit()
4131 * to inject exception error codes with bits 31:16 set. in nested_vmx_inject_exception_vmexit()
4133 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code; in nested_vmx_inject_exception_vmexit()
4137 if (kvm_exception_is_soft(ex->vector)) in nested_vmx_inject_exception_vmexit()
4142 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && in nested_vmx_inject_exception_vmexit()
4151 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6).
4152 * Using the payload is flawed because code breakpoints (fault-like) and data
4153 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e.
4154 * this will return false positives if a to-be-injected code breakpoint #DB is
4157 * too is trap-like.
4162 * from the emulator (because such #DBs are fault-like and thus don't trigger
4167 if (!ex->pending || ex->vector != DB_VECTOR) in vmx_get_pending_dbg_trap()
4170 /* General Detect #DBs are always fault-like. */ in vmx_get_pending_dbg_trap()
4171 return ex->payload & ~DR6_BD; in vmx_get_pending_dbg_trap()
4176 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by
4186 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
4187 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
4190 * field if a VM-exit is delivered before the debug trap.
4196 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception); in nested_vmx_update_pending_dbg()
4204 to_vmx(vcpu)->nested.preemption_timer_expired; in nested_vmx_preemption_timer_pending()
4210 void *vapic = vmx->nested.virtual_apic_map.hva; in vmx_has_nested_events()
4214 vmx->nested.mtf_pending) in vmx_has_nested_events()
4220 * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move in vmx_has_nested_events()
4221 * the interrupt from the PIR to RVI prior to entering the guest. in vmx_has_nested_events()
4239 if (vmx->nested.pi_pending && vmx->nested.pi_desc && in vmx_has_nested_events()
4240 pi_test_on(vmx->nested.pi_desc)) { in vmx_has_nested_events()
4241 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); in vmx_has_nested_events()
4251 * edits to fill in missing examples, e.g. #DB due to split-lock accesses,
4252 * and less minor edits to splice in the priority of VMX Non-Root specific
4253 * events, e.g. MTF and NMI/INTR-window exiting.
4256 * - RESET
4257 * - Machine Check
4260 * - T flag in TSS is set (on task switch)
4263 * - FLUSH
4264 * - STOPCLK
4265 * - SMI
4266 * - INIT
4268 * 3.5 Monitor Trap Flag (MTF) VM-exit[1]
4271 * - Breakpoints
4272 * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O
4273 * breakpoint, or #DB due to a split-lock access)
4275 * 4.3 VMX-preemption timer expired VM-exit
4277 * 4.6 NMI-window exiting VM-exit[2]
4281 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery
4288 * - Code-Segment Limit Violation
4289 * - Code Page Fault
4290 * - Control protection exception (missing ENDBRANCH at target of indirect
4294 * - Instruction length > 15 bytes
4295 * - Invalid Opcode
4296 * - Coprocessor Not Available
4299 * - Overflow
4300 * - Bound error
4301 * - Invalid TSS
4302 * - Segment Not Present
4303 * - Stack fault
4304 * - General Protection
4305 * - Data Page Fault
4306 * - Alignment Check
4307 * - x86 FPU Floating-point exception
4308 * - SIMD floating-point exception
4309 * - Virtualization exception
4310 * - Control protection exception
4312 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs),
4314 * MTF VM exits take priority over debug-trap exceptions and lower priority
4317 * [2] Debug-trap exceptions and higher priority events take priority over VM exits
4318 * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption
4319 * timer take priority over VM exits caused by the "NMI-window exiting"
4320 * VM-execution control and lower priority events.
4322 * [3] Debug-trap exceptions and higher priority events take priority over VM exits
4323 * caused by "NMI-window exiting". VM exits caused by this control take
4324 * priority over non-maskable interrupts (NMIs) and lower priority events.
4326 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to
4327 * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus,
4328 * non-maskable interrupts (NMIs) and higher priority events take priority over
4334 struct kvm_lapic *apic = vcpu->arch.apic; in vmx_check_nested_events()
4341 bool block_nested_exceptions = vmx->nested.nested_run_pending; in vmx_check_nested_events()
4344 * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need in vmx_check_nested_events()
4350 * VM-Exit that occurred _during_ instruction execution; new events, in vmx_check_nested_events()
4356 * Inject events are blocked by nested VM-Enter, as KVM is responsible in vmx_check_nested_events()
4358 * wait until after VM-Enter completes to deliver injected events. in vmx_check_nested_events()
4364 test_bit(KVM_APIC_INIT, &apic->pending_events)) { in vmx_check_nested_events()
4366 return -EBUSY; in vmx_check_nested_events()
4368 clear_bit(KVM_APIC_INIT, &apic->pending_events); in vmx_check_nested_events()
4369 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) in vmx_check_nested_events()
4373 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
4378 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { in vmx_check_nested_events()
4380 return -EBUSY; in vmx_check_nested_events()
4382 clear_bit(KVM_APIC_SIPI, &apic->pending_events); in vmx_check_nested_events()
4383 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vmx_check_nested_events()
4385 apic->sipi_vector & 0xFFUL); in vmx_check_nested_events()
4393 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but in vmx_check_nested_events()
4396 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except in vmx_check_nested_events()
4399 * prioritize SMI over MTF and trap-like #DBs. in vmx_check_nested_events()
4401 if (vcpu->arch.exception_vmexit.pending && in vmx_check_nested_events()
4402 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) { in vmx_check_nested_events()
4404 return -EBUSY; in vmx_check_nested_events()
4410 if (vcpu->arch.exception.pending && in vmx_check_nested_events()
4411 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) { in vmx_check_nested_events()
4413 return -EBUSY; in vmx_check_nested_events()
4417 if (vmx->nested.mtf_pending) { in vmx_check_nested_events()
4419 return -EBUSY; in vmx_check_nested_events()
4425 if (vcpu->arch.exception_vmexit.pending) { in vmx_check_nested_events()
4427 return -EBUSY; in vmx_check_nested_events()
4433 if (vcpu->arch.exception.pending) { in vmx_check_nested_events()
4435 return -EBUSY; in vmx_check_nested_events()
4441 return -EBUSY; in vmx_check_nested_events()
4446 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { in vmx_check_nested_events()
4448 return -EBUSY; in vmx_check_nested_events()
4452 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { in vmx_check_nested_events()
4454 return -EBUSY; in vmx_check_nested_events()
4462 * The NMI-triggered VM exit counts as injection: in vmx_check_nested_events()
4465 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
4475 return -EBUSY; in vmx_check_nested_events()
4482 return -EBUSY; in vmx_check_nested_events()
4489 if (irq != -1) { in vmx_check_nested_events()
4491 return -EBUSY; in vmx_check_nested_events()
4504 * interrupts for L2 instead of injecting VM-Exit, as the in vmx_check_nested_events()
4508 * and enabling posted interrupts requires ACK-on-exit. in vmx_check_nested_events()
4510 if (irq == vmx->nested.posted_intr_nv) { in vmx_check_nested_events()
4517 return -EBUSY; in vmx_check_nested_events()
4519 vmx->nested.pi_pending = true; in vmx_check_nested_events()
4525 return -EBUSY; in vmx_check_nested_events()
4531 * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must in vmx_check_nested_events()
4532 * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI in vmx_check_nested_events()
4546 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
4552 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
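/*
 * Illustrative sketch (not part of nested.c): the value started above
 * appears to be finished by converting the remaining nanoseconds into guest
 * TSC ticks and scaling down by the emulated preemption-timer rate, assumed
 * here to be 2^5 TSC ticks per timer tick.
 */
#include <stdint.h>

#define ASSUMED_PREEMPTION_TIMER_RATE	5	/* log2(TSC ticks per timer tick) */

static uint64_t ns_to_preemption_timer(uint64_t remaining_ns, uint64_t tsc_khz)
{
	uint64_t tsc_ticks = remaining_ns * tsc_khz / 1000000;

	return tsc_ticks >> ASSUMED_PREEMPTION_TIMER_RATE;
}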
4609 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4610 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4611 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4612 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4613 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4614 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4615 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4616 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4617 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4618 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4619 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4620 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4621 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4622 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4623 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4624 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4625 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4626 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4627 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4628 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4629 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4630 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4631 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4632 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4633 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); in sync_vmcs02_to_vmcs12_rare()
4634 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); in sync_vmcs02_to_vmcs12_rare()
4635 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); in sync_vmcs02_to_vmcs12_rare()
4636 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); in sync_vmcs02_to_vmcs12_rare()
4637 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); in sync_vmcs02_to_vmcs12_rare()
4638 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); in sync_vmcs02_to_vmcs12_rare()
4639 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4640 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); in sync_vmcs02_to_vmcs12_rare()
4641 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4642 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4643 vmcs12->guest_pending_dbg_exceptions = in sync_vmcs02_to_vmcs12_rare()
4646 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4655 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4659 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4662 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4667 vmx->loaded_vmcs = &vmx->vmcs01; in copy_vmcs02_to_vmcs12_rare()
4673 * Update the guest state fields of vmcs12 to reflect changes that
4674 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4675 * VM-entry controls is also updated, since this is really a guest
4685 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = in sync_vmcs02_to_vmcs12()
4688 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); in sync_vmcs02_to_vmcs12()
4689 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); in sync_vmcs02_to_vmcs12()
4691 vmcs12->guest_rsp = kvm_rsp_read(vcpu); in sync_vmcs02_to_vmcs12()
4692 vmcs12->guest_rip = kvm_rip_read(vcpu); in sync_vmcs02_to_vmcs12()
4693 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); in sync_vmcs02_to_vmcs12()
4695 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); in sync_vmcs02_to_vmcs12()
4696 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); in sync_vmcs02_to_vmcs12()
4698 vmcs12->guest_interruptibility_info = in sync_vmcs02_to_vmcs12()
4701 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in sync_vmcs02_to_vmcs12()
4702 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; in sync_vmcs02_to_vmcs12()
4703 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in sync_vmcs02_to_vmcs12()
4704 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; in sync_vmcs02_to_vmcs12()
4706 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; in sync_vmcs02_to_vmcs12()
4709 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && in sync_vmcs02_to_vmcs12()
4710 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4711 vmcs12->vmx_preemption_timer_value = in sync_vmcs02_to_vmcs12()
4723 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); in sync_vmcs02_to_vmcs12()
4725 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); in sync_vmcs02_to_vmcs12()
4726 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); in sync_vmcs02_to_vmcs12()
4727 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); in sync_vmcs02_to_vmcs12()
4728 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); in sync_vmcs02_to_vmcs12()
4732 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); in sync_vmcs02_to_vmcs12()
4735 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); in sync_vmcs02_to_vmcs12()
4737 vmcs12->vm_entry_controls = in sync_vmcs02_to_vmcs12()
4738 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | in sync_vmcs02_to_vmcs12()
4747 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) in sync_vmcs02_to_vmcs12()
4748 vmcs12->guest_dr7 = vcpu->arch.dr7; in sync_vmcs02_to_vmcs12()
4750 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) in sync_vmcs02_to_vmcs12()
4751 vmcs12->guest_ia32_efer = vcpu->arch.efer; in sync_vmcs02_to_vmcs12()
4753 vmcs_read_cet_state(&vmx->vcpu, &vmcs12->guest_s_cet, in sync_vmcs02_to_vmcs12()
4754 &vmcs12->guest_ssp, in sync_vmcs02_to_vmcs12()
4755 &vmcs12->guest_ssp_tbl); in sync_vmcs02_to_vmcs12()
4759 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4761 * and this function updates it to reflect the changes to the guest state while
4765 * could have been changed by the L2 guest or the exit - i.e., the guest-state and in prepare_vmcs12()
4766 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4774 vmcs12->vm_exit_reason = vm_exit_reason; in prepare_vmcs12()
4776 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; in prepare_vmcs12()
4777 vmcs12->exit_qualification = exit_qualification; in prepare_vmcs12()
4780 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched in prepare_vmcs12()
4784 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { in prepare_vmcs12()
4785 vmcs12->launch_state = 1; in prepare_vmcs12()
4789 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; in prepare_vmcs12()
4798 vmcs12->vm_exit_intr_info = exit_intr_info; in prepare_vmcs12()
4799 vmcs12->vm_exit_instruction_len = exit_insn_len; in prepare_vmcs12()
4800 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); in prepare_vmcs12()
4803 * According to spec, there's no need to store the guest's in prepare_vmcs12()
4804 * MSRs if the exit is due to a VM-entry failure that occurs in prepare_vmcs12()
4805 * during or after loading the guest state. Since this exit in prepare_vmcs12()
4809 vmcs12->vm_exit_msr_store_addr, in prepare_vmcs12()
4810 vmcs12->vm_exit_msr_store_count)) in prepare_vmcs12()
4817 * A part of what we need to do when the nested L2 guest exits and we want to
4818 * run its L1 parent is to reset L1's guest state to the host state specified
4821 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4822 * Failures During or After Loading Guest State").
4831 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) in load_vmcs12_host_state()
4832 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
4833 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) in load_vmcs12_host_state()
4834 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
4836 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
4837 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
4839 kvm_rsp_write(vcpu, vmcs12->host_rsp); in load_vmcs12_host_state()
4840 kvm_rip_write(vcpu, vmcs12->host_rip); in load_vmcs12_host_state()
4851 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); in load_vmcs12_host_state()
4852 vmx_set_cr0(vcpu, vmcs12->host_cr0); in load_vmcs12_host_state()
4854 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ in load_vmcs12_host_state()
4855 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
4856 vmx_set_cr4(vcpu, vmcs12->host_cr4); in load_vmcs12_host_state()
4864 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) in load_vmcs12_host_state()
4869 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); in load_vmcs12_host_state()
4870 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); in load_vmcs12_host_state()
4871 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); in load_vmcs12_host_state()
4872 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); in load_vmcs12_host_state()
4873 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); in load_vmcs12_host_state()
4878 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) in load_vmcs12_host_state()
4883 * otherwise CET state should be retained across VM-exit, i.e., in load_vmcs12_host_state()
4884 * guest values should be propagated from vmcs12 to vmcs01. in load_vmcs12_host_state()
4886 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_CET_STATE) in load_vmcs12_host_state()
4887 vmcs_write_cet_state(vcpu, vmcs12->host_s_cet, vmcs12->host_ssp, in load_vmcs12_host_state()
4888 vmcs12->host_ssp_tbl); in load_vmcs12_host_state()
4890 vmcs_write_cet_state(vcpu, vmcs12->guest_s_cet, vmcs12->guest_ssp, in load_vmcs12_host_state()
4891 vmcs12->guest_ssp_tbl); in load_vmcs12_host_state()
4893 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { in load_vmcs12_host_state()
4894 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); in load_vmcs12_host_state()
4895 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
4897 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && in load_vmcs12_host_state()
4900 vmcs12->host_ia32_perf_global_ctrl)); in load_vmcs12_host_state()
4903 27.5.2 Loading Host Segment and Descriptor-Table Registers */ in load_vmcs12_host_state()
4907 .selector = vmcs12->host_cs_selector, in load_vmcs12_host_state()
4913 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) in load_vmcs12_host_state()
4927 seg.selector = vmcs12->host_ds_selector; in load_vmcs12_host_state()
4929 seg.selector = vmcs12->host_es_selector; in load_vmcs12_host_state()
4931 seg.selector = vmcs12->host_ss_selector; in load_vmcs12_host_state()
4933 seg.selector = vmcs12->host_fs_selector; in load_vmcs12_host_state()
4934 seg.base = vmcs12->host_fs_base; in load_vmcs12_host_state()
4936 seg.selector = vmcs12->host_gs_selector; in load_vmcs12_host_state()
4937 seg.base = vmcs12->host_gs_base; in load_vmcs12_host_state()
4940 .base = vmcs12->host_tr_base, in load_vmcs12_host_state()
4942 .selector = vmcs12->host_tr_selector, in load_vmcs12_host_state()
4955 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, in load_vmcs12_host_state()
4956 vmcs12->vm_exit_msr_load_count)) in load_vmcs12_host_state()
4959 to_vt(vcpu)->emulation_required = vmx_emulation_required(vcpu); in load_vmcs12_host_state()
4973 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4974 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) in nested_vmx_get_vmcs01_guest_efer()
4975 return vmx->msr_autoload.guest.val[i].value; in nested_vmx_get_vmcs01_guest_efer()
4980 return efer_msr->data; in nested_vmx_get_vmcs01_guest_efer()
4993 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); in nested_vmx_restore_host_state()
4995 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { in nested_vmx_restore_host_state()
4999 * and vcpu->arch.dr7 is not squirreled away before the in nested_vmx_restore_host_state()
5002 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in nested_vmx_restore_host_state()
5017 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); in nested_vmx_restore_host_state()
5020 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in nested_vmx_restore_host_state()
5024 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in nested_vmx_restore_host_state()
5031 * software model is up-to-date. in nested_vmx_restore_host_state()
5046 * from the guest value. The intent is to stuff host state as in nested_vmx_restore_host_state()
5049 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
5050 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); in nested_vmx_restore_host_state()
5053 "%s read MSR index failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
5058 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
5059 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); in nested_vmx_restore_host_state()
5066 if (h.index != g.index) in nested_vmx_restore_host_state()
5074 __func__, j, h.index, h.reserved); in nested_vmx_restore_host_state()
5078 if (kvm_emulate_msr_write(vcpu, h.index, h.value)) { in nested_vmx_restore_host_state()
5081 __func__, j, h.index, h.value); in nested_vmx_restore_host_state()
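The two loops above implement the MSR "restore" step of nested_vmx_restore_host_state(): every MSR named in vmcs12's VM-entry MSR-load list is looked up by index in the VM-exit MSR-load list, and the host value recorded there is written back via kvm_emulate_msr_write(). A minimal standalone sketch of that matching step (the structure layout and the write callback are illustrative; the real code additionally reads both lists from guest memory and validates the reserved field):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct msr_list_entry {
	uint32_t index;
	uint32_t reserved;	/* must be zero in the architectural format */
	uint64_t value;
};

/*
 * For each MSR in the VM-entry load list, find the same MSR in the VM-exit
 * load list and restore its recorded host value.  Returns false if any MSR
 * has no match, mirroring the abort path in the code above.
 */
static bool restore_host_msrs(const struct msr_list_entry *entry_list, size_t nr_entry,
			      const struct msr_list_entry *exit_list, size_t nr_exit,
			      void (*write_msr)(uint32_t index, uint64_t value))
{
	for (size_t i = 0; i < nr_entry; i++) {
		const struct msr_list_entry *host = NULL;

		for (size_t j = 0; j < nr_exit; j++) {
			if (exit_list[j].index == entry_list[i].index) {
				host = &exit_list[j];
				break;
			}
		}
		if (!host)
			return false;	/* no host value recorded for this MSR */
		write_msr(host->index, host->value);
	}
	return true;
}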
5094 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
5096 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
5105 /* Pending MTF traps are discarded on VM-Exit. */ in __nested_vmx_vmexit()
5106 vmx->nested.mtf_pending = false; in __nested_vmx_vmexit()
5109 WARN_ON_ONCE(vmx->nested.nested_run_pending); in __nested_vmx_vmexit()
5116 * do that when something is forcing L2->L1 exit prior to in __nested_vmx_vmexit()
5129 * up-to-date before switching to L1. in __nested_vmx_vmexit()
5137 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in __nested_vmx_vmexit()
5140 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; in __nested_vmx_vmexit()
5142 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; in __nested_vmx_vmexit()
5145 if (likely(!vmx->fail)) { in __nested_vmx_vmexit()
5148 if (vm_exit_reason != -1) in __nested_vmx_vmexit()
5158 * Otherwise, this flush will dirty guest memory at a in __nested_vmx_vmexit()
5159 * point it is already assumed by user-space to be in __nested_vmx_vmexit()
5165 * The only expected VM-instruction error is "VM entry with in __nested_vmx_vmexit()
5176 * Drop events/exceptions that were queued for re-injection to L2 in __nested_vmx_vmexit()
5179 * prepare_vmcs12(), events/exceptions queued for re-injection need to in __nested_vmx_vmexit()
5182 vcpu->arch.nmi_injected = false; in __nested_vmx_vmexit()
5186 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in __nested_vmx_vmexit()
5191 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in __nested_vmx_vmexit()
5192 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in __nested_vmx_vmexit()
5193 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); in __nested_vmx_vmexit()
5195 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); in __nested_vmx_vmexit()
5197 if (vmx->nested.l1_tpr_threshold != -1) in __nested_vmx_vmexit()
5198 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in __nested_vmx_vmexit()
5200 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in __nested_vmx_vmexit()
5201 vmx->nested.change_vmcs01_virtual_apic_mode = false; in __nested_vmx_vmexit()
5205 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { in __nested_vmx_vmexit()
5206 vmx->nested.update_vmcs01_cpu_dirty_logging = false; in __nested_vmx_vmexit()
5212 if (vmx->nested.reload_vmcs01_apic_access_page) { in __nested_vmx_vmexit()
5213 vmx->nested.reload_vmcs01_apic_access_page = false; in __nested_vmx_vmexit()
5217 if (vmx->nested.update_vmcs01_apicv_status) { in __nested_vmx_vmexit()
5218 vmx->nested.update_vmcs01_apicv_status = false; in __nested_vmx_vmexit()
5222 if (vmx->nested.update_vmcs01_hwapic_isr) { in __nested_vmx_vmexit()
5223 vmx->nested.update_vmcs01_hwapic_isr = false; in __nested_vmx_vmexit()
5227 if ((vm_exit_reason != -1) && in __nested_vmx_vmexit()
5229 vmx->nested.need_vmcs12_to_shadow_sync = true; in __nested_vmx_vmexit()
5234 if (likely(!vmx->fail)) { in __nested_vmx_vmexit()
5235 if (vm_exit_reason != -1) in __nested_vmx_vmexit()
5236 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, in __nested_vmx_vmexit()
5237 vmcs12->exit_qualification, in __nested_vmx_vmexit()
5238 vmcs12->idt_vectoring_info_field, in __nested_vmx_vmexit()
5239 vmcs12->vm_exit_intr_info, in __nested_vmx_vmexit()
5240 vmcs12->vm_exit_intr_error_code, in __nested_vmx_vmexit()
5247 * if the event is blocked (RFLAGS.IF is cleared on VM-Exit). in __nested_vmx_vmexit()
5251 * non-root mode. INIT/SIPI don't need to be checked as INIT in __nested_vmx_vmexit()
5252 * is blocked post-VMXON, and SIPIs are ignored. in __nested_vmx_vmexit()
5254 if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending) in __nested_vmx_vmexit()
5260 * After an early L2 VM-entry failure, we're now back in __nested_vmx_vmexit()
5263 * flag and the VM-instruction error field of the VMCS in __nested_vmx_vmexit()
5271 * means some amount of guest state has been propagated to KVM's in __nested_vmx_vmexit()
5276 vmx->fail = 0; in __nested_vmx_vmexit()
5286 * Decode the memory-address operand of a vmx instruction, as recorded on an
5287 * exit caused by such an instruction (run by a guest hypervisor).
5302 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). in get_vmx_mem_address()
5321 /* offset = base + [index * scale] + displacement */ in get_vmx_mem_address()
5348 * The virtual/linear address is never truncated in 64-bit in get_vmx_mem_address()
5349 * mode, e.g. a 32-bit address size can yield a 64-bit virtual in get_vmx_mem_address()
5350 * address when using FS/GS with a non-zero base. in get_vmx_mem_address()
5359 * non-canonical form. This is the only check on the memory in get_vmx_mem_address()
5366 * unconditionally truncated to 32 bits regardless of the in get_vmx_mem_address()
5373 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
5374 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5375 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5379 * read-only data segment or any code segment. in get_vmx_mem_address()
5384 * execute-only code segment in get_vmx_mem_address()
5399 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
5403 exn = exn || ((u64)off + len - 1 > s.limit); in get_vmx_mem_address()
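The "offset = base + [index * scale] + displacement" computation and the segment checks above are driven by the VM-exit instruction-information field. A standalone sketch of how its address-form bits are unpacked and combined (bit positions follow the SDM layout used by get_vmx_mem_address(); the register-read callback is a stand-in, and the sign-extension/truncation for 16- and 32-bit address sizes is omitted):

#include <stdbool.h>
#include <stdint.h>

struct vmx_addr_operand {
	int scaling;		/* bits 1:0  - index is shifted left by this        */
	int addr_size;		/* bits 9:7  - 0 = 16-bit, 1 = 32-bit, 2 = 64-bit   */
	int seg_reg;		/* bits 17:15                                       */
	int index_reg;		/* bits 21:18, valid only if bit 22 is clear        */
	bool index_valid;
	int base_reg;		/* bits 26:23, valid only if bit 27 is clear        */
	bool base_valid;
};

static struct vmx_addr_operand decode_vmx_insn_info(uint32_t info)
{
	struct vmx_addr_operand op = {
		.scaling     = info & 3,
		.addr_size   = (info >> 7) & 7,
		.seg_reg     = (info >> 15) & 7,
		.index_reg   = (info >> 18) & 0xf,
		.index_valid = !(info & (1u << 22)),
		.base_reg    = (info >> 23) & 0xf,
		.base_valid  = !(info & (1u << 27)),
	};
	return op;
}

/* offset = base + [index * scale] + displacement; the displacement comes from
 * the exit qualification, and read_gpr() stands in for the real register
 * accessor. */
static uint64_t vmx_operand_offset(const struct vmx_addr_operand *op, uint64_t displacement,
				   uint64_t (*read_gpr)(int reg))
{
	uint64_t off = displacement;

	if (op->base_valid)
		off += read_gpr(op->base_reg);
	if (op->index_valid)
		off += read_gpr(op->index_reg) << op->scaling;
	return off;
}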
5427 return -EINVAL; in nested_vmx_get_vmptr()
5433 return -EINVAL; in nested_vmx_get_vmptr()
5447 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
5452 * operation. VMXON faults if the CPU is already post-VMXON, so it in alloc_shadow_vmcs()
5457 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) in alloc_shadow_vmcs()
5458 return loaded_vmcs->shadow_vmcs; in alloc_shadow_vmcs()
5460 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); in alloc_shadow_vmcs()
5461 if (loaded_vmcs->shadow_vmcs) in alloc_shadow_vmcs()
5462 vmcs_clear(loaded_vmcs->shadow_vmcs); in alloc_shadow_vmcs()
5464 return loaded_vmcs->shadow_vmcs; in alloc_shadow_vmcs()
5472 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5476 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5477 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
5480 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; in enter_vmx_operation()
5481 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5482 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
5488 hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn, CLOCK_MONOTONIC, in enter_vmx_operation()
5491 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
5493 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
5494 vmx->nested.vmxon = true; in enter_vmx_operation()
5497 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5504 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
5507 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
5510 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5513 return -ENOMEM; in enter_vmx_operation()
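The kfree()/free_loaded_vmcs() calls and the final return -ENOMEM above are the tail of a goto-unwind error path whose labels did not match the search: each failed allocation in enter_vmx_operation() jumps to a label that releases only what was allocated before it, in reverse order. A generic sketch of the idiom (function, label, and parameter names are illustrative, not the file's actual ones):

static int alloc_two_caches(void **a, void **b, size_t a_size, size_t b_size)
{
	*a = kzalloc(a_size, GFP_KERNEL_ACCOUNT);
	if (!*a)
		goto out;

	*b = kzalloc(b_size, GFP_KERNEL_ACCOUNT);
	if (!*b)
		goto out_free_a;

	return 0;	/* success: caller owns both buffers */

out_free_a:
	kfree(*a);
	*a = NULL;
out:
	return -ENOMEM;
}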
5528 * the guest and so cannot rely on hardware to perform the check, in handle_vmxon()
5529 * which has higher priority than VM-Exit (see Intel SDM's pseudocode in handle_vmxon()
5532 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 in handle_vmxon()
5533 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't in handle_vmxon()
5534 * force any of the relevant guest state. For a restricted guest, KVM in handle_vmxon()
5545 * and has higher priority than the VM-Fail due to being post-VMXON, in handle_vmxon()
5546 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, in handle_vmxon()
5547 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits in handle_vmxon()
5549 * VMX non-root. in handle_vmxon()
5551 * Forwarding the VM-Exit unconditionally, i.e. without performing the in handle_vmxon()
5562 if (vmx->nested.vmxon) in handle_vmxon()
5568 * have lower priority than the VM-Fail above. in handle_vmxon()
5576 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmxon()
5590 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; in handle_vmxon()
5596 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || in handle_vmxon()
5600 vmx->nested.vmxon_ptr = vmptr; in handle_vmxon()
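The ordering discussed in the handle_vmxon() comments above (the CPL check is emulated by KVM because hardware only performs it for VMXON executed outside VMX operation, and a CPL failure must take priority over the "already post-VMXON" VM-Fail) reduces to an explicit test before the vmx->nested.vmxon check. Roughly, using existing KVM helpers (a sketch, not the file's verbatim code):

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);		/* CPL != 0: #GP(0), never a VM-Fail */
		return 1;
	}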
5612 if (vmx->nested.current_vmptr == INVALID_GPA) in nested_release_vmcs12()
5623 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
5625 /* Flush VMCS12 to guest memory */ in nested_release_vmcs12()
5627 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
5628 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5630 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in nested_release_vmcs12()
5632 vmx->nested.current_vmptr = INVALID_GPA; in nested_release_vmcs12()
5666 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5670 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5726 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, in handle_vmread()
5729 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmread()
5731 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) in handle_vmread()
5741 /* Read the field, zero-extended to a u64 value */ in handle_vmread()
5745 * Hyper-V TLFS (as of 6.0b) explicitly states that while an in handle_vmread()
5749 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a in handle_vmread()
5750 * workaround, as misbehaving guests will panic on VM-Fail. in handle_vmread()
5761 /* Read the field, zero-extended to a u64 value */ in handle_vmread()
5767 * Note that the number of bits actually copied is 32 or 64 depending in handle_vmread()
5768 * on the guest's mode (32 or 64 bit), not on the given field's length. in handle_vmread()
5824 * The value to write might be 32 or 64 bits, depending on L1's long in handle_vmwrite()
5826 * possible lengths. The code below first zero-extends the value to 64 in handle_vmwrite()
5828 * bits into the vmcs12 field. in handle_vmwrite()
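The handle_vmread() and handle_vmwrite() comments above describe the same width rule: the operand moved between the VMCS field and a register or memory is the guest's natural word size (32 or 64 bits), not the field's declared width, so values are zero-extended to 64 bits internally and truncated only when stored. A small standalone illustration of the store side (assumes a little-endian target, as on x86; the helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/*
 * Store a VMREAD result to guest memory: the number of bytes written is
 * dictated by the guest's mode, and the 64-bit value is simply truncated
 * (low bytes kept, on a little-endian machine) for a 32-bit guest.
 */
static void store_vmread_result(void *guest_buf, uint64_t value, bool guest_is_64bit)
{
	size_t len = guest_is_64bit ? 8 : 4;

	memcpy(guest_buf, &value, len);
}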
5836 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, in handle_vmwrite()
5839 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmwrite()
5841 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) in handle_vmwrite()
5864 * VMCS," then the "read-only" fields are actually read/write. in handle_vmwrite()
5871 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties in handle_vmwrite()
5878 * Some Intel CPUs intentionally drop the reserved bits of the AR byte in handle_vmwrite()
5891 * Do not track vmcs12 dirty-state if in guest-mode as we actually in handle_vmwrite()
5899 * shadow VMCS is up-to-date. in handle_vmwrite()
5903 vmcs_load(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5907 vmcs_clear(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5908 vmcs_load(vmx->loaded_vmcs->vmcs); in handle_vmwrite()
5911 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5919 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5923 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
5924 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5926 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5927 vmx->nested.force_msr_bitmap_recalc = true; in set_current_vmptr()
5946 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5953 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5954 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; in handle_vmptrld()
5957 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { in handle_vmptrld()
5960 * which means that the 32 bits located at the in handle_vmptrld()
5968 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, in handle_vmptrld()
5985 * Load VMCS12 from guest memory since it is not already in handle_vmptrld()
5988 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, in handle_vmptrld()
6005 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; in handle_vmptrst()
6042 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
6044 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
6056 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
6075 mmu = &vcpu->arch.guest_mmu; in handle_invept()
6084 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd, in handle_invept()
6089 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, in handle_invept()
6090 mmu->prev_roots[i].pgd, in handle_invept()
6104 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in handle_invept()
6123 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
6125 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
6137 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
6194 * linear mappings for L2 (tagged with L2's VPID). Free all guest in handle_invvpid()
6203 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu); in handle_invvpid()
6211 u32 index = kvm_rcx_read(vcpu); in nested_vmx_eptp_switching() local
6216 if (index >= VMFUNC_EPTP_ENTRIES) in nested_vmx_eptp_switching()
6219 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, in nested_vmx_eptp_switching()
6220 &new_eptp, index * 8, 8)) in nested_vmx_eptp_switching()
6224 * If the (L2) guest does a vmfunc to the currently in nested_vmx_eptp_switching()
6227 if (vmcs12->ept_pointer != new_eptp) { in nested_vmx_eptp_switching()
6231 vmcs12->ept_pointer = new_eptp; in nested_vmx_eptp_switching()
6259 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC in handle_vmfunc()
6267 if (!(vmcs12->vm_function_control & BIT_ULL(function))) in handle_vmfunc()
6282 * This is effectively a reflected VM-Exit, as opposed to a synthesized in handle_vmfunc()
6283 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode in handle_vmfunc()
6286 nested_vmx_vmexit(vcpu, vmx->vt.exit_reason.full, in handle_vmfunc()
6294 * a VM-exit into L1.
6304 b = -1; in nested_vmx_check_io_bitmaps()
6308 bitmap = vmcs12->io_bitmap_a; in nested_vmx_check_io_bitmaps()
6310 bitmap = vmcs12->io_bitmap_b; in nested_vmx_check_io_bitmaps()
6322 size--; in nested_vmx_check_io_bitmaps()
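nested_vmx_check_io_bitmaps() above performs the architectural I/O-bitmap lookup: bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, one bit per port, and the loop (note the size-- above) repeats the test for every port touched by the access. A self-contained sketch of the per-port test (the real code reads the relevant bitmap byte from L1's guest memory):

#include <stdbool.h>
#include <stdint.h>

/* True if 'port' is intercepted according to the two 4KB I/O bitmaps. */
static bool io_port_intercepted(const uint8_t *bitmap_a, const uint8_t *bitmap_b,
				uint16_t port)
{
	const uint8_t *bitmap = bitmap_a;	/* ports 0x0000-0x7fff */

	if (port & 0x8000) {
		bitmap = bitmap_b;		/* ports 0x8000-0xffff */
		port &= 0x7fff;
	}
	return bitmap[port / 8] & (1u << (port % 8));
}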
6370 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, in nested_vmx_exit_handled_msr()
6374 bitmap = vmcs12->msr_bitmap; in nested_vmx_exit_handled_msr()
6379 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
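The four 1024-byte bitmaps mentioned above are laid out as read-low (MSRs 0x0-0x1fff) at offset 0x000, read-high (MSRs 0xc0000000-0xc0001fff) at 0x400, write-low at 0x800 and write-high at 0xc00, which is why the code re-bases a high MSR with msr_index -= 0xc0000000 before indexing. A standalone sketch of the offset computation (the real code then fetches that single byte from L1's bitmap page in guest memory):

#include <stdbool.h>
#include <stdint.h>

/*
 * Locate the bitmap byte and bit for an MSR and access type within the 4KB
 * MSR-bitmap page.  MSRs outside the two covered ranges always intercept,
 * so return false for them.
 */
static bool msr_bitmap_slot(uint32_t msr, bool write, uint32_t *byte_off, uint32_t *bit)
{
	uint32_t base = write ? 0x800 : 0x000;	/* write bitmaps start at 2048 */

	if (msr <= 0x1fff) {
		/* low MSRs: first 1024 bytes of the read or write half */
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		base += 0x400;			/* high-MSR bitmap follows the low one */
		msr -= 0xc0000000;
	} else {
		return false;
	}

	*byte_off = base + msr / 8;
	*bit = msr % 8;
	return true;
}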
6412 if (vmcs12->cr0_guest_host_mask & in nested_vmx_exit_handled_cr()
6413 (val ^ vmcs12->cr0_read_shadow)) in nested_vmx_exit_handled_cr()
6421 if (vmcs12->cr4_guest_host_mask & in nested_vmx_exit_handled_cr()
6422 (vmcs12->cr4_read_shadow ^ val)) in nested_vmx_exit_handled_cr()
6432 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && in nested_vmx_exit_handled_cr()
6433 (vmcs12->cr0_read_shadow & X86_CR0_TS)) in nested_vmx_exit_handled_cr()
6439 if (vmcs12->cpu_based_vm_exec_control & in nested_vmx_exit_handled_cr()
6444 if (vmcs12->cpu_based_vm_exec_control & in nested_vmx_exit_handled_cr()
6452 * lmsw can change bits 1..3 of cr0, and only set bit 0 of in nested_vmx_exit_handled_cr()
6456 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
6457 (val ^ vmcs12->cr0_read_shadow)) in nested_vmx_exit_handled_cr()
6459 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
6460 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
6480 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); in nested_vmx_exit_handled_encls()
6497 /* Out-of-range fields always cause a VM exit from L2 to L1 */ in nested_vmx_exit_handled_vmcs_access()
6509 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; in nested_vmx_exit_handled_mtf()
6515 * An MTF VM-exit may be injected into the guest by setting the in nested_vmx_exit_handled_mtf()
6516 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6517 * is the case regardless of the 'monitor trap flag' VM-execution in nested_vmx_exit_handled_mtf()
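The "interruption-type 7, vector 0" case above corresponds to a VM-entry interruption-information word whose only set bits are the valid bit and the type field, so the test is a straight equality check. A minimal sketch (constant names mirror the usual layout: bits 7:0 vector, bits 10:8 type, bit 31 valid):

#include <stdbool.h>
#include <stdint.h>

#define INTR_INFO_VALID		(1u << 31)	/* bit 31: information is valid  */
#define INTR_TYPE_OTHER_EVENT	(7u << 8)	/* bits 10:8 == 7: "other event" */

/* True if L1 injected an MTF VM-exit: type 7, vector 0, valid bit set. */
static bool entry_injects_mtf(uint32_t entry_intr_info)
{
	return entry_intr_info == (INTR_INFO_VALID | INTR_TYPE_OTHER_EVENT);
}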
6539 return vcpu->arch.apf.host_apf_flags || in nested_vmx_l0_wants_exit()
6542 vcpu->guest_debug & in nested_vmx_l0_wants_exit()
6546 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in nested_vmx_l0_wants_exit()
6562 * missing in the guest EPT table (EPT12), the EPT violation in nested_vmx_l0_wants_exit()
6583 /* VM functions are emulated through L2->L0 vmexits. */ in nested_vmx_l0_wants_exit()
6593 /* Hyper-V L2 TLB flush hypercall is handled by L0 */ in nested_vmx_l0_wants_exit()
6621 return vmcs12->exception_bitmap & in nested_vmx_l1_wants_exit()
6651 vmcs12->vmread_bitmap); in nested_vmx_l1_wants_exit()
6654 vmcs12->vmwrite_bitmap); in nested_vmx_l1_wants_exit()
6662 * emulate them for its L2 guest, i.e., allows 3-level nesting! in nested_vmx_l1_wants_exit()
6698 * The controls for "virtualize APIC accesses," "APIC- in nested_vmx_l1_wants_exit()
6699 * register virtualization," and "virtual-interrupt in nested_vmx_l1_wants_exit()
6715 * XSS-bitmap, and always loads vmcs02 with vmcs12's XSS-bitmap in nested_vmx_l1_wants_exit()
6718 * in that case, before consulting the XSS-bitmap. in nested_vmx_l1_wants_exit()
6737 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6743 union vmx_exit_reason exit_reason = vmx->vt.exit_reason; in nested_vmx_reflect_vmexit()
6747 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6750 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM in nested_vmx_reflect_vmexit()
6753 if (unlikely(vmx->fail)) { in nested_vmx_reflect_vmexit()
6755 "hardware VM-instruction error: ", in nested_vmx_reflect_vmexit()
6774 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would in nested_vmx_reflect_vmexit()
6775 * need to be synthesized by querying the in-kernel LAPIC, but external in nested_vmx_reflect_vmexit()
6776 * interrupts are never reflected to L1 so it's a non-issue. in nested_vmx_reflect_vmexit()
6782 vmcs12->vm_exit_intr_error_code = in nested_vmx_reflect_vmexit()
6808 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6817 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6818 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6819 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6822 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); in vmx_get_nested_state()
6830 vmcs12->vmcs_link_pointer != INVALID_GPA) in vmx_get_nested_state()
6831 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); in vmx_get_nested_state()
6834 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6837 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6843 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6846 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6850 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6854 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6863 return -EFAULT; in vmx_get_nested_state()
6880 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6884 * clean fields data always up-to-date while in vmx_get_nested_state()
6885 * not in guest mode, 'hv_clean_fields' is only in vmx_get_nested_state()
6895 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); in vmx_get_nested_state()
6896 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); in vmx_get_nested_state()
6902 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) in vmx_get_nested_state()
6903 return -EFAULT; in vmx_get_nested_state()
6906 vmcs12->vmcs_link_pointer != INVALID_GPA) { in vmx_get_nested_state()
6907 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, in vmx_get_nested_state()
6909 return -EFAULT; in vmx_get_nested_state()
6918 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6919 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6932 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6935 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) in vmx_set_nested_state()
6936 return -EINVAL; in vmx_set_nested_state()
6938 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { in vmx_set_nested_state()
6939 if (kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6940 return -EINVAL; in vmx_set_nested_state()
6942 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) in vmx_set_nested_state()
6943 return -EINVAL; in vmx_set_nested_state()
6949 * be copied into eVMCS in guest memory. in vmx_set_nested_state()
6954 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) in vmx_set_nested_state()
6955 return -EINVAL; in vmx_set_nested_state()
6958 return -EINVAL; in vmx_set_nested_state()
6960 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) in vmx_set_nested_state()
6961 return -EINVAL; in vmx_set_nested_state()
6964 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6965 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) in vmx_set_nested_state()
6966 return -EINVAL; in vmx_set_nested_state()
6968 if (kvm_state->hdr.vmx.smm.flags & in vmx_set_nested_state()
6970 return -EINVAL; in vmx_set_nested_state()
6972 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) in vmx_set_nested_state()
6973 return -EINVAL; in vmx_set_nested_state()
6976 * SMM temporarily disables VMX, so we cannot be in guest mode, in vmx_set_nested_state()
6981 (kvm_state->flags & in vmx_set_nested_state()
6983 : kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6984 return -EINVAL; in vmx_set_nested_state()
6986 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6987 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
6988 return -EINVAL; in vmx_set_nested_state()
6990 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && in vmx_set_nested_state()
6992 !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6993 return -EINVAL; in vmx_set_nested_state()
6997 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) in vmx_set_nested_state()
7000 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
7006 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { in vmx_set_nested_state()
7008 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || in vmx_set_nested_state()
7009 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || in vmx_set_nested_state()
7010 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) in vmx_set_nested_state()
7011 return -EINVAL; in vmx_set_nested_state()
7016 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { in vmx_set_nested_state()
7017 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || in vmx_set_nested_state()
7018 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) in vmx_set_nested_state()
7019 return -EINVAL; in vmx_set_nested_state()
7021 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); in vmx_set_nested_state()
7023 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { in vmx_set_nested_state()
7030 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; in vmx_set_nested_state()
7034 return -EINVAL; in vmx_set_nested_state()
7037 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
7038 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
7039 vmx->nested.vmxon = false; in vmx_set_nested_state()
7041 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
7042 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
7046 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) in vmx_set_nested_state()
7047 return -EFAULT; in vmx_set_nested_state()
7049 if (vmcs12->hdr.revision_id != VMCS12_REVISION) in vmx_set_nested_state()
7050 return -EINVAL; in vmx_set_nested_state()
7052 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) in vmx_set_nested_state()
7055 vmx->nested.nested_run_pending = in vmx_set_nested_state()
7056 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); in vmx_set_nested_state()
7058 vmx->nested.mtf_pending = in vmx_set_nested_state()
7059 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); in vmx_set_nested_state()
7061 ret = -EINVAL; in vmx_set_nested_state()
7063 vmcs12->vmcs_link_pointer != INVALID_GPA) { in vmx_set_nested_state()
7066 if (kvm_state->size < in vmx_set_nested_state()
7068 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) in vmx_set_nested_state()
7072 user_vmx_nested_state->shadow_vmcs12, in vmx_set_nested_state()
7074 ret = -EFAULT; in vmx_set_nested_state()
7078 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || in vmx_set_nested_state()
7079 !shadow_vmcs12->hdr.shadow_vmcs) in vmx_set_nested_state()
7083 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
7084 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { in vmx_set_nested_state()
7085 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
7086 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
7087 kvm_state->hdr.vmx.preemption_timer_deadline; in vmx_set_nested_state()
7095 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
7096 vmx->nested.force_msr_bitmap_recalc = true; in vmx_set_nested_state()
7101 if (vmx->nested.mtf_pending) in vmx_set_nested_state()
7107 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
7128 * Note these are the so-called "index" of the VMCS field encoding, not in nested_vmx_calc_vmcs_enum_msr()
7129 * the index into vmcs12. in nested_vmx_calc_vmcs_enum_msr()
7137 * exposed to L1. Simply find the field with the highest index. in nested_vmx_calc_vmcs_enum_msr()
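The "index" mentioned above is a bit-field of the 32-bit VMCS field encoding (bits 9:1), not the field's offset inside struct vmcs12, and IA32_VMX_VMCS_ENUM reports the highest such index that may be in use. A standalone sketch of extracting it and computing the MSR value over a table of encodings (the table and helper names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Bits 9:1 of a VMCS field encoding form the field's "index". */
static uint32_t vmcs_field_encoding_index(uint32_t encoding)
{
	return (encoding >> 1) & 0x1ff;
}

/* IA32_VMX_VMCS_ENUM reports the highest supported index, also in bits 9:1. */
static uint64_t calc_vmcs_enum(const uint32_t *encodings, size_t nr)
{
	uint32_t max_idx = 0;

	for (size_t i = 0; i < nr; i++) {
		uint32_t idx = vmcs_field_encoding_index(encodings[i]);

		if (idx > max_idx)
			max_idx = idx;
	}
	return (uint64_t)max_idx << 1;
}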
7156 msrs->pinbased_ctls_low = in nested_vmx_setup_pinbased_ctls()
7159 msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl; in nested_vmx_setup_pinbased_ctls()
7160 msrs->pinbased_ctls_high &= in nested_vmx_setup_pinbased_ctls()
7165 msrs->pinbased_ctls_high |= in nested_vmx_setup_pinbased_ctls()
7173 msrs->exit_ctls_low = in nested_vmx_setup_exit_ctls()
7176 msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl; in nested_vmx_setup_exit_ctls()
7177 msrs->exit_ctls_high &= in nested_vmx_setup_exit_ctls()
7183 msrs->exit_ctls_high |= in nested_vmx_setup_exit_ctls()
7191 msrs->exit_ctls_high &= ~VM_EXIT_LOAD_CET_STATE; in nested_vmx_setup_exit_ctls()
7194 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; in nested_vmx_setup_exit_ctls()
7200 msrs->entry_ctls_low = in nested_vmx_setup_entry_ctls()
7203 msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl; in nested_vmx_setup_entry_ctls()
7204 msrs->entry_ctls_high &= in nested_vmx_setup_entry_ctls()
7210 msrs->entry_ctls_high |= in nested_vmx_setup_entry_ctls()
7216 msrs->entry_ctls_high &= ~VM_ENTRY_LOAD_CET_STATE; in nested_vmx_setup_entry_ctls()
7219 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; in nested_vmx_setup_entry_ctls()
7225 msrs->procbased_ctls_low = in nested_vmx_setup_cpubased_ctls()
7228 msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl; in nested_vmx_setup_cpubased_ctls()
7229 msrs->procbased_ctls_high &= in nested_vmx_setup_cpubased_ctls()
7245 * hardware. For example, L1 can specify an MSR bitmap - and we in nested_vmx_setup_cpubased_ctls()
7246 * can use it to avoid exits to L1 - even when L0 runs L2 in nested_vmx_setup_cpubased_ctls()
7249 msrs->procbased_ctls_high |= in nested_vmx_setup_cpubased_ctls()
7254 msrs->procbased_ctls_low &= in nested_vmx_setup_cpubased_ctls()
7262 msrs->secondary_ctls_low = 0; in nested_vmx_setup_secondary_ctls()
7264 msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; in nested_vmx_setup_secondary_ctls()
7265 msrs->secondary_ctls_high &= in nested_vmx_setup_secondary_ctls()
7284 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7289 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7291 msrs->ept_caps = in nested_vmx_setup_secondary_ctls()
7298 msrs->ept_caps &= ept_caps; in nested_vmx_setup_secondary_ctls()
7299 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | in nested_vmx_setup_secondary_ctls()
7303 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7305 msrs->ept_caps |= VMX_EPT_AD_BIT; in nested_vmx_setup_secondary_ctls()
7313 msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; in nested_vmx_setup_secondary_ctls()
7317 * Old versions of KVM use the single-context version without in nested_vmx_setup_secondary_ctls()
7320 * not failing the single-context invvpid, and it is worse. in nested_vmx_setup_secondary_ctls()
7323 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7325 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | in nested_vmx_setup_secondary_ctls()
7330 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7334 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7338 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; in nested_vmx_setup_secondary_ctls()
7344 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; in nested_vmx_setup_misc_data()
7345 msrs->misc_low |= in nested_vmx_setup_misc_data()
7350 msrs->misc_high = 0; in nested_vmx_setup_misc_data()
7358 * guest, and the VMCS structure we give it - not about the in nested_vmx_setup_basic()
7361 msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE, in nested_vmx_setup_basic()
7364 msrs->basic |= VMX_BASIC_TRUE_CTLS; in nested_vmx_setup_basic()
7366 msrs->basic |= VMX_BASIC_INOUT; in nested_vmx_setup_basic()
7368 msrs->basic |= VMX_BASIC_NO_HW_ERROR_CODE_CC; in nested_vmx_setup_basic()
7374 * These MSRs specify bits which the guest must keep fixed on in nested_vmx_setup_cr_fixed()
7380 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; in nested_vmx_setup_cr_fixed()
7381 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; in nested_vmx_setup_cr_fixed()
7383 /* These MSRs specify bits which the guest must keep fixed off. */ in nested_vmx_setup_cr_fixed()
7384 rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); in nested_vmx_setup_cr_fixed()
7385 rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); in nested_vmx_setup_cr_fixed()
7388 msrs->cr4_fixed1 |= X86_CR4_UMIP; in nested_vmx_setup_cr_fixed()
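The FIXED0/FIXED1 pairs read above encode the rule that, while in VMX operation, every CR0/CR4 bit set in FIXED0 must be 1 and every bit clear in FIXED1 must be 0. A self-contained sketch of the resulting validity test:

#include <stdbool.h>
#include <stdint.h>

/*
 * A CR value is legal in VMX operation iff forcing on the FIXED0 bits and
 * masking with FIXED1 leaves it unchanged: all required-1 bits are already
 * set and no required-0 bit is set.
 */
static bool cr_fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}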
7396 * Each of these control msrs has a low and high 32-bit half: A low bit is on
7397 * if the corresponding bit in the (32-bit) control field *must* be on, and a
7403 struct nested_vmx_msrs *msrs = &vmcs_conf->nested; in nested_vmx_setup_ctls_msrs()
7406 * Note that as a general rule, the high half of the MSRs (bits in in nested_vmx_setup_ctls_msrs()
7409 * can be supported) and the list of features we want to expose - in nested_vmx_setup_ctls_msrs()
7411 * Also, usually, the low half of the MSRs (bits which must be 1) can in nested_vmx_setup_ctls_msrs()
7412 * be set to 0, meaning that L1 may turn off any of these bits. The in nested_vmx_setup_ctls_msrs()
7413 * reason is that if one of these bits is necessary, it will appear in nested_vmx_setup_ctls_msrs()
7414 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control in nested_vmx_setup_ctls_msrs()
7415 * fields of vmcs01 and vmcs02, will turn these bits off - and in nested_vmx_setup_ctls_msrs()
7435 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); in nested_vmx_setup_ctls_msrs()
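Concretely, each control capability MSR described above packs the "must be 1" bits in its low 32 bits and the "may be 1" bits in its high 32 bits, and a control value L1 tries to program is acceptable only if it is a superset of the low half and a subset of the high half. A short sketch of the packing and the corresponding check (helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Pack the "must be 1" (low) and "may be 1" (high) halves into the 64-bit MSR value. */
static uint64_t pack_control_msr(uint32_t low, uint32_t high)
{
	return (uint64_t)high << 32 | low;
}

/* A requested control word is valid iff every required-1 bit is set and no
 * bit outside the allowed-1 mask is set. */
static bool control_value_ok(uint32_t control, uint32_t low, uint32_t high)
{
	return (control & low) == low && (control & ~high) == 0;
}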
7464 return -ENOMEM; in nested_vmx_hardware_setup()