Lines Matching refs:nested
15 #include "nested.h"
189 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
202 if (vmx->nested.current_vmptr == INVALID_GPA &&
213 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
230 vmx->nested.need_vmcs12_to_shadow_sync = false;
239 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map);
240 vmx->nested.hv_evmcs = NULL;
241 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
244 hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
245 hv_vcpu->nested.vm_id = 0;
246 hv_vcpu->nested.vp_id = 0;
263 * vmx->nested.hv_evmcs but this shouldn't be a problem.
269 if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
326 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map);
327 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map);
328 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map);
329 vmx->nested.pi_desc = NULL;
333 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
343 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
348 vmx->nested.vmxon = false;
349 vmx->nested.smm.vmxon = false;
350 vmx->nested.vmxon_ptr = INVALID_GPA;
351 free_vpid(vmx->nested.vpid02);
352 vmx->nested.posted_intr_nv = -1;
353 vmx->nested.current_vmptr = INVALID_GPA;
360 kfree(vmx->nested.cached_vmcs12);
361 vmx->nested.cached_vmcs12 = NULL;
362 kfree(vmx->nested.cached_shadow_vmcs12);
363 vmx->nested.cached_shadow_vmcs12 = NULL;
371 free_loaded_vmcs(&vmx->nested.vmcs02);
421 if (vmx->nested.pml_full) {
423 vmx->nested.pml_full = false;
426 * It should be impossible to trigger a nested PML Full VM-Exit
471 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
472 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
675 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
691 if (!vmx->nested.force_msr_bitmap_recalc) {
784 vmx->nested.force_msr_bitmap_recalc = false;
793 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
812 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
828 * In nested virtualization, check if L1 has set
895 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
896 vmx->nested.msrs.misc_high);
1027 * Load guest's/host's msr at nested entry/exit.
1082 if (msr_index == MSR_IA32_TSC && vmx->nested.tsc_autostore_slot >= 0) {
1083 int slot = vmx->nested.tsc_autostore_slot;
1167 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1214 * while L2 entries are tagged with vmx->nested.vpid02).
1221 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1243 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1265 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1266 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1300 u64 vmx_basic = vmcs_config.nested.basic;
1327 vmx->nested.msrs.basic = data;
1366 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1378 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1398 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1399 vmcs_config.nested.misc_high);
1411 if ((vmx->nested.msrs.pinbased_ctls_high &
1426 vmx->nested.msrs.misc_low = data;
1427 vmx->nested.msrs.misc_high = data >> 32;
1434 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1435 vmcs_config.nested.vpid_caps);
1441 vmx->nested.msrs.ept_caps = data;
1442 vmx->nested.msrs.vpid_caps = data >> 32;
1460 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1469 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1486 if (vmx->nested.vmxon)
1527 vmx->nested.msrs.vmcs_enum = data;
1530 if (data & ~vmcs_config.nested.vmfunc_controls)
1532 vmx->nested.msrs.vmfunc_controls = data;
1692 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1702 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
1703 hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
1704 hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
1940 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
2116 * This is an equivalent of the nested hypervisor executing the vmptrld
2136 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2137 vmx->nested.current_vmptr = INVALID_GPA;
2142 &vmx->nested.hv_evmcs_map))
2145 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2169 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2170 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2175 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2197 vmx->nested.hv_evmcs->hv_clean_fields &=
2200 vmx->nested.force_msr_bitmap_recalc = true;
2218 vmx->nested.need_vmcs12_to_shadow_sync = false;
2224 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2226 vmx->nested.preemption_timer_expired = true;
2241 if (!vmx->nested.has_preemption_timer_deadline) {
2242 vmx->nested.preemption_timer_deadline =
2244 vmx->nested.has_preemption_timer_deadline = true;
2246 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2259 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2269 hrtimer_start(&vmx->nested.preemption_timer,
2276 if (vmx->nested.nested_run_pending &&
2295 if (vmx->nested.vmcs02_initialized)
2297 vmx->nested.vmcs02_initialized = true;
2310 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2359 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2360 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2372 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx))
2383 vmx->nested.pi_pending = false;
2385 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2387 vmx->nested.posted_intr_nv = -1;
2516 if (vmx->nested.nested_run_pending) {
2624 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2668 * propagated to vmcs12's list on nested VM-Exit.
2676 vmx->nested.tsc_autostore_slot = vmx->msr_autostore.nr;
2681 vmx->nested.tsc_autostore_slot = -1;
2695 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2713 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) {
2715 vmx->nested.dirty_vmcs12 = false;
2721 if (vmx->nested.nested_run_pending &&
2728 vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl);
2731 if (!vmx->nested.nested_run_pending ||
2733 vmcs_write_cet_state(vcpu, vmx->nested.pre_vmenter_s_cet,
2734 vmx->nested.pre_vmenter_ssp,
2735 vmx->nested.pre_vmenter_ssp_tbl);
2737 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2739 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2750 if (vmx->nested.nested_run_pending &&
2797 * loading nested state after migration, it is possible to
2813 * on nested VM-Exit, which can occur without actually running L2 and
2873 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2877 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2887 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2891 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2904 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2920 vmx->nested.msrs.pinbased_ctls_low,
2921 vmx->nested.msrs.pinbased_ctls_high)) ||
2923 vmx->nested.msrs.procbased_ctls_low,
2924 vmx->nested.msrs.procbased_ctls_high)))
2929 vmx->nested.msrs.secondary_ctls_low,
2930 vmx->nested.msrs.secondary_ctls_high)))
2957 ~vmx->nested.msrs.vmfunc_controls))
2983 vmx->nested.msrs.exit_ctls_low,
2984 vmx->nested.msrs.exit_ctls_high)) ||
3000 vmx->nested.msrs.entry_ctls_low,
3001 vmx->nested.msrs.entry_ctls_high)))
3091 void *vapic = to_vmx(vcpu)->nested.virtual_apic_map.hva;
3249 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
3338 if (to_vmx(vcpu)->nested.nested_run_pending &&
3386 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3398 vmx->nested.need_vmcs12_to_shadow_sync = true;
3415 * the guest CR3 might be restored prior to setting the nested
3424 map = &vmx->nested.apic_access_page_map;
3440 map = &vmx->nested.virtual_apic_map;
3466 map = &vmx->nested.pi_desc_map;
3469 vmx->nested.pi_desc =
3481 vmx->nested.pi_desc = NULL;
3529 if (WARN_ON_ONCE(vmx->nested.pml_full))
3533 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3541 vmx->nested.pml_full = true;
3566 if (!to_vmx(vcpu)->nested.vmxon) {
3605 vmx->nested.current_vmptr,
3616 if (!vmx->nested.nested_run_pending ||
3618 vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read();
3620 (!vmx->nested.nested_run_pending ||
3622 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3624 if (!vmx->nested.nested_run_pending ||
3626 vmcs_read_cet_state(vcpu, &vmx->nested.pre_vmenter_s_cet,
3627 &vmx->nested.pre_vmenter_ssp,
3628 &vmx->nested.pre_vmenter_ssp_tbl);
3638 * vmcs01.GUEST_CR3 is safe because nested VM-Exits, and the unwind,
3645 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3714 vmx->nested.preemption_timer_expired = false;
3747 vmx->nested.need_vmcs12_to_shadow_sync = true;
3752 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3753 * for running an L2 nested guest.
3778 vmx->nested.current_vmptr == INVALID_GPA))
3803 * The nested entry process starts with enforcing various prerequisites
3831 * the nested entry.
3833 vmx->nested.nested_run_pending = 1;
3834 vmx->nested.has_preemption_timer_deadline = false;
3839 /* Hide L1D cache contents from the nested guest. */
3865 vmx->nested.nested_run_pending = 0;
3870 vmx->nested.nested_run_pending = 0;
3880 vmx->nested.nested_run_pending = 0;
3890 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3999 if (!vmx->nested.pi_pending)
4002 if (!vmx->nested.pi_desc)
4005 vmx->nested.pi_pending = false;
4007 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
4010 max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
4012 vapic_page = vmx->nested.virtual_apic_map.hva;
4016 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
4026 kvm_vcpu_map_mark_dirty(vcpu, &vmx->nested.virtual_apic_map);
4027 kvm_vcpu_map_mark_dirty(vcpu, &vmx->nested.pi_desc_map);
4064 * hardware and avoid inducing failure on nested VM-Entry if L1
4140 to_vmx(vcpu)->nested.preemption_timer_expired;
4146 void *vapic = vmx->nested.virtual_apic_map.hva;
4150 vmx->nested.mtf_pending)
4175 if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
4176 pi_test_on(vmx->nested.pi_desc)) {
4177 max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
4273 * Only a pending nested run blocks a pending exception. If there is a
4277 bool block_nested_exceptions = vmx->nested.nested_run_pending;
4292 * Inject events are blocked by nested VM-Enter, as KVM is responsible
4309 vmx->nested.mtf_pending = false;
4353 if (vmx->nested.mtf_pending) {
4446 if (irq == vmx->nested.posted_intr_nv) {
4455 vmx->nested.pi_pending = true;
4482 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4582 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4591 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4598 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4621 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4646 !vmx->nested.nested_run_pending)
4651 * In some cases (usually, nested EPT), L2 is allowed to change its
4695 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4753 * A part of what we need to when the nested L2 guest exits and we want to
4756 * This function is to be called not only on normal nested exit, but also on
4757 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4936 * nested VMENTER (not worth adding a variable in nested_vmx).
4977 * of VMFail), leaving the nested VM's MSRs in the software model
4980 * MSR that was (prematurely) loaded from the nested VMEntry load
5030 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
5042 vmx->nested.mtf_pending = false;
5045 WARN_ON_ONCE(vmx->nested.nested_run_pending);
5073 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
5142 vmx->nested.need_vmcs12_to_shadow_sync = true;
5364 * when L1 executes VMXOFF or the vCPU is forced out of nested
5385 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5389 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5390 if (!vmx->nested.cached_vmcs12)
5393 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5394 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5395 if (!vmx->nested.cached_shadow_vmcs12)
5401 hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn, CLOCK_MONOTONIC,
5404 vmx->nested.vpid02 = allocate_vpid();
5406 vmx->nested.vmcs02_initialized = false;
5407 vmx->nested.vmxon = true;
5417 kfree(vmx->nested.cached_shadow_vmcs12);
5420 kfree(vmx->nested.cached_vmcs12);
5423 free_loaded_vmcs(&vmx->nested.vmcs02);
5475 if (vmx->nested.vmxon)
5503 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
5513 vmx->nested.vmxon_ptr = vmptr;
5525 if (vmx->nested.current_vmptr == INVALID_GPA)
5536 vmx->nested.posted_intr_nv = -1;
5540 vmx->nested.current_vmptr >> PAGE_SHIFT,
5541 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5545 vmx->nested.current_vmptr = INVALID_GPA;
5579 if (vmptr == vmx->nested.vmxon_ptr)
5583 if (vmptr == vmx->nested.current_vmptr)
5642 if (vmx->nested.current_vmptr == INVALID_GPA ||
5752 if (vmx->nested.current_vmptr == INVALID_GPA ||
5824 vmx->nested.dirty_vmcs12 = true;
5832 vmx->nested.current_vmptr = vmptr;
5837 vmx->nested.need_vmcs12_to_shadow_sync = true;
5839 vmx->nested.dirty_vmcs12 = true;
5840 vmx->nested.force_msr_bitmap_recalc = true;
5859 if (vmptr == vmx->nested.vmxon_ptr)
5866 if (vmx->nested.current_vmptr != vmptr) {
5867 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5901 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5918 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5955 if (!(vmx->nested.msrs.secondary_ctls_high &
5957 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5969 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
6036 if (!(vmx->nested.msrs.secondary_ctls_high &
6038 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
6050 types = (vmx->nested.msrs.vpid_caps &
6162 * VMFUNC for nested VMs, but not for L1.
6196 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
6473 * L0 always deals with the EPT violation. If nested EPT is
6474 * used, and the nested mmu code discovers that the address is
6668 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6671 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6738 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6739 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6740 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6755 if (vmx->nested.smm.vmxon)
6758 if (vmx->nested.smm.guest_mode)
6764 if (vmx->nested.nested_run_pending)
6767 if (vmx->nested.mtf_pending)
6771 vmx->nested.has_preemption_timer_deadline) {
6775 vmx->nested.preemption_timer_deadline;
6801 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6839 to_vmx(vcpu)->nested.nested_run_pending = 0;
6913 !vmx->nested.enlightened_vmcs_enabled))
6921 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6951 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6959 vmx->nested.smm.vmxon = true;
6960 vmx->nested.vmxon = false;
6963 vmx->nested.smm.guest_mode = true;
6976 vmx->nested.nested_run_pending =
6979 vmx->nested.mtf_pending =
7004 vmx->nested.has_preemption_timer_deadline = false;
7006 vmx->nested.has_preemption_timer_deadline = true;
7007 vmx->nested.preemption_timer_deadline =
7016 vmx->nested.dirty_vmcs12 = true;
7017 vmx->nested.force_msr_bitmap_recalc = true;
7022 if (vmx->nested.mtf_pending)
7028 vmx->nested.nested_run_pending = 0;
7203 /* nested EPT: emulate EPT also to L1 */
7308 * returned for the various VMX controls MSRs when nested VMX is enabled.
7310 * valid during nested entry from L1 to L2.
7318 struct nested_vmx_msrs *msrs = &vmcs_conf->nested;
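Note: lines 2224, 2226, 2259, 2269 and 5401 above show how this file drives the emulated VMX-preemption timer from an hrtimer: hrtimer_setup() registers vmx_preemption_timer_fn(), which uses container_of() to recover the vcpu_vmx and latches nested.preemption_timer_expired. A minimal sketch of that pattern follows. It is illustrative, not the file's actual code: the two-field struct layout and the helper name nested_setup_preemption_timer() are assumptions (the real vcpu_vmx/nested_vmx definitions live in vmx.h), the real callback additionally requests an event check and kicks the vCPU, and the hrtimer mode argument is truncated in the listing above.

#include <linux/hrtimer.h>
#include <linux/container_of.h>

/* Simplified stand-ins for the real nested_vmx/vcpu_vmx (see vmx.h). */
struct nested_vmx {
	struct hrtimer preemption_timer;	/* armed on nested VM-Enter */
	bool preemption_timer_expired;		/* consulted when injecting events */
};

struct vcpu_vmx {
	struct nested_vmx nested;
};

/* Timer callback: recover the vCPU from the embedded hrtimer and latch expiry. */
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	/* The real callback also requests an event check and kicks the vCPU. */
	return HRTIMER_NORESTART;
}

/* Hypothetical helper mirroring the hrtimer_setup() call at line 5401 above. */
static void nested_setup_preemption_timer(struct vcpu_vmx *vmx)
{
	/*
	 * The mode argument is cut off in the listing; an absolute, pinned
	 * timer is used here purely for illustration.
	 */
	hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn,
		      CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
}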