xref: /linux/arch/x86/kvm/svm/nested.c (revision b89456aee78d22b20c6c83c4d75af7985ae5be8d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}
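
/*
 * Illustrative sketch, not upstream code: for SVM_EXIT_NPF, exit_info_1
 * carries the page-fault error code in its low 32 bits and fault-cause
 * detail in the high bits, roughly:
 *
 *	exit_info_1 = ((u64)npf_cause_bits << 32) | fault->error_code;
 *
 * npf_cause_bits is a hypothetical name; per the TODO above, KVM
 * currently hard-codes bit 32 instead of tracking the real cause.
 */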

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}
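
/*
 * Illustrative sketch, not upstream code: the PAE PDPTEs are four 8-byte
 * entries starting at nested_cr3, so the read above is equivalent to
 * fetching the guest physical address
 *
 *	gpa_t pdpte_gpa = cr3 + index * 8;
 *
 * from L1's memory (split into gfn + in-page offset for the page-read API).
 */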

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/*
		 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
		 * disable intercept of CR8 writes as L2's CR8 does not affect
		 * any interrupt KVM may want to inject.
		 *
		 * Similarly, disable intercept of virtual interrupts (used to
		 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
		 * the effective RFLAGS.IF for L1 interrupts will never be set
		 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
		 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
		if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
			vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/*
	 * We want to see VMMCALLs from a nested guest only when the Hyper-V
	 * L2 TLB flush feature is enabled.
	 */
	if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
		vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well  */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
		 * we must intercept these instructions to correctly
		 * emulate them in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}
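
/*
 * Illustrative sketch, not upstream code: for each intercept word, vmcb02
 * ends up with the OR of KVM's own (vmcb01) intercepts and L1's requested
 * (cached vmcb12) intercepts:
 *
 *	c->intercepts[i] = vmcb01->control.intercepts[i] |
 *			   svm->nested.ctl.intercepts[i];
 *
 * with a handful of bits then cleared or forced set where KVM knows better
 * (CR8 writes, VINTR, VMMCALL, SMI, VMLOAD/VMSAVE).
 */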

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where the KVM MSR permission
 * bitmap may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
	int i;

	/*
	 * The MSR bitmap update can be skipped when:
	 * - The MSR bitmap for L1 hasn't changed.
	 * - The nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - The nested hypervisor (L1) is using the Hyper-V emulation interface
	 *   and tells KVM (L0) there were no changes in the MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];

		/* x2APIC MSRs are always intercepted for the nested guest. */
		if (is_x2apic_msrpm_offset(p))
			continue;

		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
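
/*
 * Illustrative sketch, not upstream code: in the SVM MSR permission
 * bitmap a set bit means "intercept", so ORing L0's and L1's chunks
 * passes an MSR through to L2 only if both agree:
 *
 *	merged_chunk = l0_chunk | l1_chunk;	(set bit == intercept)
 *
 * Only offsets listed in msrpm_offsets[] can have zero bits in L0's
 * bitmap, which is why the loop above reads nothing else from L1.
 */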

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}
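
/*
 * Illustrative sketch, not upstream code: a typical use is validating
 * vmcb12's MSR permission bitmap address,
 *
 *	nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa, MSRPM_SIZE);
 *
 * which page-aligns the base and then requires both the first and the
 * last byte of the bitmap to be legal GPAs for the vCPU.
 */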

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
	/* Nested FLUSHBYASID is not supported yet.  */
	switch (tlb_ctl) {
	case TLB_CONTROL_DO_NOTHING:
	case TLB_CONTROL_FLUSH_ALL_ASID:
		return true;
	default:
		return false;
	}
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
		return false;

	if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
	       !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
		return false;
	}

	return true;
}
291ca46d739SPaolo Bonzini 
2926906e06dSKrish Sadhukhan /* Common checks that apply to both L1 and L2 state.  */
293b7a3d8b6SEmanuele Giuseppe Esposito static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
294b7a3d8b6SEmanuele Giuseppe Esposito 				     struct vmcb_save_area_cached *save)
2956906e06dSKrish Sadhukhan {
29611f0cbf0SSean Christopherson 	if (CC(!(save->efer & EFER_SVME)))
2976906e06dSKrish Sadhukhan 		return false;
2986906e06dSKrish Sadhukhan 
29911f0cbf0SSean Christopherson 	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
30011f0cbf0SSean Christopherson 	    CC(save->cr0 & ~0xffffffffULL))
3016906e06dSKrish Sadhukhan 		return false;
3026906e06dSKrish Sadhukhan 
30311f0cbf0SSean Christopherson 	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
3046906e06dSKrish Sadhukhan 		return false;
3056906e06dSKrish Sadhukhan 
306907afa48SEmanuele Giuseppe Esposito 	/*
307907afa48SEmanuele Giuseppe Esposito 	 * These checks are also performed by KVM_SET_SREGS,
308907afa48SEmanuele Giuseppe Esposito 	 * except that EFER.LMA is not checked by SVM against
309907afa48SEmanuele Giuseppe Esposito 	 * CR0.PG && EFER.LME.
310907afa48SEmanuele Giuseppe Esposito 	 */
311907afa48SEmanuele Giuseppe Esposito 	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
312907afa48SEmanuele Giuseppe Esposito 		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
313907afa48SEmanuele Giuseppe Esposito 		    CC(!(save->cr0 & X86_CR0_PE)) ||
314907afa48SEmanuele Giuseppe Esposito 		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
315907afa48SEmanuele Giuseppe Esposito 			return false;
316907afa48SEmanuele Giuseppe Esposito 	}
317907afa48SEmanuele Giuseppe Esposito 
318c33f6f22SSean Christopherson 	/* Note, SVM doesn't have any additional restrictions on CR4. */
319c33f6f22SSean Christopherson 	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
3206906e06dSKrish Sadhukhan 		return false;
3216906e06dSKrish Sadhukhan 
32263129754SPaolo Bonzini 	if (CC(!kvm_valid_efer(vcpu, save->efer)))
3236906e06dSKrish Sadhukhan 		return false;
3246906e06dSKrish Sadhukhan 
3256906e06dSKrish Sadhukhan 	return true;
3266906e06dSKrish Sadhukhan }

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa        = from->iopm_base_pa;
	to->msrpm_base_pa       = from->msrpm_base_pa;
	to->tsc_offset          = from->tsc_offset;
	to->tlb_ctl             = from->tlb_ctl;
	to->int_ctl             = from->int_ctl;
	to->int_vector          = from->int_vector;
	to->int_state           = from->int_state;
	to->exit_code           = from->exit_code;
	to->exit_code_hi        = from->exit_code_hi;
	to->exit_info_1         = from->exit_info_1;
	to->exit_info_2         = from->exit_info_2;
	to->exit_int_info       = from->exit_int_info;
	to->exit_int_info_err   = from->exit_int_info_err;
	to->nested_ctl          = from->nested_ctl;
	to->event_inj           = from->event_inj;
	to->event_inj_err       = from->event_inj_err;
	to->next_rip            = from->next_rip;
	to->nested_cr3          = from->nested_cr3;
	to->virt_ext            = from->virt_ext;
	to->pause_filter_count  = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid           = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa  &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
		       sizeof(to->hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOCTOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	/*
	 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
	 * virtual interrupts in order to request an interrupt window, as KVM
	 * has usurped vmcb02's int_ctl.  If an interrupt window opens before
	 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
	 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
	 * int_ctl (because it was never recognized while L2 was running).
	 */
	if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
	    !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
		mask &= ~V_IRQ_MASK;

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	if (nested_vnmi_enabled(svm))
		mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;

	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}
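
/*
 * Illustrative sketch, not upstream code: the sync above is a masked
 * read-modify-write that preserves L1's view of every int_ctl bit the
 * hardware doesn't write:
 *
 *	ctl->int_ctl = (ctl->int_ctl & ~mask) |
 *		       (vmcb02->control.int_ctl & mask);
 *
 * where mask covers V_IRQ/V_TPR plus, when exposed to L1, V_GIF and the
 * V_NMI pending/blocking bits.
 */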

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.vector;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}
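
/*
 * Illustrative sketch, not upstream code: EXIT_INT_INFO shares the
 * EVENTINJ layout (vector in bits 7:0, type in bits 10:8, a valid bit),
 * so a pending software interrupt through vector 0x80 is recorded as
 *
 *	exit_int_info = 0x80 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
 *
 * allowing L1 to re-inject the event after the nested VM-exit.
 */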

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure we check for
	 * pending entries in the right FIFO upon L1/L2 transition as these
	 * requests are put by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && npt_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}
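
/*
 * Illustrative sketch, not upstream code: on the nested-VMRUN path this
 * installs L2's CR3 from the cached vmcb12 save area, e.g.:
 *
 *	nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
 *			    nested_npt_enabled(svm), from_vmrun);
 *
 * With NPT the value is simply loaded; without NPT the PDPTRs are
 * re-read (for PAE) and the shadow root is switched via kvm_mmu_new_pgd().
 */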

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(vcpu, svm->nested.save.efer);

	svm_set_cr0(vcpu, svm->nested.save.cr0);
	svm_set_cr4(vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(vcpu, vmcb12->save.rax);
	kvm_rsp_write(vcpu, vmcb12->save.rsp);
	kvm_rip_write(vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}

static inline bool is_evtinj_soft(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
	u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	if (type == SVM_EVTINJ_TYPE_SOFT)
		return true;

	return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	return type == SVM_EVTINJ_TYPE_NMI;
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
	    (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	if (vnmi) {
		if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
			svm->vcpu.arch.nmi_pending++;
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
		}
		if (nested_vnmi_enabled(svm))
			int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
						V_NMI_ENABLE_MASK |
						V_NMI_BLOCKING_MASK);
	}

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
		nested_svm_update_tsc_ratio_msr(vcpu);

	vmcb02->control.int_ctl             =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector          = svm->nested.ctl.int_vector;
	vmcb02->control.int_state           = svm->nested.ctl.int_state;
	vmcb02->control.event_inj           = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	/*
	 * next_rip is consumed on VMRUN as the return address pushed on the
	 * stack for injected soft exceptions/interrupts.  If nrips is exposed
	 * to L1, take it verbatim from vmcb12.  If nrips is supported in
	 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
	 * prior to injecting the event).
	 */
	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
		vmcb02->control.next_rip    = svm->nested.ctl.next_rip;
	else if (boot_cpu_has(X86_FEATURE_NRIPS))
		vmcb02->control.next_rip    = vmcb12_rip;

	svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
	if (is_evtinj_soft(vmcb02->control.event_inj)) {
		svm->soft_int_injected = true;
		svm->soft_int_csbase = vmcb12_csbase;
		svm->soft_int_old_rip = vmcb12_rip;
		if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
		else
			svm->soft_int_next_rip = vmcb12_rip;
	}

	vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
					      LBR_CTL_ENABLE_MASK;
	if (guest_can_use(vcpu, X86_FEATURE_LBRV))
		vmcb02->control.virt_ext  |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
		pause_count12 = svm->nested.ctl.pause_filter_count;
	else
		pause_count12 = 0;
	if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
		pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
	else
		pause_thresh12 = 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested.  */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}
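
/*
 * Illustrative sketch, not upstream code: the int_ctl merge above splits
 * vmcb02's int_ctl between bits L1 controls and bits KVM keeps from
 * vmcb01:
 *
 *	vmcb02->control.int_ctl =
 *		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
 *		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
 *
 * V_GIF, for example, lands in the vmcb12 set only when vGIF is both
 * exposed to and enabled by L1; otherwise it stays under KVM's control.
 */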

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
				 vmcb12_gpa,
				 vmcb12->save.rip,
				 vmcb12->control.int_ctl,
				 vmcb12->control.event_inj,
				 vmcb12->control.nested_ctl,
				 vmcb12->control.nested_cr3,
				 vmcb12->save.cr3,
				 KVM_ISA_SVM);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	nested_svm_hv_update_vm_vp_ids(vcpu);

	return 0;
}
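
/*
 * Illustrative sketch, not upstream code: the emulated VMRUN sequence
 * above is, in order, roughly:
 *
 *	svm_switch_vmcb(svm, &svm->nested.vmcb02);	// run L2 on vmcb02
 *	nested_vmcb02_prepare_control(...);		// merge control area
 *	nested_vmcb02_prepare_save(...);		// load L2 guest state
 *	nested_svm_load_cr3(...);			// install L2's CR3
 *	svm_set_gif(svm, true);				// VMRUN sets GIF
 *
 * On the KVM_SET_NESTED_STATE path (!from_vmrun), loading the nested
 * state pages is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.
 */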
849883b0a91SJoerg Roedel 
85063129754SPaolo Bonzini int nested_svm_vmrun(struct kvm_vcpu *vcpu)
851883b0a91SJoerg Roedel {
85263129754SPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
853883b0a91SJoerg Roedel 	int ret;
8540dd16b5bSMaxim Levitsky 	struct vmcb *vmcb12;
855883b0a91SJoerg Roedel 	struct kvm_host_map map;
8560dd16b5bSMaxim Levitsky 	u64 vmcb12_gpa;
857db663af4SMaxim Levitsky 	struct vmcb *vmcb01 = svm->vmcb01.ptr;
858883b0a91SJoerg Roedel 
859fb79f566SVitaly Kuznetsov 	if (!svm->nested.hsave_msr) {
860fb79f566SVitaly Kuznetsov 		kvm_inject_gp(vcpu, 0);
861fb79f566SVitaly Kuznetsov 		return 1;
862fb79f566SVitaly Kuznetsov 	}
863fb79f566SVitaly Kuznetsov 
86463129754SPaolo Bonzini 	if (is_smm(vcpu)) {
86563129754SPaolo Bonzini 		kvm_queue_exception(vcpu, UD_VECTOR);
8667c67f546SPaolo Bonzini 		return 1;
8677c67f546SPaolo Bonzini 	}
868883b0a91SJoerg Roedel 
8693f4a812eSVitaly Kuznetsov 	/* This fails when VP assist page is enabled but the supplied GPA is bogus */
8703f4a812eSVitaly Kuznetsov 	ret = kvm_hv_verify_vp_assist(vcpu);
8713f4a812eSVitaly Kuznetsov 	if (ret) {
8723f4a812eSVitaly Kuznetsov 		kvm_inject_gp(vcpu, 0);
8733f4a812eSVitaly Kuznetsov 		return ret;
8743f4a812eSVitaly Kuznetsov 	}
8753f4a812eSVitaly Kuznetsov 
8760dd16b5bSMaxim Levitsky 	vmcb12_gpa = svm->vmcb->save.rax;
87763129754SPaolo Bonzini 	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
878883b0a91SJoerg Roedel 	if (ret == -EINVAL) {
87963129754SPaolo Bonzini 		kvm_inject_gp(vcpu, 0);
880883b0a91SJoerg Roedel 		return 1;
881883b0a91SJoerg Roedel 	} else if (ret) {
88263129754SPaolo Bonzini 		return kvm_skip_emulated_instruction(vcpu);
883883b0a91SJoerg Roedel 	}
884883b0a91SJoerg Roedel 
88563129754SPaolo Bonzini 	ret = kvm_skip_emulated_instruction(vcpu);
886883b0a91SJoerg Roedel 
8870dd16b5bSMaxim Levitsky 	vmcb12 = map.hva;
888883b0a91SJoerg Roedel 
8892fcf4876SMaxim Levitsky 	if (WARN_ON_ONCE(!svm->nested.initialized))
8902fcf4876SMaxim Levitsky 		return -EINVAL;
8912fcf4876SMaxim Levitsky 
8927907160dSEmanuele Giuseppe Esposito 	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
893f2740a8dSEmanuele Giuseppe Esposito 	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
894cb9b6a1bSPaolo Bonzini 
895b7a3d8b6SEmanuele Giuseppe Esposito 	if (!nested_vmcb_check_save(vcpu) ||
896bd95926cSPaolo Bonzini 	    !nested_vmcb_check_controls(vcpu)) {
8970dd16b5bSMaxim Levitsky 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
8980dd16b5bSMaxim Levitsky 		vmcb12->control.exit_code_hi = 0;
8990dd16b5bSMaxim Levitsky 		vmcb12->control.exit_info_1  = 0;
9000dd16b5bSMaxim Levitsky 		vmcb12->control.exit_info_2  = 0;
90169c9dfa2SPaolo Bonzini 		goto out;
902883b0a91SJoerg Roedel 	}
903883b0a91SJoerg Roedel 
904883b0a91SJoerg Roedel 	/*
9054995a368SCathy Avery 	 * Since vmcb01 is not in use, we can use it to store some of the L1
9064995a368SCathy Avery 	 * state.
907883b0a91SJoerg Roedel 	 */
908db663af4SMaxim Levitsky 	vmcb01->save.efer   = vcpu->arch.efer;
909db663af4SMaxim Levitsky 	vmcb01->save.cr0    = kvm_read_cr0(vcpu);
910db663af4SMaxim Levitsky 	vmcb01->save.cr4    = vcpu->arch.cr4;
911db663af4SMaxim Levitsky 	vmcb01->save.rflags = kvm_get_rflags(vcpu);
912db663af4SMaxim Levitsky 	vmcb01->save.rip    = kvm_rip_read(vcpu);
913883b0a91SJoerg Roedel 
9144995a368SCathy Avery 	if (!npt_enabled)
915db663af4SMaxim Levitsky 		vmcb01->save.cr3 = kvm_read_cr3(vcpu);
916883b0a91SJoerg Roedel 
917f74f9414SPaolo Bonzini 	svm->nested.nested_run_pending = 1;
918883b0a91SJoerg Roedel 
919e85d3e7bSMaxim Levitsky 	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
92059cd9bc5SVitaly Kuznetsov 		goto out_exit_err;
92159cd9bc5SVitaly Kuznetsov 
92259cd9bc5SVitaly Kuznetsov 	if (nested_svm_vmrun_msrpm(svm))
92359cd9bc5SVitaly Kuznetsov 		goto out;
92459cd9bc5SVitaly Kuznetsov 
92559cd9bc5SVitaly Kuznetsov out_exit_err:
926ebdb3dbaSVitaly Kuznetsov 	svm->nested.nested_run_pending = 0;
927159fc6faSMaciej S. Szmigiero 	svm->nmi_l1_to_l2 = false;
9286ef88d6eSSean Christopherson 	svm->soft_int_injected = false;
929ebdb3dbaSVitaly Kuznetsov 
930883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
931883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code_hi = 0;
932883b0a91SJoerg Roedel 	svm->vmcb->control.exit_info_1  = 0;
933883b0a91SJoerg Roedel 	svm->vmcb->control.exit_info_2  = 0;
934883b0a91SJoerg Roedel 
935883b0a91SJoerg Roedel 	nested_svm_vmexit(svm);
936883b0a91SJoerg Roedel 
93769c9dfa2SPaolo Bonzini out:
93863129754SPaolo Bonzini 	kvm_vcpu_unmap(vcpu, &map, true);
93969c9dfa2SPaolo Bonzini 
940883b0a91SJoerg Roedel 	return ret;
941883b0a91SJoerg Roedel }
942883b0a91SJoerg Roedel 
9430a758290SVitaly Kuznetsov /* Copy state save area fields which are handled by VMRUN */
9442bb16beaSVitaly Kuznetsov void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
9452bb16beaSVitaly Kuznetsov 			  struct vmcb_save_area *from_save)
9460a758290SVitaly Kuznetsov {
9470a758290SVitaly Kuznetsov 	to_save->es = from_save->es;
9480a758290SVitaly Kuznetsov 	to_save->cs = from_save->cs;
9490a758290SVitaly Kuznetsov 	to_save->ss = from_save->ss;
9500a758290SVitaly Kuznetsov 	to_save->ds = from_save->ds;
9510a758290SVitaly Kuznetsov 	to_save->gdtr = from_save->gdtr;
9520a758290SVitaly Kuznetsov 	to_save->idtr = from_save->idtr;
9530a758290SVitaly Kuznetsov 	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
9540a758290SVitaly Kuznetsov 	to_save->efer = from_save->efer;
9550a758290SVitaly Kuznetsov 	to_save->cr0 = from_save->cr0;
9560a758290SVitaly Kuznetsov 	to_save->cr3 = from_save->cr3;
9570a758290SVitaly Kuznetsov 	to_save->cr4 = from_save->cr4;
9580a758290SVitaly Kuznetsov 	to_save->rax = from_save->rax;
9590a758290SVitaly Kuznetsov 	to_save->rsp = from_save->rsp;
9600a758290SVitaly Kuznetsov 	to_save->rip = from_save->rip;
9610a758290SVitaly Kuznetsov 	to_save->cpl = 0;
9620a758290SVitaly Kuznetsov }
9630a758290SVitaly Kuznetsov 
9642bb16beaSVitaly Kuznetsov void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
965883b0a91SJoerg Roedel {
966883b0a91SJoerg Roedel 	to_vmcb->save.fs = from_vmcb->save.fs;
967883b0a91SJoerg Roedel 	to_vmcb->save.gs = from_vmcb->save.gs;
968883b0a91SJoerg Roedel 	to_vmcb->save.tr = from_vmcb->save.tr;
969883b0a91SJoerg Roedel 	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
970883b0a91SJoerg Roedel 	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
971883b0a91SJoerg Roedel 	to_vmcb->save.star = from_vmcb->save.star;
972883b0a91SJoerg Roedel 	to_vmcb->save.lstar = from_vmcb->save.lstar;
973883b0a91SJoerg Roedel 	to_vmcb->save.cstar = from_vmcb->save.cstar;
974883b0a91SJoerg Roedel 	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
975883b0a91SJoerg Roedel 	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
976883b0a91SJoerg Roedel 	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
977883b0a91SJoerg Roedel 	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
978883b0a91SJoerg Roedel }
979883b0a91SJoerg Roedel 
980883b0a91SJoerg Roedel int nested_svm_vmexit(struct vcpu_svm *svm)
981883b0a91SJoerg Roedel {
98263129754SPaolo Bonzini 	struct kvm_vcpu *vcpu = &svm->vcpu;
983db663af4SMaxim Levitsky 	struct vmcb *vmcb01 = svm->vmcb01.ptr;
984db663af4SMaxim Levitsky 	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
9850dd16b5bSMaxim Levitsky 	struct vmcb *vmcb12;
986883b0a91SJoerg Roedel 	struct kvm_host_map map;
98763129754SPaolo Bonzini 	int rc;
988883b0a91SJoerg Roedel 
98963129754SPaolo Bonzini 	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
990883b0a91SJoerg Roedel 	if (rc) {
991883b0a91SJoerg Roedel 		if (rc == -EINVAL)
99263129754SPaolo Bonzini 			kvm_inject_gp(vcpu, 0);
993883b0a91SJoerg Roedel 		return 1;
994883b0a91SJoerg Roedel 	}
995883b0a91SJoerg Roedel 
9960dd16b5bSMaxim Levitsky 	vmcb12 = map.hva;
997883b0a91SJoerg Roedel 
998883b0a91SJoerg Roedel 	/* Exit Guest-Mode */
99963129754SPaolo Bonzini 	leave_guest_mode(vcpu);
10000dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = 0;
10012d8a42beSPaolo Bonzini 	WARN_ON_ONCE(svm->nested.nested_run_pending);
1002883b0a91SJoerg Roedel 
100363129754SPaolo Bonzini 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1004f2c7ef3bSMaxim Levitsky 
100538c0b192SPaolo Bonzini 	/* in case we halted in L2 */
100638c0b192SPaolo Bonzini 	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
100738c0b192SPaolo Bonzini 
1008883b0a91SJoerg Roedel 	/* Give the current vmcb to the guest */
1009883b0a91SJoerg Roedel 
1010db663af4SMaxim Levitsky 	vmcb12->save.es     = vmcb02->save.es;
1011db663af4SMaxim Levitsky 	vmcb12->save.cs     = vmcb02->save.cs;
1012db663af4SMaxim Levitsky 	vmcb12->save.ss     = vmcb02->save.ss;
1013db663af4SMaxim Levitsky 	vmcb12->save.ds     = vmcb02->save.ds;
1014db663af4SMaxim Levitsky 	vmcb12->save.gdtr   = vmcb02->save.gdtr;
1015db663af4SMaxim Levitsky 	vmcb12->save.idtr   = vmcb02->save.idtr;
10160dd16b5bSMaxim Levitsky 	vmcb12->save.efer   = svm->vcpu.arch.efer;
101763129754SPaolo Bonzini 	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
101863129754SPaolo Bonzini 	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
1019db663af4SMaxim Levitsky 	vmcb12->save.cr2    = vmcb02->save.cr2;
10200dd16b5bSMaxim Levitsky 	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
102163129754SPaolo Bonzini 	vmcb12->save.rflags = kvm_get_rflags(vcpu);
102263129754SPaolo Bonzini 	vmcb12->save.rip    = kvm_rip_read(vcpu);
102363129754SPaolo Bonzini 	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
102463129754SPaolo Bonzini 	vmcb12->save.rax    = kvm_rax_read(vcpu);
1025db663af4SMaxim Levitsky 	vmcb12->save.dr7    = vmcb02->save.dr7;
10260dd16b5bSMaxim Levitsky 	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
1027db663af4SMaxim Levitsky 	vmcb12->save.cpl    = vmcb02->save.cpl;
1028883b0a91SJoerg Roedel 
1029db663af4SMaxim Levitsky 	vmcb12->control.int_state         = vmcb02->control.int_state;
1030db663af4SMaxim Levitsky 	vmcb12->control.exit_code         = vmcb02->control.exit_code;
1031db663af4SMaxim Levitsky 	vmcb12->control.exit_code_hi      = vmcb02->control.exit_code_hi;
1032db663af4SMaxim Levitsky 	vmcb12->control.exit_info_1       = vmcb02->control.exit_info_1;
1033db663af4SMaxim Levitsky 	vmcb12->control.exit_info_2       = vmcb02->control.exit_info_2;
103436e2e983SPaolo Bonzini 
10350dd16b5bSMaxim Levitsky 	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
10369e8f0fbfSPaolo Bonzini 		nested_save_pending_event_to_vmcb12(svm, vmcb12);
1037883b0a91SJoerg Roedel 
10387a6a6a3bSSean Christopherson 	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
1039db663af4SMaxim Levitsky 		vmcb12->control.next_rip  = vmcb02->control.next_rip;
1040883b0a91SJoerg Roedel 
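	/*
	 * int_ctl, event_inj and event_inj_err are returned to L1 from the
	 * control state cached at VMRUN, not from vmcb02: KVM may have
	 * modified the vmcb02 copies while running L2.
	 */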
10410dd16b5bSMaxim Levitsky 	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
10420dd16b5bSMaxim Levitsky 	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
10430dd16b5bSMaxim Levitsky 	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;
1044883b0a91SJoerg Roedel 
1045e3cdaab5SPaolo Bonzini 	if (!kvm_pause_in_guest(vcpu->kvm)) {
104674fd41edSMaxim Levitsky 		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1047e3cdaab5SPaolo Bonzini 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1049e3cdaab5SPaolo Bonzini 	}
105074fd41edSMaxim Levitsky 
1051d00b99c5SBabu Moger 	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1052d00b99c5SBabu Moger 
10534995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->vmcb01);
10544995a368SCathy Avery 
10555d1ec456SMaxim Levitsky 	/*
10565d1ec456SMaxim Levitsky 	 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
10575d1ec456SMaxim Levitsky 	 *
10585d1ec456SMaxim Levitsky 	 * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR:  If L1 doesn't
10595d1ec456SMaxim Levitsky 	 * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
10605d1ec456SMaxim Levitsky 	 * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
10615d1ec456SMaxim Levitsky 	 * virtual interrupt masking).  Raise KVM_REQ_EVENT to ensure that
10625d1ec456SMaxim Levitsky 	 * KVM re-requests an interrupt window if necessary, which implicitly
10635d1ec456SMaxim Levitsky 	 * copies these bits from vmcb02 to vmcb01.
10645d1ec456SMaxim Levitsky 	 *
10655d1ec456SMaxim Levitsky 	 * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
10665d1ec456SMaxim Levitsky 	 * is stored in vmcb02, but its value doesn't need to be copied from/to
10675d1ec456SMaxim Levitsky 	 * vmcb01 because it is copied from/to the virtual APIC's TPR register
10685d1ec456SMaxim Levitsky 	 * on each VM entry/exit.
10695d1ec456SMaxim Levitsky 	 *
10705d1ec456SMaxim Levitsky 	 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
10715d1ec456SMaxim Levitsky 	 * V_GIF.  However, GIF is architecturally clear on each VM exit, thus
10725d1ec456SMaxim Levitsky 	 * there is no need to copy V_GIF from vmcb02 to vmcb01.
10735d1ec456SMaxim Levitsky 	 */
10745d1ec456SMaxim Levitsky 	if (!nested_exit_on_intr(svm))
10755d1ec456SMaxim Levitsky 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
10765d1ec456SMaxim Levitsky 
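	/*
	 * Sync the LBR state: if L1 enabled LBR virtualization for L2, the
	 * LBRs that L2 produced belong in vmcb12; otherwise, if vmcb01 runs
	 * with LBR virtualization, sync them back into vmcb01.
	 */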
1077e183d17aSSean Christopherson 	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1078e183d17aSSean Christopherson 		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1079d20c796cSMaxim Levitsky 		svm_copy_lbrs(vmcb12, vmcb02);
1080d20c796cSMaxim Levitsky 		svm_update_lbrv(vcpu);
1081d20c796cSMaxim Levitsky 	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
10821d5a1b58SMaxim Levitsky 		svm_copy_lbrs(vmcb01, vmcb02);
10831d5a1b58SMaxim Levitsky 		svm_update_lbrv(vcpu);
10841d5a1b58SMaxim Levitsky 	}
10851d5a1b58SMaxim Levitsky 
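	/*
	 * With vNMI, copy L2's NMI blocking state into vmcb01, and hand one
	 * pending NMI (if any) to L1 via vmcb01's V_NMI_PENDING bit.
	 */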
10860977cfacSSantosh Shukla 	if (vnmi) {
10870977cfacSSantosh Shukla 		if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
10880977cfacSSantosh Shukla 			vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
10890977cfacSSantosh Shukla 		else
10900977cfacSSantosh Shukla 			vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
10910977cfacSSantosh Shukla 
10920977cfacSSantosh Shukla 		if (vcpu->arch.nmi_pending) {
10930977cfacSSantosh Shukla 			vcpu->arch.nmi_pending--;
10940977cfacSSantosh Shukla 			vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
10950977cfacSSantosh Shukla 		} else {
10960977cfacSSantosh Shukla 			vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
10970977cfacSSantosh Shukla 		}
10980977cfacSSantosh Shukla 	}
10990977cfacSSantosh Shukla 
11004995a368SCathy Avery 	/*
11014995a368SCathy Avery 	 * On vmexit, GIF is set to false and no event
11024995a368SCathy Avery 	 * can be injected into L1.
11034995a368SCathy Avery 	 */
11049883764aSMaxim Levitsky 	svm_set_gif(svm, false);
1105db663af4SMaxim Levitsky 	vmcb01->control.exit_int_info = 0;
11069883764aSMaxim Levitsky 
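	/* Restore L1's TSC offset and, where supported, its TSC scaling ratio. */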
11077ca62d13SPaolo Bonzini 	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1108db663af4SMaxim Levitsky 	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1109db663af4SMaxim Levitsky 		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1110db663af4SMaxim Levitsky 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
11117ca62d13SPaolo Bonzini 	}
111218fc6c55SPaolo Bonzini 
11130c94e246SSean Christopherson 	if (kvm_caps.has_tsc_control &&
11140c94e246SSean Christopherson 	    vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
11155228eb96SMaxim Levitsky 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
11162d636990SSean Christopherson 		svm_write_tsc_multiplier(vcpu);
11175228eb96SMaxim Levitsky 	}
11185228eb96SMaxim Levitsky 
1119e670bf68SPaolo Bonzini 	svm->nested.ctl.nested_cr3 = 0;
1120883b0a91SJoerg Roedel 
11214995a368SCathy Avery 	/*
11224995a368SCathy Avery 	 * Restore processor state that had been saved in vmcb01
11234995a368SCathy Avery 	 */
1124db663af4SMaxim Levitsky 	kvm_set_rflags(vcpu, vmcb01->save.rflags);
1125db663af4SMaxim Levitsky 	svm_set_efer(vcpu, vmcb01->save.efer);
1126db663af4SMaxim Levitsky 	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1127db663af4SMaxim Levitsky 	svm_set_cr4(vcpu, vmcb01->save.cr4);
1128db663af4SMaxim Levitsky 	kvm_rax_write(vcpu, vmcb01->save.rax);
1129db663af4SMaxim Levitsky 	kvm_rsp_write(vcpu, vmcb01->save.rsp);
1130db663af4SMaxim Levitsky 	kvm_rip_write(vcpu, vmcb01->save.rip);
11314995a368SCathy Avery 
11324995a368SCathy Avery 	svm->vcpu.arch.dr7 = DR7_FIXED_1;
11334995a368SCathy Avery 	kvm_update_dr7(&svm->vcpu);
1134883b0a91SJoerg Roedel 
11350dd16b5bSMaxim Levitsky 	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
11360dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_info_1,
11370dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_info_2,
11380dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_int_info,
11390dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_int_info_err,
114036e2e983SPaolo Bonzini 				       KVM_ISA_SVM);
114136e2e983SPaolo Bonzini 
114263129754SPaolo Bonzini 	kvm_vcpu_unmap(vcpu, &map, true);
1143883b0a91SJoerg Roedel 
1144d2e56019SSean Christopherson 	nested_svm_transition_tlb_flush(vcpu);
1145d2e56019SSean Christopherson 
114663129754SPaolo Bonzini 	nested_svm_uninit_mmu_context(vcpu);
1147bf7dea42SVitaly Kuznetsov 
1148db663af4SMaxim Levitsky 	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1149d82aaef9SVitaly Kuznetsov 	if (rc)
1150d82aaef9SVitaly Kuznetsov 		return 1;
1151bf7dea42SVitaly Kuznetsov 
1152883b0a91SJoerg Roedel 	/*
1153883b0a91SJoerg Roedel 	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1154883b0a91SJoerg Roedel 	 * doesn't end up in L1.
1155883b0a91SJoerg Roedel 	 */
1156883b0a91SJoerg Roedel 	svm->vcpu.arch.nmi_injected = false;
115763129754SPaolo Bonzini 	kvm_clear_exception_queue(vcpu);
115863129754SPaolo Bonzini 	kvm_clear_interrupt_queue(vcpu);
1159883b0a91SJoerg Roedel 
11609a7de6ecSKrish Sadhukhan 	/*
11619a7de6ecSKrish Sadhukhan 	 * If we are here following the completion of a VMRUN that
11629a7de6ecSKrish Sadhukhan 	 * is being single-stepped, queue the pending #DB intercept
11639a7de6ecSKrish Sadhukhan 	 * right now so that it can be accounted for before we execute
11649a7de6ecSKrish Sadhukhan 	 * L1's next instruction.
11659a7de6ecSKrish Sadhukhan 	 */
1166db663af4SMaxim Levitsky 	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
11679a7de6ecSKrish Sadhukhan 		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
11689a7de6ecSKrish Sadhukhan 
1169f44509f8SMaxim Levitsky 	/*
1170f44509f8SMaxim Levitsky 	 * Un-inhibit the AVIC right away, so that other vCPUs can start
1171f44509f8SMaxim Levitsky 	 * to benefit from it immediately.
1172f44509f8SMaxim Levitsky 	 */
1173f44509f8SMaxim Levitsky 	if (kvm_apicv_activated(vcpu->kvm))
11742008fab3SSean Christopherson 		__kvm_vcpu_update_apicv(vcpu);
1175f44509f8SMaxim Levitsky 
1176883b0a91SJoerg Roedel 	return 0;
1177883b0a91SJoerg Roedel }
1178883b0a91SJoerg Roedel 
1179cb6a32c2SSean Christopherson static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1180cb6a32c2SSean Christopherson {
118192e7d5c8SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
118292e7d5c8SMaxim Levitsky 
118392e7d5c8SMaxim Levitsky 	if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
118492e7d5c8SMaxim Levitsky 		return;
118592e7d5c8SMaxim Levitsky 
118692e7d5c8SMaxim Levitsky 	kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
11873a87c7e0SSean Christopherson 	nested_svm_simple_vmexit(svm, SVM_EXIT_SHUTDOWN);
1188cb6a32c2SSean Christopherson }
1189cb6a32c2SSean Christopherson 
11902fcf4876SMaxim Levitsky int svm_allocate_nested(struct vcpu_svm *svm)
11912fcf4876SMaxim Levitsky {
11924995a368SCathy Avery 	struct page *vmcb02_page;
11932fcf4876SMaxim Levitsky 
11942fcf4876SMaxim Levitsky 	if (svm->nested.initialized)
11952fcf4876SMaxim Levitsky 		return 0;
11962fcf4876SMaxim Levitsky 
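	/* vmcb02 gets its own zeroed page; hardware requires the VMCB to be 4 KiB-aligned. */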
11974995a368SCathy Avery 	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
11984995a368SCathy Avery 	if (!vmcb02_page)
11992fcf4876SMaxim Levitsky 		return -ENOMEM;
12004995a368SCathy Avery 	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
12014995a368SCathy Avery 	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
12022fcf4876SMaxim Levitsky 
12032fcf4876SMaxim Levitsky 	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
12042fcf4876SMaxim Levitsky 	if (!svm->nested.msrpm)
12054995a368SCathy Avery 		goto err_free_vmcb02;
12062fcf4876SMaxim Levitsky 	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
12072fcf4876SMaxim Levitsky 
12082fcf4876SMaxim Levitsky 	svm->nested.initialized = true;
12092fcf4876SMaxim Levitsky 	return 0;
12102fcf4876SMaxim Levitsky 
12114995a368SCathy Avery err_free_vmcb02:
12124995a368SCathy Avery 	__free_page(vmcb02_page);
12132fcf4876SMaxim Levitsky 	return -ENOMEM;
12142fcf4876SMaxim Levitsky }
12152fcf4876SMaxim Levitsky 
12162fcf4876SMaxim Levitsky void svm_free_nested(struct vcpu_svm *svm)
12172fcf4876SMaxim Levitsky {
12182fcf4876SMaxim Levitsky 	if (!svm->nested.initialized)
12192fcf4876SMaxim Levitsky 		return;
12202fcf4876SMaxim Levitsky 
122116ae56d7SMaxim Levitsky 	if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
122216ae56d7SMaxim Levitsky 		svm_switch_vmcb(svm, &svm->vmcb01);
122316ae56d7SMaxim Levitsky 
12242fcf4876SMaxim Levitsky 	svm_vcpu_free_msrpm(svm->nested.msrpm);
12252fcf4876SMaxim Levitsky 	svm->nested.msrpm = NULL;
12262fcf4876SMaxim Levitsky 
12274995a368SCathy Avery 	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
12284995a368SCathy Avery 	svm->nested.vmcb02.ptr = NULL;
12292fcf4876SMaxim Levitsky 
1230c74ad08fSMaxim Levitsky 	/*
1231c74ad08fSMaxim Levitsky 	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1232c74ad08fSMaxim Levitsky 	 * some vmcb12 fields are not loaded if they are marked clean
1233c74ad08fSMaxim Levitsky 	 * in the vmcb12, since in this case they are up to date already.
1234c74ad08fSMaxim Levitsky 	 *
1235c74ad08fSMaxim Levitsky 	 * When the vmcb02 is freed, this optimization becomes invalid.
1236c74ad08fSMaxim Levitsky 	 */
1237c74ad08fSMaxim Levitsky 	svm->nested.last_vmcb12_gpa = INVALID_GPA;
1238c74ad08fSMaxim Levitsky 
12392fcf4876SMaxim Levitsky 	svm->nested.initialized = false;
12402fcf4876SMaxim Levitsky }
12412fcf4876SMaxim Levitsky 
1242f7e57078SSean Christopherson void svm_leave_nested(struct kvm_vcpu *vcpu)
1243c513f484SPaolo Bonzini {
1244f7e57078SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
124563129754SPaolo Bonzini 
124663129754SPaolo Bonzini 	if (is_guest_mode(vcpu)) {
1247c513f484SPaolo Bonzini 		svm->nested.nested_run_pending = 0;
1248c74ad08fSMaxim Levitsky 		svm->nested.vmcb12_gpa = INVALID_GPA;
1249c74ad08fSMaxim Levitsky 
125063129754SPaolo Bonzini 		leave_guest_mode(vcpu);
12514995a368SCathy Avery 
1252deee59baSMaxim Levitsky 		svm_switch_vmcb(svm, &svm->vmcb01);
12534995a368SCathy Avery 
125463129754SPaolo Bonzini 		nested_svm_uninit_mmu_context(vcpu);
125556fe28deSMaxim Levitsky 		vmcb_mark_all_dirty(svm->vmcb);
1256c513f484SPaolo Bonzini 	}
1257a7d5c7ceSPaolo Bonzini 
125863129754SPaolo Bonzini 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1259c513f484SPaolo Bonzini }
1260c513f484SPaolo Bonzini 
1261883b0a91SJoerg Roedel static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1262883b0a91SJoerg Roedel {
1263883b0a91SJoerg Roedel 	u32 offset, msr, value;
1264883b0a91SJoerg Roedel 	int write, mask;
1265883b0a91SJoerg Roedel 
12668fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1267883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1268883b0a91SJoerg Roedel 
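	/*
	 * The MSRPM holds two bits per MSR: a read-intercept bit followed
	 * by a write-intercept bit.
	 */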
1269883b0a91SJoerg Roedel 	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1270883b0a91SJoerg Roedel 	offset = svm_msrpm_offset(msr);
1271883b0a91SJoerg Roedel 	write  = svm->vmcb->control.exit_info_1 & 1;
1272883b0a91SJoerg Roedel 	mask   = 1 << ((2 * (msr & 0xf)) + write);
1273883b0a91SJoerg Roedel 
1274883b0a91SJoerg Roedel 	if (offset == MSR_INVALID)
1275883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1276883b0a91SJoerg Roedel 
1277883b0a91SJoerg Roedel 	/* Offset is in 32-bit units, but we need it in byte units */
1278883b0a91SJoerg Roedel 	offset *= 4;
1279883b0a91SJoerg Roedel 
1280e670bf68SPaolo Bonzini 	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1281883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1282883b0a91SJoerg Roedel 
1283883b0a91SJoerg Roedel 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1284883b0a91SJoerg Roedel }
1285883b0a91SJoerg Roedel 
1286883b0a91SJoerg Roedel static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1287883b0a91SJoerg Roedel {
1288883b0a91SJoerg Roedel 	unsigned port, size, iopm_len;
1289883b0a91SJoerg Roedel 	u16 val, mask;
1290883b0a91SJoerg Roedel 	u8 start_bit;
1291883b0a91SJoerg Roedel 	u64 gpa;
1292883b0a91SJoerg Roedel 
12938fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1294883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1295883b0a91SJoerg Roedel 
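	/*
	 * The IOPM holds one intercept bit per I/O port; an access of 'size'
	 * bytes checks 'size' consecutive bits and may straddle a byte
	 * boundary, requiring a two-byte read of L1's bitmap.
	 */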
1296883b0a91SJoerg Roedel 	port = svm->vmcb->control.exit_info_1 >> 16;
1297883b0a91SJoerg Roedel 	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1298883b0a91SJoerg Roedel 		SVM_IOIO_SIZE_SHIFT;
1299e670bf68SPaolo Bonzini 	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
1300883b0a91SJoerg Roedel 	start_bit = port % 8;
1301883b0a91SJoerg Roedel 	iopm_len = (start_bit + size > 8) ? 2 : 1;
1302883b0a91SJoerg Roedel 	mask = (0xf >> (4 - size)) << start_bit;
1303883b0a91SJoerg Roedel 	val = 0;
1304883b0a91SJoerg Roedel 
1305883b0a91SJoerg Roedel 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1306883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1307883b0a91SJoerg Roedel 
1308883b0a91SJoerg Roedel 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1309883b0a91SJoerg Roedel }
1310883b0a91SJoerg Roedel 
1311883b0a91SJoerg Roedel static int nested_svm_intercept(struct vcpu_svm *svm)
1312883b0a91SJoerg Roedel {
1313883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1314883b0a91SJoerg Roedel 	int vmexit = NESTED_EXIT_HOST;
1315883b0a91SJoerg Roedel 
1316883b0a91SJoerg Roedel 	switch (exit_code) {
1317883b0a91SJoerg Roedel 	case SVM_EXIT_MSR:
1318883b0a91SJoerg Roedel 		vmexit = nested_svm_exit_handled_msr(svm);
1319883b0a91SJoerg Roedel 		break;
1320883b0a91SJoerg Roedel 	case SVM_EXIT_IOIO:
1321883b0a91SJoerg Roedel 		vmexit = nested_svm_intercept_ioio(svm);
1322883b0a91SJoerg Roedel 		break;
1323883b0a91SJoerg Roedel 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
13248fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1325883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1326883b0a91SJoerg Roedel 		break;
1327883b0a91SJoerg Roedel 	}
1328883b0a91SJoerg Roedel 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
13298fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1330883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1331883b0a91SJoerg Roedel 		break;
1332883b0a91SJoerg Roedel 	}
1333883b0a91SJoerg Roedel 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
13347c86663bSPaolo Bonzini 		/*
13357c86663bSPaolo Bonzini 		 * Host-intercepted exceptions have been checked already in
13367c86663bSPaolo Bonzini 		 * nested_svm_exit_special.  There is nothing to do here,
13377c86663bSPaolo Bonzini 		 * the vmexit is injected by svm_check_nested_events.
13387c86663bSPaolo Bonzini 		 */
1339883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1340883b0a91SJoerg Roedel 		break;
1341883b0a91SJoerg Roedel 	}
1342883b0a91SJoerg Roedel 	case SVM_EXIT_ERR: {
1343883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1344883b0a91SJoerg Roedel 		break;
1345883b0a91SJoerg Roedel 	}
1346883b0a91SJoerg Roedel 	default: {
13478fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1348883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1349883b0a91SJoerg Roedel 	}
1350883b0a91SJoerg Roedel 	}
1351883b0a91SJoerg Roedel 
1352883b0a91SJoerg Roedel 	return vmexit;
1353883b0a91SJoerg Roedel }
1354883b0a91SJoerg Roedel 
1355883b0a91SJoerg Roedel int nested_svm_exit_handled(struct vcpu_svm *svm)
1356883b0a91SJoerg Roedel {
1357883b0a91SJoerg Roedel 	int vmexit;
1358883b0a91SJoerg Roedel 
1359883b0a91SJoerg Roedel 	vmexit = nested_svm_intercept(svm);
1360883b0a91SJoerg Roedel 
1361883b0a91SJoerg Roedel 	if (vmexit == NESTED_EXIT_DONE)
1362883b0a91SJoerg Roedel 		nested_svm_vmexit(svm);
1363883b0a91SJoerg Roedel 
1364883b0a91SJoerg Roedel 	return vmexit;
1365883b0a91SJoerg Roedel }
1366883b0a91SJoerg Roedel 
136763129754SPaolo Bonzini int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1368883b0a91SJoerg Roedel {
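	/*
	 * SVM instructions #UD unless EFER.SVME is set and the vCPU is
	 * paging, and #GP when executed at CPL > 0.
	 */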
136963129754SPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
137063129754SPaolo Bonzini 		kvm_queue_exception(vcpu, UD_VECTOR);
1371883b0a91SJoerg Roedel 		return 1;
1372883b0a91SJoerg Roedel 	}
1373883b0a91SJoerg Roedel 
137463129754SPaolo Bonzini 	if (to_svm(vcpu)->vmcb->save.cpl) {
137563129754SPaolo Bonzini 		kvm_inject_gp(vcpu, 0);
1376883b0a91SJoerg Roedel 		return 1;
1377883b0a91SJoerg Roedel 	}
1378883b0a91SJoerg Roedel 
1379883b0a91SJoerg Roedel 	return 0;
1380883b0a91SJoerg Roedel }
1381883b0a91SJoerg Roedel 
13827709aba8SSean Christopherson static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
13837709aba8SSean Christopherson 					   u32 error_code)
1384883b0a91SJoerg Roedel {
13857709aba8SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
1386883b0a91SJoerg Roedel 
1387d4963e31SSean Christopherson 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
13887c86663bSPaolo Bonzini }
1389883b0a91SJoerg Roedel 
1390d4963e31SSean Christopherson static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
13917c86663bSPaolo Bonzini {
13927709aba8SSean Christopherson 	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1393d4963e31SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
1394db663af4SMaxim Levitsky 	struct vmcb *vmcb = svm->vmcb;
1395883b0a91SJoerg Roedel 
1396d4963e31SSean Christopherson 	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1397db663af4SMaxim Levitsky 	vmcb->control.exit_code_hi = 0;
13987c86663bSPaolo Bonzini 
1399d4963e31SSean Christopherson 	if (ex->has_error_code)
1400d4963e31SSean Christopherson 		vmcb->control.exit_info_1 = ex->error_code;
1401883b0a91SJoerg Roedel 
1402883b0a91SJoerg Roedel 	/*
1403883b0a91SJoerg Roedel 	 * EXITINFO2 is undefined for all exception intercepts other
1404883b0a91SJoerg Roedel 	 * than #PF.
1405883b0a91SJoerg Roedel 	 */
1406d4963e31SSean Christopherson 	if (ex->vector == PF_VECTOR) {
14077709aba8SSean Christopherson 		if (ex->has_payload)
1408d4963e31SSean Christopherson 			vmcb->control.exit_info_2 = ex->payload;
1409883b0a91SJoerg Roedel 		else
1410d4963e31SSean Christopherson 			vmcb->control.exit_info_2 = vcpu->arch.cr2;
1411d4963e31SSean Christopherson 	} else if (ex->vector == DB_VECTOR) {
1412e746c1f1SSean Christopherson 		/* See kvm_check_and_inject_events().  */
1413d4963e31SSean Christopherson 		kvm_deliver_exception_payload(vcpu, ex);
1414d4963e31SSean Christopherson 
1415d4963e31SSean Christopherson 		if (vcpu->arch.dr7 & DR7_GD) {
1416d4963e31SSean Christopherson 			vcpu->arch.dr7 &= ~DR7_GD;
1417d4963e31SSean Christopherson 			kvm_update_dr7(vcpu);
14187c86663bSPaolo Bonzini 		}
1419d4963e31SSean Christopherson 	} else {
1420d4963e31SSean Christopherson 		WARN_ON(ex->has_payload);
1421d4963e31SSean Christopherson 	}
1422883b0a91SJoerg Roedel 
14237c86663bSPaolo Bonzini 	nested_svm_vmexit(svm);
1424883b0a91SJoerg Roedel }
1425883b0a91SJoerg Roedel 
14265b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
14275b672408SPaolo Bonzini {
14288fc78909SEmanuele Giuseppe Esposito 	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
14295b672408SPaolo Bonzini }
14305b672408SPaolo Bonzini 
143133b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1432883b0a91SJoerg Roedel {
14335b672408SPaolo Bonzini 	struct kvm_lapic *apic = vcpu->arch.apic;
143472c14e00SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
143572c14e00SSean Christopherson 	/*
143672c14e00SSean Christopherson 	 * Only a pending nested run blocks a pending exception.  If there is a
143772c14e00SSean Christopherson 	 * previously injected event, the pending exception occurred while said
143872c14e00SSean Christopherson 	 * event was being delivered and thus needs to be handled.
143972c14e00SSean Christopherson 	 */
144072c14e00SSean Christopherson 	bool block_nested_exceptions = svm->nested.nested_run_pending;
144172c14e00SSean Christopherson 	/*
144272c14e00SSean Christopherson 	 * New events (not exceptions) are only recognized at instruction
144372c14e00SSean Christopherson 	 * boundaries.  If an event needs reinjection, then KVM is handling a
144472c14e00SSean Christopherson 	 * VM-Exit that occurred _during_ instruction execution; new events are
144572c14e00SSean Christopherson 	 * blocked until the instruction completes.
144672c14e00SSean Christopherson 	 */
144772c14e00SSean Christopherson 	bool block_nested_events = block_nested_exceptions ||
144872c14e00SSean Christopherson 				   kvm_event_needs_reinjection(vcpu);
14495b672408SPaolo Bonzini 
14505b672408SPaolo Bonzini 	if (lapic_in_kernel(vcpu) &&
14515b672408SPaolo Bonzini 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
14525b672408SPaolo Bonzini 		if (block_nested_events)
14535b672408SPaolo Bonzini 			return -EBUSY;
14545b672408SPaolo Bonzini 		if (!nested_exit_on_init(svm))
14555b672408SPaolo Bonzini 			return 0;
14563a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
14575b672408SPaolo Bonzini 		return 0;
14585b672408SPaolo Bonzini 	}
1459883b0a91SJoerg Roedel 
14607709aba8SSean Christopherson 	if (vcpu->arch.exception_vmexit.pending) {
14617709aba8SSean Christopherson 		if (block_nested_exceptions)
14627709aba8SSean Christopherson 			return -EBUSY;
14637709aba8SSean Christopherson 		nested_svm_inject_exception_vmexit(vcpu);
14647709aba8SSean Christopherson 		return 0;
14657709aba8SSean Christopherson 	}
14667709aba8SSean Christopherson 
14677c86663bSPaolo Bonzini 	if (vcpu->arch.exception.pending) {
146872c14e00SSean Christopherson 		if (block_nested_exceptions)
14697c86663bSPaolo Bonzini 			return -EBUSY;
14707c86663bSPaolo Bonzini 		return 0;
14717c86663bSPaolo Bonzini 	}
14727c86663bSPaolo Bonzini 
147331e83e21SPaolo Bonzini #ifdef CONFIG_KVM_SMM
1474221e7610SPaolo Bonzini 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
147555714cddSPaolo Bonzini 		if (block_nested_events)
147655714cddSPaolo Bonzini 			return -EBUSY;
1477221e7610SPaolo Bonzini 		if (!nested_exit_on_smi(svm))
1478221e7610SPaolo Bonzini 			return 0;
14793a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
148055714cddSPaolo Bonzini 		return 0;
148155714cddSPaolo Bonzini 	}
148231e83e21SPaolo Bonzini #endif
148355714cddSPaolo Bonzini 
1484221e7610SPaolo Bonzini 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
14859c3d370aSCathy Avery 		if (block_nested_events)
14869c3d370aSCathy Avery 			return -EBUSY;
1487221e7610SPaolo Bonzini 		if (!nested_exit_on_nmi(svm))
1488221e7610SPaolo Bonzini 			return 0;
14893a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
14909c3d370aSCathy Avery 		return 0;
14919c3d370aSCathy Avery 	}
14929c3d370aSCathy Avery 
1493221e7610SPaolo Bonzini 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1494883b0a91SJoerg Roedel 		if (block_nested_events)
1495883b0a91SJoerg Roedel 			return -EBUSY;
1496221e7610SPaolo Bonzini 		if (!nested_exit_on_intr(svm))
1497221e7610SPaolo Bonzini 			return 0;
14983a87c7e0SSean Christopherson 		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
14993a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1500883b0a91SJoerg Roedel 		return 0;
1501883b0a91SJoerg Roedel 	}
1502883b0a91SJoerg Roedel 
1503883b0a91SJoerg Roedel 	return 0;
1504883b0a91SJoerg Roedel }
1505883b0a91SJoerg Roedel 
1506883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1507883b0a91SJoerg Roedel {
1508883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
15093f4a812eSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = &svm->vcpu;
1510883b0a91SJoerg Roedel 
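	/*
	 * Exits that L0 must always handle itself, regardless of L1's
	 * intercepts, return NESTED_EXIT_HOST here.
	 */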
1511883b0a91SJoerg Roedel 	switch (exit_code) {
1512883b0a91SJoerg Roedel 	case SVM_EXIT_INTR:
1513883b0a91SJoerg Roedel 	case SVM_EXIT_NMI:
1514883b0a91SJoerg Roedel 	case SVM_EXIT_NPF:
1515883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
15167c86663bSPaolo Bonzini 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
15177c86663bSPaolo Bonzini 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
15187c86663bSPaolo Bonzini 
15194995a368SCathy Avery 		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
15209780d51dSBabu Moger 		    excp_bits)
15217c86663bSPaolo Bonzini 			return NESTED_EXIT_HOST;
15227c86663bSPaolo Bonzini 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
152368fd66f1SVitaly Kuznetsov 			 svm->vcpu.arch.apf.host_apf_flags)
1524a3535be7SPaolo Bonzini 			/* Trap async PF even if not shadowing */
1525883b0a91SJoerg Roedel 			return NESTED_EXIT_HOST;
1526883b0a91SJoerg Roedel 		break;
15277c86663bSPaolo Bonzini 	}
15283f4a812eSVitaly Kuznetsov 	case SVM_EXIT_VMMCALL:
15293f4a812eSVitaly Kuznetsov 		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
15303f4a812eSVitaly Kuznetsov 		if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
15313f4a812eSVitaly Kuznetsov 		    nested_svm_l2_tlb_flush_enabled(vcpu) &&
15323f4a812eSVitaly Kuznetsov 		    kvm_hv_is_tlb_flush_hcall(vcpu))
15333f4a812eSVitaly Kuznetsov 			return NESTED_EXIT_HOST;
15343f4a812eSVitaly Kuznetsov 		break;
1535883b0a91SJoerg Roedel 	default:
1536883b0a91SJoerg Roedel 		break;
1537883b0a91SJoerg Roedel 	}
1538883b0a91SJoerg Roedel 
1539883b0a91SJoerg Roedel 	return NESTED_EXIT_CONTINUE;
1540883b0a91SJoerg Roedel }
154133b22172SPaolo Bonzini 
15425228eb96SMaxim Levitsky void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
15435228eb96SMaxim Levitsky {
15445228eb96SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
15455228eb96SMaxim Levitsky 
15465228eb96SMaxim Levitsky 	vcpu->arch.tsc_scaling_ratio =
15475228eb96SMaxim Levitsky 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
15485228eb96SMaxim Levitsky 					       svm->tsc_ratio_msr);
15492d636990SSean Christopherson 	svm_write_tsc_multiplier(vcpu);
15505228eb96SMaxim Levitsky }
15515228eb96SMaxim Levitsky 
15528fc78909SEmanuele Giuseppe Esposito /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
15538fc78909SEmanuele Giuseppe Esposito static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
15548fc78909SEmanuele Giuseppe Esposito 					      struct vmcb_ctrl_area_cached *from)
15558fc78909SEmanuele Giuseppe Esposito {
15568fc78909SEmanuele Giuseppe Esposito 	unsigned int i;
15578fc78909SEmanuele Giuseppe Esposito 
15588fc78909SEmanuele Giuseppe Esposito 	memset(dst, 0, sizeof(struct vmcb_control_area));
15598fc78909SEmanuele Giuseppe Esposito 
15608fc78909SEmanuele Giuseppe Esposito 	for (i = 0; i < MAX_INTERCEPT; i++)
15618fc78909SEmanuele Giuseppe Esposito 		dst->intercepts[i] = from->intercepts[i];
15628fc78909SEmanuele Giuseppe Esposito 
15638fc78909SEmanuele Giuseppe Esposito 	dst->iopm_base_pa         = from->iopm_base_pa;
15648fc78909SEmanuele Giuseppe Esposito 	dst->msrpm_base_pa        = from->msrpm_base_pa;
15658fc78909SEmanuele Giuseppe Esposito 	dst->tsc_offset           = from->tsc_offset;
15668fc78909SEmanuele Giuseppe Esposito 	dst->asid                 = from->asid;
15678fc78909SEmanuele Giuseppe Esposito 	dst->tlb_ctl              = from->tlb_ctl;
15688fc78909SEmanuele Giuseppe Esposito 	dst->int_ctl              = from->int_ctl;
15698fc78909SEmanuele Giuseppe Esposito 	dst->int_vector           = from->int_vector;
15708fc78909SEmanuele Giuseppe Esposito 	dst->int_state            = from->int_state;
15718fc78909SEmanuele Giuseppe Esposito 	dst->exit_code            = from->exit_code;
15728fc78909SEmanuele Giuseppe Esposito 	dst->exit_code_hi         = from->exit_code_hi;
15738fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_1          = from->exit_info_1;
15748fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_2          = from->exit_info_2;
15758fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info        = from->exit_int_info;
15768fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info_err    = from->exit_int_info_err;
15778fc78909SEmanuele Giuseppe Esposito 	dst->nested_ctl           = from->nested_ctl;
15788fc78909SEmanuele Giuseppe Esposito 	dst->event_inj            = from->event_inj;
15798fc78909SEmanuele Giuseppe Esposito 	dst->event_inj_err        = from->event_inj_err;
158000f08d99SMaciej S. Szmigiero 	dst->next_rip             = from->next_rip;
15818fc78909SEmanuele Giuseppe Esposito 	dst->nested_cr3           = from->nested_cr3;
15828fc78909SEmanuele Giuseppe Esposito 	dst->virt_ext             = from->virt_ext;
15838fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_count   = from->pause_filter_count;
15848fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_thresh  = from->pause_filter_thresh;
158568ae7c7bSSean Christopherson 	/* 'clean' and 'hv_enlightenments' are not changed by KVM */
15868fc78909SEmanuele Giuseppe Esposito }
15878fc78909SEmanuele Giuseppe Esposito 
1588cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1589cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1590cc440cdaSPaolo Bonzini 				u32 user_data_size)
1591cc440cdaSPaolo Bonzini {
1592cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm;
15938fc78909SEmanuele Giuseppe Esposito 	struct vmcb_control_area *ctl;
15948fc78909SEmanuele Giuseppe Esposito 	unsigned long r;
1595cc440cdaSPaolo Bonzini 	struct kvm_nested_state kvm_state = {
1596cc440cdaSPaolo Bonzini 		.flags = 0,
1597cc440cdaSPaolo Bonzini 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1598cc440cdaSPaolo Bonzini 		.size = sizeof(kvm_state),
1599cc440cdaSPaolo Bonzini 	};
1600cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1601cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
1602cc440cdaSPaolo Bonzini 
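	/* A NULL vcpu is a query for the maximum amount of state needed. */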
1603cc440cdaSPaolo Bonzini 	if (!vcpu)
1604cc440cdaSPaolo Bonzini 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1605cc440cdaSPaolo Bonzini 
1606cc440cdaSPaolo Bonzini 	svm = to_svm(vcpu);
1607cc440cdaSPaolo Bonzini 
1608cc440cdaSPaolo Bonzini 	if (user_data_size < kvm_state.size)
1609cc440cdaSPaolo Bonzini 		goto out;
1610cc440cdaSPaolo Bonzini 
1611cc440cdaSPaolo Bonzini 	/* First fill in the header and copy it out.  */
1612cc440cdaSPaolo Bonzini 	if (is_guest_mode(vcpu)) {
16130dd16b5bSMaxim Levitsky 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1614cc440cdaSPaolo Bonzini 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1615cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1616cc440cdaSPaolo Bonzini 
1617cc440cdaSPaolo Bonzini 		if (svm->nested.nested_run_pending)
1618cc440cdaSPaolo Bonzini 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1619cc440cdaSPaolo Bonzini 	}
1620cc440cdaSPaolo Bonzini 
1621cc440cdaSPaolo Bonzini 	if (gif_set(svm))
1622cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1623cc440cdaSPaolo Bonzini 
1624cc440cdaSPaolo Bonzini 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1625cc440cdaSPaolo Bonzini 		return -EFAULT;
1626cc440cdaSPaolo Bonzini 
1627cc440cdaSPaolo Bonzini 	if (!is_guest_mode(vcpu))
1628cc440cdaSPaolo Bonzini 		goto out;
1629cc440cdaSPaolo Bonzini 
1630cc440cdaSPaolo Bonzini 	/*
1631cc440cdaSPaolo Bonzini 	 * Copy over the full size of the VMCB rather than just the size
1632cc440cdaSPaolo Bonzini 	 * of the structs.
1633cc440cdaSPaolo Bonzini 	 */
1634cc440cdaSPaolo Bonzini 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1635cc440cdaSPaolo Bonzini 		return -EFAULT;
16368fc78909SEmanuele Giuseppe Esposito 
16378fc78909SEmanuele Giuseppe Esposito 	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
16388fc78909SEmanuele Giuseppe Esposito 	if (!ctl)
16398fc78909SEmanuele Giuseppe Esposito 		return -ENOMEM;
16408fc78909SEmanuele Giuseppe Esposito 
16418fc78909SEmanuele Giuseppe Esposito 	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
16428fc78909SEmanuele Giuseppe Esposito 	r = copy_to_user(&user_vmcb->control, ctl,
16438fc78909SEmanuele Giuseppe Esposito 			 sizeof(user_vmcb->control));
16448fc78909SEmanuele Giuseppe Esposito 	kfree(ctl);
16458fc78909SEmanuele Giuseppe Esposito 	if (r)
1646cc440cdaSPaolo Bonzini 		return -EFAULT;
16478fc78909SEmanuele Giuseppe Esposito 
16484995a368SCathy Avery 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1649cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->save)))
1650cc440cdaSPaolo Bonzini 		return -EFAULT;
1651cc440cdaSPaolo Bonzini out:
1652cc440cdaSPaolo Bonzini 	return kvm_state.size;
1653cc440cdaSPaolo Bonzini }
1654cc440cdaSPaolo Bonzini 
1655cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1656cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1657cc440cdaSPaolo Bonzini 				struct kvm_nested_state *kvm_state)
1658cc440cdaSPaolo Bonzini {
1659cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
1660cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1661cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
16626ccbd29aSJoerg Roedel 	struct vmcb_control_area *ctl;
16636ccbd29aSJoerg Roedel 	struct vmcb_save_area *save;
1664b7a3d8b6SEmanuele Giuseppe Esposito 	struct vmcb_save_area_cached save_cached;
16658fc78909SEmanuele Giuseppe Esposito 	struct vmcb_ctrl_area_cached ctl_cached;
1666dbc4739bSSean Christopherson 	unsigned long cr0;
16676ccbd29aSJoerg Roedel 	int ret;
1668cc440cdaSPaolo Bonzini 
16696ccbd29aSJoerg Roedel 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
16706ccbd29aSJoerg Roedel 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
16716ccbd29aSJoerg Roedel 
1672cc440cdaSPaolo Bonzini 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1673cc440cdaSPaolo Bonzini 		return -EINVAL;
1674cc440cdaSPaolo Bonzini 
1675cc440cdaSPaolo Bonzini 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1676cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_RUN_PENDING |
1677cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_GIF_SET))
1678cc440cdaSPaolo Bonzini 		return -EINVAL;
1679cc440cdaSPaolo Bonzini 
1680cc440cdaSPaolo Bonzini 	/*
1681cc440cdaSPaolo Bonzini 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1682cc440cdaSPaolo Bonzini 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1683cc440cdaSPaolo Bonzini 	 */
1684cc440cdaSPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME)) {
1685cc440cdaSPaolo Bonzini 		/* GIF=1 and no guest mode are required if SVME=0.  */
1686cc440cdaSPaolo Bonzini 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1687cc440cdaSPaolo Bonzini 			return -EINVAL;
1688cc440cdaSPaolo Bonzini 	}
1689cc440cdaSPaolo Bonzini 
1690cc440cdaSPaolo Bonzini 	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1691cc440cdaSPaolo Bonzini 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1692cc440cdaSPaolo Bonzini 		return -EINVAL;
1693cc440cdaSPaolo Bonzini 
1694cc440cdaSPaolo Bonzini 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1695f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
1696d5cd6f34SVitaly Kuznetsov 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1697d5cd6f34SVitaly Kuznetsov 		return 0;
1698cc440cdaSPaolo Bonzini 	}
1699cc440cdaSPaolo Bonzini 
1700cc440cdaSPaolo Bonzini 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1701cc440cdaSPaolo Bonzini 		return -EINVAL;
1702cc440cdaSPaolo Bonzini 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1703cc440cdaSPaolo Bonzini 		return -EINVAL;
1704cc440cdaSPaolo Bonzini 
17056ccbd29aSJoerg Roedel 	ret  = -ENOMEM;
1706eba04b20SSean Christopherson 	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1707eba04b20SSean Christopherson 	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
17086ccbd29aSJoerg Roedel 	if (!ctl || !save)
17096ccbd29aSJoerg Roedel 		goto out_free;
17106ccbd29aSJoerg Roedel 
17116ccbd29aSJoerg Roedel 	ret = -EFAULT;
17126ccbd29aSJoerg Roedel 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
17136ccbd29aSJoerg Roedel 		goto out_free;
17146ccbd29aSJoerg Roedel 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
17156ccbd29aSJoerg Roedel 		goto out_free;
17166ccbd29aSJoerg Roedel 
17176ccbd29aSJoerg Roedel 	ret = -EINVAL;
171866c03a92SVitaly Kuznetsov 	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
17198fc78909SEmanuele Giuseppe Esposito 	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
17206ccbd29aSJoerg Roedel 		goto out_free;
1721cc440cdaSPaolo Bonzini 
1722cc440cdaSPaolo Bonzini 	/*
1723cc440cdaSPaolo Bonzini 	 * Processor state contains L2 state.  Check that it is
1724cb9b6a1bSPaolo Bonzini 	 * valid for guest mode (see nested_vmcb_check_save).
1725cc440cdaSPaolo Bonzini 	 */
1726cc440cdaSPaolo Bonzini 	cr0 = kvm_read_cr0(vcpu);
1727cc440cdaSPaolo Bonzini 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
17286ccbd29aSJoerg Roedel 		goto out_free;
1729cc440cdaSPaolo Bonzini 
1730cc440cdaSPaolo Bonzini 	/*
1731cc440cdaSPaolo Bonzini 	 * Validate host state saved from before VMRUN (see
1732cc440cdaSPaolo Bonzini 	 * nested_svm_check_permissions).
1733cc440cdaSPaolo Bonzini 	 */
1734b7a3d8b6SEmanuele Giuseppe Esposito 	__nested_copy_vmcb_save_to_cache(&save_cached, save);
17356906e06dSKrish Sadhukhan 	if (!(save->cr0 & X86_CR0_PG) ||
17366906e06dSKrish Sadhukhan 	    !(save->cr0 & X86_CR0_PE) ||
17376906e06dSKrish Sadhukhan 	    (save->rflags & X86_EFLAGS_VM) ||
1738b7a3d8b6SEmanuele Giuseppe Esposito 	    !__nested_vmcb_check_save(vcpu, &save_cached))
17396ccbd29aSJoerg Roedel 		goto out_free;
1740cc440cdaSPaolo Bonzini 
1742b222b0b8SMaxim Levitsky 	/*
17434995a368SCathy Avery 	 * All checks done, we can enter guest mode. Userspace provides
17444995a368SCathy Avery 	 * vmcb12.control, which will be combined with L1 and stored into
17454995a368SCathy Avery 	 * vmcb02, and the L1 save state which we store in vmcb01.
17464995a368SCathy Avery 	 * If needed, L2 registers are moved from the current VMCB to vmcb02.
1747cc440cdaSPaolo Bonzini 	 */
174881f76adaSMaxim Levitsky 
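	/*
	 * If we were already in guest mode, leave it first; otherwise seed
	 * vmcb02's save area from vmcb01 before switching to it.
	 */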
17499d290e16SMaxim Levitsky 	if (is_guest_mode(vcpu))
1750f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
17519d290e16SMaxim Levitsky 	else
17529d290e16SMaxim Levitsky 		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
17539d290e16SMaxim Levitsky 
1754063ab16cSMaxim Levitsky 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1755063ab16cSMaxim Levitsky 
175681f76adaSMaxim Levitsky 	svm->nested.nested_run_pending =
175781f76adaSMaxim Levitsky 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
175881f76adaSMaxim Levitsky 
17590dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1760c08f390aSPaolo Bonzini 
17612bb16beaSVitaly Kuznetsov 	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
17627907160dSEmanuele Giuseppe Esposito 	nested_copy_vmcb_control_to_cache(svm, ctl);
17634995a368SCathy Avery 
17644995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
1765da0b93d6SMaciej S. Szmigiero 	nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1766e1779c27SMaxim Levitsky 
1767e1779c27SMaxim Levitsky 	/*
1768e1779c27SMaxim Levitsky 	 * While the nested guest CR3 is already checked and set by
1769e1779c27SMaxim Levitsky 	 * KVM_SET_SREGS, it was set before the nested state was loaded,
1770e1779c27SMaxim Levitsky 	 * thus the MMU might not be initialized correctly.
1771e1779c27SMaxim Levitsky 	 * Set it again to fix this.
1772e1779c27SMaxim Levitsky 	 */
1773e1779c27SMaxim Levitsky 
1774e1779c27SMaxim Levitsky 	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1775e1779c27SMaxim Levitsky 				  nested_npt_enabled(svm), false);
1776e1779c27SMaxim Levitsky 	if (WARN_ON_ONCE(ret))
1777e1779c27SMaxim Levitsky 		goto out_free;
1778e1779c27SMaxim Levitsky 
177973c25546SVitaly Kuznetsov 	svm->nested.force_msr_bitmap_recalc = true;
1780e1779c27SMaxim Levitsky 
1781a7d5c7ceSPaolo Bonzini 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
17826ccbd29aSJoerg Roedel 	ret = 0;
17836ccbd29aSJoerg Roedel out_free:
17846ccbd29aSJoerg Roedel 	kfree(save);
17856ccbd29aSJoerg Roedel 	kfree(ctl);
17866ccbd29aSJoerg Roedel 
17876ccbd29aSJoerg Roedel 	return ret;
1788cc440cdaSPaolo Bonzini }
1789cc440cdaSPaolo Bonzini 
1790232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1791232f75d3SMaxim Levitsky {
1792232f75d3SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
1793232f75d3SMaxim Levitsky 
1794232f75d3SMaxim Levitsky 	if (WARN_ON(!is_guest_mode(vcpu)))
1795232f75d3SMaxim Levitsky 		return true;
1796232f75d3SMaxim Levitsky 
1797158a48ecSMaxim Levitsky 	if (!vcpu->arch.pdptrs_from_userspace &&
1798158a48ecSMaxim Levitsky 	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1799b222b0b8SMaxim Levitsky 		/*
1800b222b0b8SMaxim Levitsky 		 * Reload the guest's PDPTRs since after a migration
1801b222b0b8SMaxim Levitsky 		 * the guest CR3 might be restored prior to setting the nested
1802b222b0b8SMaxim Levitsky 		 * state which can lead to a load of wrong PDPTRs.
1803b222b0b8SMaxim Levitsky 		 */
18042df4a5ebSLai Jiangshan 		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1805232f75d3SMaxim Levitsky 			return false;
1806232f75d3SMaxim Levitsky 
1807232f75d3SMaxim Levitsky 	if (!nested_svm_vmrun_msrpm(svm)) {
1808232f75d3SMaxim Levitsky 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1809232f75d3SMaxim Levitsky 		vcpu->run->internal.suberror =
1810232f75d3SMaxim Levitsky 			KVM_INTERNAL_ERROR_EMULATION;
1811232f75d3SMaxim Levitsky 		vcpu->run->internal.ndata = 0;
1812232f75d3SMaxim Levitsky 		return false;
1813232f75d3SMaxim Levitsky 	}
1814232f75d3SMaxim Levitsky 
18153f4a812eSVitaly Kuznetsov 	if (kvm_hv_verify_vp_assist(vcpu))
18163f4a812eSVitaly Kuznetsov 		return false;
18173f4a812eSVitaly Kuznetsov 
1818232f75d3SMaxim Levitsky 	return true;
1819232f75d3SMaxim Levitsky }
1820232f75d3SMaxim Levitsky 
182133b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
1822f7e57078SSean Christopherson 	.leave_nested = svm_leave_nested,
18237709aba8SSean Christopherson 	.is_exception_vmexit = nested_svm_is_exception_vmexit,
182433b22172SPaolo Bonzini 	.check_events = svm_check_nested_events,
1825cb6a32c2SSean Christopherson 	.triple_fault = nested_svm_triple_fault,
1826a7d5c7ceSPaolo Bonzini 	.get_nested_state_pages = svm_get_nested_state_pages,
1827cc440cdaSPaolo Bonzini 	.get_state = svm_get_nested_state,
1828cc440cdaSPaolo Bonzini 	.set_state = svm_set_nested_state,
1829b0c9c25eSVitaly Kuznetsov 	.hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
183033b22172SPaolo Bonzini };
1831