// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

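/*
 * Reflect a page fault taken while walking L1's nested page tables into a
 * nested SVM_EXIT_NPF.  Per the APM, the low 32 bits of exit_info_1 carry
 * the fault's error code and the high bits describe its cause (not yet
 * tracked, see the TODO below); exit_info_2 carries the faulting GPA.
 */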
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

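/*
 * Read one of the four 8-byte PDPTEs that nested_cr3 points at when L1
 * uses PAE paging for its nested page tables; a failed read is treated
 * as a not-present PDPTE.
 */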
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.
	 * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
	 * match the current vCPU state.  CR0.WP is explicitly ignored, while
	 * CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

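/*
 * While L2 runs, the effective intercepts in vmcb02 are the union of L1's
 * intercepts (vmcb01) and the guest's (the cached vmcb12 controls), minus
 * the few intercepts that L0 deliberately drops below.
 */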
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore the guest's SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid is not copied; it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

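	/*
	 * msrpm_offsets[] lists the 32-bit words of KVM's bitmap that may
	 * contain zero (pass-through) bits; each word covers 16 MSRs at two
	 * permission bits (read/write) apiece.  p is scaled by 4 below
	 * because the offsets are in u32 units while the GPA is in bytes.
	 */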
	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap addresses are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
				       struct vmcb_control_area *control)
{
	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_vmcb02_prepare_save.
	 */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit.  @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
		return -EINVAL;

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

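/*
 * vmcb12's clean bits allow skipping guest state that L1 has not modified
 * since the previous VMRUN of the same vmcb12.  Switching to a different
 * vmcb12 GPA invalidates that tracking, so everything must be reloaded.
 */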
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

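/*
 * Build vmcb02's control area: most fields come from the cached vmcb12
 * controls, while fields that L0 owns (the IOPM/MSRPM bases, the GIF and
 * interrupt-masking bits of int_ctl, the TSC offset) are taken from or
 * combined with vmcb01.
 */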
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
		vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl             =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

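/*
 * Emulate VMRUN: switch to vmcb02, layer vmcb12's control and save state
 * on top of L1's, and load L2's CR3.  A failure here is turned into a
 * nested #VMEXIT with SVM_EXIT_ERR by nested_svm_vmrun().
 */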
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm), true);
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
	    !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
			  struct vmcb_save_area *to_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
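	/*
	 * VMRUN is only legal at CPL 0 (see nested_svm_check_permissions()),
	 * so the host context described by this state always has CPL 0.
	 */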
	to_save->cpl = 0;
}

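/*
 * Copy the fields that VMLOAD/VMSAVE move between a VMCB and the CPU:
 * the FS/GS/TR/LDTR segments plus the SYSCALL, SYSENTER and
 * KERNEL_GS_BASE MSR state.
 */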
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

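/*
 * Consult L1's MSR permission bitmap to decide whether an MSR access that
 * exited from L2 is L1's to handle.  Each MSR has two adjacent bits in the
 * bitmap (read then write), sixteen MSRs per 32-bit word.
 */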
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32-bit units, but we need it in 8-bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

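/*
 * Consult L1's I/O permission bitmap: one bit per port, so an access of
 * @size bytes tests @size consecutive bits starting at @port and may
 * straddle a byte boundary, hence reading one or two bytes (iopm_len).
 */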
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

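/*
 * Decide whether an exit taken while running L2 belongs to L1
 * (NESTED_EXIT_DONE, i.e. reflect it as a nested #VMEXIT) or should be
 * handled by L0 (NESTED_EXIT_HOST).
 */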
1010883b0a91SJoerg Roedel static int nested_svm_intercept(struct vcpu_svm *svm)
1011883b0a91SJoerg Roedel {
1012883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1013883b0a91SJoerg Roedel 	int vmexit = NESTED_EXIT_HOST;
1014883b0a91SJoerg Roedel 
1015883b0a91SJoerg Roedel 	switch (exit_code) {
1016883b0a91SJoerg Roedel 	case SVM_EXIT_MSR:
1017883b0a91SJoerg Roedel 		vmexit = nested_svm_exit_handled_msr(svm);
1018883b0a91SJoerg Roedel 		break;
1019883b0a91SJoerg Roedel 	case SVM_EXIT_IOIO:
1020883b0a91SJoerg Roedel 		vmexit = nested_svm_intercept_ioio(svm);
1021883b0a91SJoerg Roedel 		break;
1022883b0a91SJoerg Roedel 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
102303bfeeb9SBabu Moger 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1024883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1025883b0a91SJoerg Roedel 		break;
1026883b0a91SJoerg Roedel 	}
1027883b0a91SJoerg Roedel 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
102830abaa88SBabu Moger 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1029883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1030883b0a91SJoerg Roedel 		break;
1031883b0a91SJoerg Roedel 	}
1032883b0a91SJoerg Roedel 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
10337c86663bSPaolo Bonzini 		/*
10347c86663bSPaolo Bonzini 		 * Host-intercepted exceptions have been checked already in
10357c86663bSPaolo Bonzini 		 * nested_svm_exit_special.  There is nothing to do here,
10367c86663bSPaolo Bonzini 		 * the vmexit is injected by svm_check_nested_events.
10377c86663bSPaolo Bonzini 		 */
1038883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1039883b0a91SJoerg Roedel 		break;
1040883b0a91SJoerg Roedel 	}
1041883b0a91SJoerg Roedel 	case SVM_EXIT_ERR: {
1042883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1043883b0a91SJoerg Roedel 		break;
1044883b0a91SJoerg Roedel 	}
1045883b0a91SJoerg Roedel 	default: {
1046c62e2e94SBabu Moger 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1047883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1048883b0a91SJoerg Roedel 	}
1049883b0a91SJoerg Roedel 	}
1050883b0a91SJoerg Roedel 
1051883b0a91SJoerg Roedel 	return vmexit;
1052883b0a91SJoerg Roedel }
1053883b0a91SJoerg Roedel 
1054883b0a91SJoerg Roedel int nested_svm_exit_handled(struct vcpu_svm *svm)
1055883b0a91SJoerg Roedel {
1056883b0a91SJoerg Roedel 	int vmexit;
1057883b0a91SJoerg Roedel 
1058883b0a91SJoerg Roedel 	vmexit = nested_svm_intercept(svm);
1059883b0a91SJoerg Roedel 
1060883b0a91SJoerg Roedel 	if (vmexit == NESTED_EXIT_DONE)
1061883b0a91SJoerg Roedel 		nested_svm_vmexit(svm);
1062883b0a91SJoerg Roedel 
1063883b0a91SJoerg Roedel 	return vmexit;
1064883b0a91SJoerg Roedel }
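
/*
 * Note for callers: NESTED_EXIT_DONE means the exit has already been
 * reflected to L1 via nested_svm_vmexit() above, while NESTED_EXIT_HOST
 * means L0 should go on to handle the exit itself.
 */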
1065883b0a91SJoerg Roedel 
106663129754SPaolo Bonzini int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1067883b0a91SJoerg Roedel {
106863129754SPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
106963129754SPaolo Bonzini 		kvm_queue_exception(vcpu, UD_VECTOR);
1070883b0a91SJoerg Roedel 		return 1;
1071883b0a91SJoerg Roedel 	}
1072883b0a91SJoerg Roedel 
107363129754SPaolo Bonzini 	if (to_svm(vcpu)->vmcb->save.cpl) {
107463129754SPaolo Bonzini 		kvm_inject_gp(vcpu, 0);
1075883b0a91SJoerg Roedel 		return 1;
1076883b0a91SJoerg Roedel 	}
1077883b0a91SJoerg Roedel 
1078883b0a91SJoerg Roedel 	return 0;
1079883b0a91SJoerg Roedel }
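
/*
 * The SVM instruction intercepts (VMRUN, VMLOAD, VMSAVE, STGI, CLGI,
 * INVLPGA) call this first: these instructions raise #UD unless
 * EFER.SVME is set and paging is enabled, and #GP when executed
 * outside CPL 0.
 */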
1080883b0a91SJoerg Roedel 
10817c86663bSPaolo Bonzini static bool nested_exit_on_exception(struct vcpu_svm *svm)
1082883b0a91SJoerg Roedel {
10837c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1084883b0a91SJoerg Roedel 
10859780d51dSBabu Moger 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
10867c86663bSPaolo Bonzini }
1087883b0a91SJoerg Roedel 
10887c86663bSPaolo Bonzini static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
10897c86663bSPaolo Bonzini {
10907c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1091883b0a91SJoerg Roedel 
1092883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1093883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code_hi = 0;
10947c86663bSPaolo Bonzini 
10957c86663bSPaolo Bonzini 	if (svm->vcpu.arch.exception.has_error_code)
10967c86663bSPaolo Bonzini 		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1097883b0a91SJoerg Roedel 
1098883b0a91SJoerg Roedel 	/*
1099883b0a91SJoerg Roedel 	 * EXITINFO2 is undefined for all exception intercepts other
1100883b0a91SJoerg Roedel 	 * than #PF.
1101883b0a91SJoerg Roedel 	 */
11027c86663bSPaolo Bonzini 	if (nr == PF_VECTOR) {
1103883b0a91SJoerg Roedel 		if (svm->vcpu.arch.exception.nested_apf)
1104883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1105883b0a91SJoerg Roedel 		else if (svm->vcpu.arch.exception.has_payload)
1106883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1107883b0a91SJoerg Roedel 		else
1108883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
11097c86663bSPaolo Bonzini 	} else if (nr == DB_VECTOR) {
11107c86663bSPaolo Bonzini 		/* See inject_pending_event.  */
11117c86663bSPaolo Bonzini 		kvm_deliver_exception_payload(&svm->vcpu);
11127c86663bSPaolo Bonzini 		if (svm->vcpu.arch.dr7 & DR7_GD) {
11137c86663bSPaolo Bonzini 			svm->vcpu.arch.dr7 &= ~DR7_GD;
11147c86663bSPaolo Bonzini 			kvm_update_dr7(&svm->vcpu);
11157c86663bSPaolo Bonzini 		}
11167c86663bSPaolo Bonzini 	} else {
11177c86663bSPaolo Bonzini 		WARN_ON(svm->vcpu.arch.exception.has_payload);
11177c86663bSPaolo Bonzini 	}
1118883b0a91SJoerg Roedel 
11197c86663bSPaolo Bonzini 	nested_svm_vmexit(svm);
1120883b0a91SJoerg Roedel }
1121883b0a91SJoerg Roedel 
11225b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
11235b672408SPaolo Bonzini {
1124c62e2e94SBabu Moger 	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
11255b672408SPaolo Bonzini }
11265b672408SPaolo Bonzini 
112733b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1128883b0a91SJoerg Roedel {
1129883b0a91SJoerg Roedel 	struct vcpu_svm *svm = to_svm(vcpu);
1130883b0a91SJoerg Roedel 	bool block_nested_events =
1131bd279629SPaolo Bonzini 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
11325b672408SPaolo Bonzini 	struct kvm_lapic *apic = vcpu->arch.apic;
11335b672408SPaolo Bonzini 
11345b672408SPaolo Bonzini 	if (lapic_in_kernel(vcpu) &&
11355b672408SPaolo Bonzini 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
11365b672408SPaolo Bonzini 		if (block_nested_events)
11375b672408SPaolo Bonzini 			return -EBUSY;
11385b672408SPaolo Bonzini 		if (!nested_exit_on_init(svm))
11395b672408SPaolo Bonzini 			return 0;
11403a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
11415b672408SPaolo Bonzini 		return 0;
11425b672408SPaolo Bonzini 	}
1143883b0a91SJoerg Roedel 
11447c86663bSPaolo Bonzini 	if (vcpu->arch.exception.pending) {
11454020da3bSMaxim Levitsky 		/*
11464020da3bSMaxim Levitsky 		 * Only a pending nested run can block a pending exception.
11474020da3bSMaxim Levitsky 		 * Otherwise an injected NMI/interrupt should either be
11484020da3bSMaxim Levitsky 		 * lost or delivered to the nested hypervisor in the
11494020da3bSMaxim Levitsky 		 * EXITINTINFO vmcb field while the pending exception is delivered.
11504020da3bSMaxim Levitsky 		 */
11514020da3bSMaxim Levitsky 		if (svm->nested.nested_run_pending)
11527c86663bSPaolo Bonzini 			return -EBUSY;
11537c86663bSPaolo Bonzini 		if (!nested_exit_on_exception(svm))
11547c86663bSPaolo Bonzini 			return 0;
11557c86663bSPaolo Bonzini 		nested_svm_inject_exception_vmexit(svm);
11567c86663bSPaolo Bonzini 		return 0;
11577c86663bSPaolo Bonzini 	}
11587c86663bSPaolo Bonzini 
1159221e7610SPaolo Bonzini 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
116055714cddSPaolo Bonzini 		if (block_nested_events)
116155714cddSPaolo Bonzini 			return -EBUSY;
1162221e7610SPaolo Bonzini 		if (!nested_exit_on_smi(svm))
1163221e7610SPaolo Bonzini 			return 0;
11643a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
116555714cddSPaolo Bonzini 		return 0;
116655714cddSPaolo Bonzini 	}
116755714cddSPaolo Bonzini 
1168221e7610SPaolo Bonzini 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
11699c3d370aSCathy Avery 		if (block_nested_events)
11709c3d370aSCathy Avery 			return -EBUSY;
1171221e7610SPaolo Bonzini 		if (!nested_exit_on_nmi(svm))
1172221e7610SPaolo Bonzini 			return 0;
11733a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
11749c3d370aSCathy Avery 		return 0;
11759c3d370aSCathy Avery 	}
11769c3d370aSCathy Avery 
1177221e7610SPaolo Bonzini 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1178883b0a91SJoerg Roedel 		if (block_nested_events)
1179883b0a91SJoerg Roedel 			return -EBUSY;
1180221e7610SPaolo Bonzini 		if (!nested_exit_on_intr(svm))
1181221e7610SPaolo Bonzini 			return 0;
11823a87c7e0SSean Christopherson 		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
11833a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1184883b0a91SJoerg Roedel 		return 0;
1185883b0a91SJoerg Roedel 	}
1186883b0a91SJoerg Roedel 
1187883b0a91SJoerg Roedel 	return 0;
1188883b0a91SJoerg Roedel }
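
/*
 * Summary of the checks above: nested events are considered in
 * priority order INIT, then pending exception, then SMI, NMI and
 * external interrupt.  -EBUSY asks the caller to retry while a nested
 * VMRUN is pending or (except for exceptions) while an event is being
 * reinjected; returning 0 without a vmexit lets the event be delivered
 * to L2 by the normal injection path; otherwise a synthetic vmexit
 * reflects the event to L1.
 */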
1189883b0a91SJoerg Roedel 
1190883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1191883b0a91SJoerg Roedel {
1192883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1193883b0a91SJoerg Roedel 
1194883b0a91SJoerg Roedel 	switch (exit_code) {
1195883b0a91SJoerg Roedel 	case SVM_EXIT_INTR:
1196883b0a91SJoerg Roedel 	case SVM_EXIT_NMI:
1197883b0a91SJoerg Roedel 	case SVM_EXIT_NPF:
1198883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
11997c86663bSPaolo Bonzini 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
12007c86663bSPaolo Bonzini 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
12017c86663bSPaolo Bonzini 
12024995a368SCathy Avery 		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
12039780d51dSBabu Moger 		    excp_bits)
12047c86663bSPaolo Bonzini 			return NESTED_EXIT_HOST;
12057c86663bSPaolo Bonzini 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
120668fd66f1SVitaly Kuznetsov 			 svm->vcpu.arch.apf.host_apf_flags)
1207a3535be7SPaolo Bonzini 			/* Trap async PF even if not shadowing */
1208883b0a91SJoerg Roedel 			return NESTED_EXIT_HOST;
1209883b0a91SJoerg Roedel 		break;
12107c86663bSPaolo Bonzini 	}
1211883b0a91SJoerg Roedel 	default:
1212883b0a91SJoerg Roedel 		break;
1213883b0a91SJoerg Roedel 	}
1214883b0a91SJoerg Roedel 
1215883b0a91SJoerg Roedel 	return NESTED_EXIT_CONTINUE;
1216883b0a91SJoerg Roedel }
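
/*
 * NESTED_EXIT_HOST here means the exit must be handled by L0 regardless
 * of L1's intercepts (host interrupts, NPT faults, exceptions L0 itself
 * intercepts, or a #PF carrying async page fault flags), while
 * NESTED_EXIT_CONTINUE hands the decision on to
 * nested_svm_exit_handled().
 */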
121733b22172SPaolo Bonzini 
1218cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1219cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1220cc440cdaSPaolo Bonzini 				u32 user_data_size)
1221cc440cdaSPaolo Bonzini {
1222cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm;
1223cc440cdaSPaolo Bonzini 	struct kvm_nested_state kvm_state = {
1224cc440cdaSPaolo Bonzini 		.flags = 0,
1225cc440cdaSPaolo Bonzini 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1226cc440cdaSPaolo Bonzini 		.size = sizeof(kvm_state),
1227cc440cdaSPaolo Bonzini 	};
1228cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1229cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
1230cc440cdaSPaolo Bonzini 
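	/* KVM_CAP_NESTED_STATE queries pass a NULL vcpu just to size the buffer. */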
1231cc440cdaSPaolo Bonzini 	if (!vcpu)
1232cc440cdaSPaolo Bonzini 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1233cc440cdaSPaolo Bonzini 
1234cc440cdaSPaolo Bonzini 	svm = to_svm(vcpu);
1235cc440cdaSPaolo Bonzini 
1236cc440cdaSPaolo Bonzini 	if (user_data_size < kvm_state.size)
1237cc440cdaSPaolo Bonzini 		goto out;
1238cc440cdaSPaolo Bonzini 
1239cc440cdaSPaolo Bonzini 	/* First fill in the header and copy it out.  */
1240cc440cdaSPaolo Bonzini 	if (is_guest_mode(vcpu)) {
12410dd16b5bSMaxim Levitsky 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1242cc440cdaSPaolo Bonzini 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1243cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1244cc440cdaSPaolo Bonzini 
1245cc440cdaSPaolo Bonzini 		if (svm->nested.nested_run_pending)
1246cc440cdaSPaolo Bonzini 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1247cc440cdaSPaolo Bonzini 	}
1248cc440cdaSPaolo Bonzini 
1249cc440cdaSPaolo Bonzini 	if (gif_set(svm))
1250cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1251cc440cdaSPaolo Bonzini 
1252cc440cdaSPaolo Bonzini 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1253cc440cdaSPaolo Bonzini 		return -EFAULT;
1254cc440cdaSPaolo Bonzini 
1255cc440cdaSPaolo Bonzini 	if (!is_guest_mode(vcpu))
1256cc440cdaSPaolo Bonzini 		goto out;
1257cc440cdaSPaolo Bonzini 
1258cc440cdaSPaolo Bonzini 	/*
1259cc440cdaSPaolo Bonzini 	 * Copy over the full size of the VMCB rather than just the size
1260cc440cdaSPaolo Bonzini 	 * of the structs.
1261cc440cdaSPaolo Bonzini 	 */
1262cc440cdaSPaolo Bonzini 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1263cc440cdaSPaolo Bonzini 		return -EFAULT;
1264cc440cdaSPaolo Bonzini 	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1265cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->control)))
1266cc440cdaSPaolo Bonzini 		return -EFAULT;
12674995a368SCathy Avery 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1268cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->save)))
1269cc440cdaSPaolo Bonzini 		return -EFAULT;
1270cc440cdaSPaolo Bonzini out:
1271cc440cdaSPaolo Bonzini 	return kvm_state.size;
1272cc440cdaSPaolo Bonzini }
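
/*
 * Hypothetical userspace sketch (not kernel code) of driving the
 * function above via the KVM_GET_NESTED_STATE vCPU ioctl; vcpu_fd and
 * err() are assumed from the surrounding harness.  Userspace announces
 * its buffer size in .size; if the buffer is too small, the ioctl
 * fails with E2BIG and the required size is written back.
 *
 *	struct {
 *		struct kvm_nested_state ks;
 *		__u8 vmcb[KVM_STATE_NESTED_SVM_VMCB_SIZE];
 *	} state = { .ks = { .size = sizeof(state) } };
 *
 *	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &state) < 0)
 *		err(1, "KVM_GET_NESTED_STATE");
 */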
1273cc440cdaSPaolo Bonzini 
1274cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1275cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1276cc440cdaSPaolo Bonzini 				struct kvm_nested_state *kvm_state)
1277cc440cdaSPaolo Bonzini {
1278cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
1279cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1280cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
12816ccbd29aSJoerg Roedel 	struct vmcb_control_area *ctl;
12826ccbd29aSJoerg Roedel 	struct vmcb_save_area *save;
1283dbc4739bSSean Christopherson 	unsigned long cr0;
12846ccbd29aSJoerg Roedel 	int ret;
1285cc440cdaSPaolo Bonzini 
12866ccbd29aSJoerg Roedel 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
12876ccbd29aSJoerg Roedel 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
12886ccbd29aSJoerg Roedel 
1289cc440cdaSPaolo Bonzini 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1290cc440cdaSPaolo Bonzini 		return -EINVAL;
1291cc440cdaSPaolo Bonzini 
1292cc440cdaSPaolo Bonzini 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1293cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_RUN_PENDING |
1294cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_GIF_SET))
1295cc440cdaSPaolo Bonzini 		return -EINVAL;
1296cc440cdaSPaolo Bonzini 
1297cc440cdaSPaolo Bonzini 	/*
1298cc440cdaSPaolo Bonzini 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1299cc440cdaSPaolo Bonzini 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1300cc440cdaSPaolo Bonzini 	 */
1301cc440cdaSPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME)) {
1302cc440cdaSPaolo Bonzini 		/* GIF=1 and no guest mode are required if SVME=0.  */
1303cc440cdaSPaolo Bonzini 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1304cc440cdaSPaolo Bonzini 			return -EINVAL;
1305cc440cdaSPaolo Bonzini 	}
1306cc440cdaSPaolo Bonzini 
1307cc440cdaSPaolo Bonzini 	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1308cc440cdaSPaolo Bonzini 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1309cc440cdaSPaolo Bonzini 		return -EINVAL;
1310cc440cdaSPaolo Bonzini 
1311cc440cdaSPaolo Bonzini 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1312cc440cdaSPaolo Bonzini 		svm_leave_nested(svm);
1313d5cd6f34SVitaly Kuznetsov 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1314d5cd6f34SVitaly Kuznetsov 		return 0;
1315cc440cdaSPaolo Bonzini 	}
1316cc440cdaSPaolo Bonzini 
1317cc440cdaSPaolo Bonzini 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1318cc440cdaSPaolo Bonzini 		return -EINVAL;
1319cc440cdaSPaolo Bonzini 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1320cc440cdaSPaolo Bonzini 		return -EINVAL;
1321cc440cdaSPaolo Bonzini 
13226ccbd29aSJoerg Roedel 	ret  = -ENOMEM;
1323eba04b20SSean Christopherson 	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1324eba04b20SSean Christopherson 	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
13256ccbd29aSJoerg Roedel 	if (!ctl || !save)
13266ccbd29aSJoerg Roedel 		goto out_free;
13276ccbd29aSJoerg Roedel 
13286ccbd29aSJoerg Roedel 	ret = -EFAULT;
13296ccbd29aSJoerg Roedel 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
13306ccbd29aSJoerg Roedel 		goto out_free;
13316ccbd29aSJoerg Roedel 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
13326ccbd29aSJoerg Roedel 		goto out_free;
13336ccbd29aSJoerg Roedel 
13346ccbd29aSJoerg Roedel 	ret = -EINVAL;
1335ee695f22SKrish Sadhukhan 	if (!nested_vmcb_check_controls(vcpu, ctl))
13366ccbd29aSJoerg Roedel 		goto out_free;
1337cc440cdaSPaolo Bonzini 
1338cc440cdaSPaolo Bonzini 	/*
1339cc440cdaSPaolo Bonzini 	 * Processor state contains L2 state.  Check that it is
1340cb9b6a1bSPaolo Bonzini 	 * valid for guest mode (see nested_vmcb_check_save).
1341cc440cdaSPaolo Bonzini 	 */
1342cc440cdaSPaolo Bonzini 	cr0 = kvm_read_cr0(vcpu);
1343cc440cdaSPaolo Bonzini 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
13446ccbd29aSJoerg Roedel 		goto out_free;
1345cc440cdaSPaolo Bonzini 
1346cc440cdaSPaolo Bonzini 	/*
1347cc440cdaSPaolo Bonzini 	 * Validate host state saved from before VMRUN (see
1348cc440cdaSPaolo Bonzini 	 * nested_svm_check_permissions).
1349cc440cdaSPaolo Bonzini 	 */
13506906e06dSKrish Sadhukhan 	if (!(save->cr0 & X86_CR0_PG) ||
13516906e06dSKrish Sadhukhan 	    !(save->cr0 & X86_CR0_PE) ||
13526906e06dSKrish Sadhukhan 	    (save->rflags & X86_EFLAGS_VM) ||
135363129754SPaolo Bonzini 	    !nested_vmcb_valid_sregs(vcpu, save))
13546ccbd29aSJoerg Roedel 		goto out_free;
1355cc440cdaSPaolo Bonzini 
1356cc440cdaSPaolo Bonzini 	/*
1357b222b0b8SMaxim Levitsky 	 * While the nested guest CR3 is already checked and set by
1358b222b0b8SMaxim Levitsky 	 * KVM_SET_SREGS, it was set before the nested state was loaded,
1359b222b0b8SMaxim Levitsky 	 * so the MMU might not be initialized correctly.
1360b222b0b8SMaxim Levitsky 	 * Set it again to fix this.
1361b222b0b8SMaxim Levitsky 	 */
1362b222b0b8SMaxim Levitsky 
1363b222b0b8SMaxim Levitsky 	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1364b222b0b8SMaxim Levitsky 				  nested_npt_enabled(svm), false);
1365b222b0b8SMaxim Levitsky 	if (WARN_ON_ONCE(ret))
1366b222b0b8SMaxim Levitsky 		goto out_free;
1367b222b0b8SMaxim Levitsky 
1369b222b0b8SMaxim Levitsky 	/*
13704995a368SCathy Avery 	 * All checks done, we can enter guest mode. Userspace provides
13714995a368SCathy Avery 	 * vmcb12.control, which will be combined with L1's controls and
13724995a368SCathy Avery 	 * stored into vmcb02, and the L1 save state which we store in vmcb01.
13734995a368SCathy Avery 	 * If needed, the L2 registers are moved from the current VMCB to vmcb02.
1374cc440cdaSPaolo Bonzini 	 */
137581f76adaSMaxim Levitsky 
13769d290e16SMaxim Levitsky 	if (is_guest_mode(vcpu))
13779d290e16SMaxim Levitsky 		svm_leave_nested(svm);
13789d290e16SMaxim Levitsky 	else
13799d290e16SMaxim Levitsky 		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
13809d290e16SMaxim Levitsky 
1381063ab16cSMaxim Levitsky 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1382063ab16cSMaxim Levitsky 
138381f76adaSMaxim Levitsky 	svm->nested.nested_run_pending =
138481f76adaSMaxim Levitsky 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
138581f76adaSMaxim Levitsky 
13860dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1387c08f390aSPaolo Bonzini 
1388*0a758290SVitaly Kuznetsov 	svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
13899e8f0fbfSPaolo Bonzini 	nested_load_control_from_vmcb12(svm, ctl);
13904995a368SCathy Avery 
13914995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
13929e8f0fbfSPaolo Bonzini 	nested_vmcb02_prepare_control(svm);
1393a7d5c7ceSPaolo Bonzini 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
13946ccbd29aSJoerg Roedel 	ret = 0;
13956ccbd29aSJoerg Roedel out_free:
13966ccbd29aSJoerg Roedel 	kfree(save);
13976ccbd29aSJoerg Roedel 	kfree(ctl);
13986ccbd29aSJoerg Roedel 
13996ccbd29aSJoerg Roedel 	return ret;
1400cc440cdaSPaolo Bonzini }
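
/*
 * Hypothetical userspace counterpart (not kernel code): on the
 * migration target, the blob captured by KVM_GET_NESTED_STATE is
 * restored with the KVM_SET_NESTED_STATE ioctl, typically after
 * KVM_SET_SREGS and before the first KVM_RUN:
 *
 *	if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state) < 0)
 *		err(1, "KVM_SET_NESTED_STATE");
 */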
1401cc440cdaSPaolo Bonzini 
1402232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1403232f75d3SMaxim Levitsky {
1404232f75d3SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
1405232f75d3SMaxim Levitsky 
1406232f75d3SMaxim Levitsky 	if (WARN_ON(!is_guest_mode(vcpu)))
1407232f75d3SMaxim Levitsky 		return true;
1408232f75d3SMaxim Levitsky 
1409158a48ecSMaxim Levitsky 	if (!vcpu->arch.pdptrs_from_userspace &&
1410158a48ecSMaxim Levitsky 	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1411b222b0b8SMaxim Levitsky 		/*
1412b222b0b8SMaxim Levitsky 		 * Reload the guest's PDPTRs since after a migration
1413b222b0b8SMaxim Levitsky 		 * the guest CR3 might be restored prior to setting the nested
1414b222b0b8SMaxim Levitsky 		 * state, which can lead to loading the wrong PDPTRs.
1415b222b0b8SMaxim Levitsky 		 */
1416b222b0b8SMaxim Levitsky 		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
1417232f75d3SMaxim Levitsky 			return false;
1418232f75d3SMaxim Levitsky 
1419232f75d3SMaxim Levitsky 	if (!nested_svm_vmrun_msrpm(svm)) {
1420232f75d3SMaxim Levitsky 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1421232f75d3SMaxim Levitsky 		vcpu->run->internal.suberror =
1422232f75d3SMaxim Levitsky 			KVM_INTERNAL_ERROR_EMULATION;
1423232f75d3SMaxim Levitsky 		vcpu->run->internal.ndata = 0;
1424232f75d3SMaxim Levitsky 		return false;
1425232f75d3SMaxim Levitsky 	}
1426232f75d3SMaxim Levitsky 
1427232f75d3SMaxim Levitsky 	return true;
1428232f75d3SMaxim Levitsky }
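
/*
 * Note: this runs from the KVM_REQ_GET_NESTED_STATE_PAGES request that
 * svm_set_nested_state() raises, i.e. on the next KVM_RUN rather than
 * during the ioctl itself, since the guest memory map may not be fully
 * restored at that point.  Only then can L1's MSR permission bitmap be
 * merged and, with shadow paging, the PDPTRs be reloaded.
 */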
1429232f75d3SMaxim Levitsky 
143033b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
143133b22172SPaolo Bonzini 	.check_events = svm_check_nested_events,
1432cb6a32c2SSean Christopherson 	.triple_fault = nested_svm_triple_fault,
1433a7d5c7ceSPaolo Bonzini 	.get_nested_state_pages = svm_get_nested_state_pages,
1434cc440cdaSPaolo Bonzini 	.get_state = svm_get_nested_state,
1435cc440cdaSPaolo Bonzini 	.set_state = svm_set_nested_state,
143633b22172SPaolo Bonzini };
1437