// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

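/*
 * Reflect a fault encountered while using the nested (NPT) page tables as
 * an SVM_EXIT_NPF vmexit to L1, synthesizing the exit information fields
 * if the current exit was not already an NPF exit.
 */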
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

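/*
 * Deliver a page fault generated while running L2: reflect it to L1 as an
 * exception vmexit if L1 intercepts #PF (and no nested run is pending),
 * otherwise inject it directly into L2.
 */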
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

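/*
 * Read a PDPTE from the PDPT that L1 provided via the nested CR3; installed
 * as the ->get_pdptr callback of the shadow NPT MMU.
 */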
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

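/*
 * Switch the vcpu to the shadow NPT MMU so that, while L2 runs, address
 * translation goes through L1's nested page tables (nested_cr3).
 */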
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

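/*
 * Recompute the intercept bits of the active VMCB: while L2 is running
 * they are the union of L1's intercepts (vmcb01) and the intercepts
 * requested by the nested hypervisor (svm->nested.ctl), with a few
 * adjustments that only make sense for L0.
 */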
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

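/*
 * Consistency checks on the VMCB12 control area that would make the nested
 * guest unrunnable or that are not supported by KVM.
 */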
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
				       struct vmcb_control_area *control)
{
	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_prepare_vmcb_save.
	 */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

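/*
 * Cache the control area of VMCB12 in svm->nested.ctl so that it cannot
 * change under KVM's feet; the low 12 bits of the bitmap addresses are
 * ignored by hardware and therefore masked off.
 */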
static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

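/*
 * Load the L2 guest state from VMCB12 into VMCB02.  The clean bits of
 * VMCB12 (together with last_vmcb12_gpa) let KVM skip segment, descriptor
 * table and debug register state that has not changed since the last
 * VMRUN of the same VMCB12.
 */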
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */

	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

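/*
 * Build the control area of VMCB02: some fields are inherited from VMCB01
 * (L1's settings), some come from svm->nested.ctl (what the nested
 * hypervisor asked for), and some are recomputed, such as the merged
 * intercept vectors and int_ctl.
 */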
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl             =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

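/*
 * Emulate the VMRUN transition into L2: switch to VMCB02, build its
 * control and save state, and load L2's CR3.  The VMCB12 control area is
 * expected to have been cached in svm->nested.ctl by the caller.
 */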
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

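/*
 * Handle a VMRUN vmexit from L1: map VMCB12 from guest memory, validate
 * it, save L1 state into VMCB01 and enter guest (L2) mode.  On failure,
 * a synthetic SVM_EXIT_ERR exit is reported back to L1.
 */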
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	++vcpu->stat.nested_run;

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
	    !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

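/* Copy the state that VMLOAD and VMSAVE touch from one VMCB to another. */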
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

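/*
 * Emulate a #VMEXIT from L2 to L1: copy the L2 state and exit information
 * into VMCB12, switch back to VMCB01 and restore the L1 processor state
 * that was saved at VMRUN time.
 */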
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);
	WARN_ON_ONCE(svm->vmcb->control.exit_code != SVM_EXIT_VMRUN);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->nested.vmcb02);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

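/*
 * Check L1's MSR permission map to decide whether an MSR access by L2
 * should cause a vmexit to L1 (NESTED_EXIT_DONE) or be handled by L0
 * (NESTED_EXIT_HOST).
 */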
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

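/*
 * Check L1's I/O permission map to decide whether an I/O instruction
 * executed by L2 should cause a vmexit to L1.
 */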
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

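/*
 * Decide whether the current vmexit, taken while running L2, is handled by
 * L0 (NESTED_EXIT_HOST) or must be reflected to L1 (NESTED_EXIT_DONE),
 * based on the intercepts that L1 requested.
 */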
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

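/*
 * SVM instructions are only legal with EFER.SVME set, paging enabled and
 * CPL 0; raise #UD or #GP as appropriate otherwise.
 */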
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

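/*
 * Synthesize an exception vmexit to L1 for the exception currently pending
 * on the vcpu, filling exit_info_1/2 with the error code and, for #PF, the
 * faulting address.
 */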
10277c86663bSPaolo Bonzini static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
10287c86663bSPaolo Bonzini {
10297c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1030883b0a91SJoerg Roedel 
1031883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1032883b0a91SJoerg Roedel 	svm->vmcb->control.exit_code_hi = 0;
10337c86663bSPaolo Bonzini 
10347c86663bSPaolo Bonzini 	if (svm->vcpu.arch.exception.has_error_code)
10357c86663bSPaolo Bonzini 		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1036883b0a91SJoerg Roedel 
1037883b0a91SJoerg Roedel 	/*
1038883b0a91SJoerg Roedel 	 * EXITINFO2 is undefined for all exception intercepts other
1039883b0a91SJoerg Roedel 	 * than #PF.
1040883b0a91SJoerg Roedel 	 */
10417c86663bSPaolo Bonzini 	if (nr == PF_VECTOR) {
1042883b0a91SJoerg Roedel 		if (svm->vcpu.arch.exception.nested_apf)
1043883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1044883b0a91SJoerg Roedel 		else if (svm->vcpu.arch.exception.has_payload)
1045883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1046883b0a91SJoerg Roedel 		else
1047883b0a91SJoerg Roedel 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
10487c86663bSPaolo Bonzini 	} else if (nr == DB_VECTOR) {
10497c86663bSPaolo Bonzini 		/* See inject_pending_event.  */
10507c86663bSPaolo Bonzini 		kvm_deliver_exception_payload(&svm->vcpu);
10517c86663bSPaolo Bonzini 		if (svm->vcpu.arch.dr7 & DR7_GD) {
10527c86663bSPaolo Bonzini 			svm->vcpu.arch.dr7 &= ~DR7_GD;
10537c86663bSPaolo Bonzini 			kvm_update_dr7(&svm->vcpu);
10547c86663bSPaolo Bonzini 		}
10557c86663bSPaolo Bonzini 	} else {
10567c86663bSPaolo Bonzini 		WARN_ON(svm->vcpu.arch.exception.has_payload);
	}
1057883b0a91SJoerg Roedel 
10587c86663bSPaolo Bonzini 	nested_svm_vmexit(svm);
1059883b0a91SJoerg Roedel }
1060883b0a91SJoerg Roedel 
10615b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
10625b672408SPaolo Bonzini {
1063c62e2e94SBabu Moger 	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
10645b672408SPaolo Bonzini }
10655b672408SPaolo Bonzini 
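/*
 * Process pending events in priority order (INIT, exception, SMI, NMI,
 * interrupt), reflecting to L1 the ones it intercepts.  Returns -EBUSY
 * if the event must wait, e.g. because a nested VMRUN is in flight.
 */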
106633b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1067883b0a91SJoerg Roedel {
1068883b0a91SJoerg Roedel 	struct vcpu_svm *svm = to_svm(vcpu);
1069883b0a91SJoerg Roedel 	bool block_nested_events =
1070bd279629SPaolo Bonzini 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
10715b672408SPaolo Bonzini 	struct kvm_lapic *apic = vcpu->arch.apic;
10725b672408SPaolo Bonzini 
10735b672408SPaolo Bonzini 	if (lapic_in_kernel(vcpu) &&
10745b672408SPaolo Bonzini 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
10755b672408SPaolo Bonzini 		if (block_nested_events)
10765b672408SPaolo Bonzini 			return -EBUSY;
10775b672408SPaolo Bonzini 		if (!nested_exit_on_init(svm))
10785b672408SPaolo Bonzini 			return 0;
10793a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
10805b672408SPaolo Bonzini 		return 0;
10815b672408SPaolo Bonzini 	}
1082883b0a91SJoerg Roedel 
10837c86663bSPaolo Bonzini 	if (vcpu->arch.exception.pending) {
10844020da3bSMaxim Levitsky 		/*
10854020da3bSMaxim Levitsky 		 * Only a pending nested run can block a pending exception.
10864020da3bSMaxim Levitsky 		 * Otherwise an injected NMI/interrupt should either be
10874020da3bSMaxim Levitsky 		 * lost or delivered to the nested hypervisor in the EXITINTINFO
10884020da3bSMaxim Levitsky 		 * vmcb field, while delivering the pending exception.
10894020da3bSMaxim Levitsky 		 */
10904020da3bSMaxim Levitsky 		if (svm->nested.nested_run_pending)
10917c86663bSPaolo Bonzini 			return -EBUSY;
10927c86663bSPaolo Bonzini 		if (!nested_exit_on_exception(svm))
10937c86663bSPaolo Bonzini 			return 0;
10947c86663bSPaolo Bonzini 		nested_svm_inject_exception_vmexit(svm);
10957c86663bSPaolo Bonzini 		return 0;
10967c86663bSPaolo Bonzini 	}
10977c86663bSPaolo Bonzini 
1098221e7610SPaolo Bonzini 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
109955714cddSPaolo Bonzini 		if (block_nested_events)
110055714cddSPaolo Bonzini 			return -EBUSY;
1101221e7610SPaolo Bonzini 		if (!nested_exit_on_smi(svm))
1102221e7610SPaolo Bonzini 			return 0;
11033a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
110455714cddSPaolo Bonzini 		return 0;
110555714cddSPaolo Bonzini 	}
110655714cddSPaolo Bonzini 
1107221e7610SPaolo Bonzini 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
11089c3d370aSCathy Avery 		if (block_nested_events)
11099c3d370aSCathy Avery 			return -EBUSY;
1110221e7610SPaolo Bonzini 		if (!nested_exit_on_nmi(svm))
1111221e7610SPaolo Bonzini 			return 0;
11123a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
11139c3d370aSCathy Avery 		return 0;
11149c3d370aSCathy Avery 	}
11159c3d370aSCathy Avery 
1116221e7610SPaolo Bonzini 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1117883b0a91SJoerg Roedel 		if (block_nested_events)
1118883b0a91SJoerg Roedel 			return -EBUSY;
1119221e7610SPaolo Bonzini 		if (!nested_exit_on_intr(svm))
1120221e7610SPaolo Bonzini 			return 0;
11213a87c7e0SSean Christopherson 		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
11223a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1123883b0a91SJoerg Roedel 		return 0;
1124883b0a91SJoerg Roedel 	}
1125883b0a91SJoerg Roedel 
1126883b0a91SJoerg Roedel 	return 0;
1127883b0a91SJoerg Roedel }
1128883b0a91SJoerg Roedel 
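/*
 * Identify exits that KVM must handle itself even while L2 is running:
 * physical INTR/NMI, NPF, exceptions that KVM intercepts in vmcb01, and
 * async page faults destined for the host.
 */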
1129883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1130883b0a91SJoerg Roedel {
1131883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1132883b0a91SJoerg Roedel 
1133883b0a91SJoerg Roedel 	switch (exit_code) {
1134883b0a91SJoerg Roedel 	case SVM_EXIT_INTR:
1135883b0a91SJoerg Roedel 	case SVM_EXIT_NMI:
1136883b0a91SJoerg Roedel 	case SVM_EXIT_NPF:
1137883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
11387c86663bSPaolo Bonzini 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
11397c86663bSPaolo Bonzini 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
11407c86663bSPaolo Bonzini 
11414995a368SCathy Avery 		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
11429780d51dSBabu Moger 		    excp_bits)
11437c86663bSPaolo Bonzini 			return NESTED_EXIT_HOST;
11447c86663bSPaolo Bonzini 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
114568fd66f1SVitaly Kuznetsov 			 svm->vcpu.arch.apf.host_apf_flags)
1146a3535be7SPaolo Bonzini 			/* Trap async PF even if not shadowing */
1147883b0a91SJoerg Roedel 			return NESTED_EXIT_HOST;
1148883b0a91SJoerg Roedel 		break;
11497c86663bSPaolo Bonzini 	}
1150883b0a91SJoerg Roedel 	default:
1151883b0a91SJoerg Roedel 		break;
1152883b0a91SJoerg Roedel 	}
1153883b0a91SJoerg Roedel 
1154883b0a91SJoerg Roedel 	return NESTED_EXIT_CONTINUE;
1155883b0a91SJoerg Roedel }
115633b22172SPaolo Bonzini 
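/*
 * KVM_GET_NESTED_STATE: fill in a header describing the nested
 * configuration and, if the vCPU is in guest mode, append the vmcb12
 * control area and the saved L1 state.
 */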
1157cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1158cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1159cc440cdaSPaolo Bonzini 				u32 user_data_size)
1160cc440cdaSPaolo Bonzini {
1161cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm;
1162cc440cdaSPaolo Bonzini 	struct kvm_nested_state kvm_state = {
1163cc440cdaSPaolo Bonzini 		.flags = 0,
1164cc440cdaSPaolo Bonzini 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1165cc440cdaSPaolo Bonzini 		.size = sizeof(kvm_state),
1166cc440cdaSPaolo Bonzini 	};
1167cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1168cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
1169cc440cdaSPaolo Bonzini 
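	/* With a NULL vcpu, just report the buffer size userspace needs. */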
1170cc440cdaSPaolo Bonzini 	if (!vcpu)
1171cc440cdaSPaolo Bonzini 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1172cc440cdaSPaolo Bonzini 
1173cc440cdaSPaolo Bonzini 	svm = to_svm(vcpu);
1174cc440cdaSPaolo Bonzini 
1175cc440cdaSPaolo Bonzini 	if (user_data_size < kvm_state.size)
1176cc440cdaSPaolo Bonzini 		goto out;
1177cc440cdaSPaolo Bonzini 
1178cc440cdaSPaolo Bonzini 	/* First fill in the header and copy it out.  */
1179cc440cdaSPaolo Bonzini 	if (is_guest_mode(vcpu)) {
11800dd16b5bSMaxim Levitsky 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1181cc440cdaSPaolo Bonzini 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1182cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1183cc440cdaSPaolo Bonzini 
1184cc440cdaSPaolo Bonzini 		if (svm->nested.nested_run_pending)
1185cc440cdaSPaolo Bonzini 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1186cc440cdaSPaolo Bonzini 	}
1187cc440cdaSPaolo Bonzini 
1188cc440cdaSPaolo Bonzini 	if (gif_set(svm))
1189cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1190cc440cdaSPaolo Bonzini 
1191cc440cdaSPaolo Bonzini 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1192cc440cdaSPaolo Bonzini 		return -EFAULT;
1193cc440cdaSPaolo Bonzini 
1194cc440cdaSPaolo Bonzini 	if (!is_guest_mode(vcpu))
1195cc440cdaSPaolo Bonzini 		goto out;
1196cc440cdaSPaolo Bonzini 
1197cc440cdaSPaolo Bonzini 	/*
1198cc440cdaSPaolo Bonzini 	 * Copy over the full size of the VMCB rather than just the size
1199cc440cdaSPaolo Bonzini 	 * of the structs.
1200cc440cdaSPaolo Bonzini 	 */
1201cc440cdaSPaolo Bonzini 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1202cc440cdaSPaolo Bonzini 		return -EFAULT;
1203cc440cdaSPaolo Bonzini 	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1204cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->control)))
1205cc440cdaSPaolo Bonzini 		return -EFAULT;
12064995a368SCathy Avery 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1207cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->save)))
1208cc440cdaSPaolo Bonzini 		return -EFAULT;
1209cc440cdaSPaolo Bonzini out:
1210cc440cdaSPaolo Bonzini 	return kvm_state.size;
1211cc440cdaSPaolo Bonzini }
1212cc440cdaSPaolo Bonzini 
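/*
 * KVM_SET_NESTED_STATE: validate the header and, when entering guest
 * mode, the vmcb12 control and save areas provided by userspace, then
 * rebuild vmcb01/vmcb02 and switch to vmcb02 as after a VMRUN.
 */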
1213cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1214cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1215cc440cdaSPaolo Bonzini 				struct kvm_nested_state *kvm_state)
1216cc440cdaSPaolo Bonzini {
1217cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
1218cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1219cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
12206ccbd29aSJoerg Roedel 	struct vmcb_control_area *ctl;
12216ccbd29aSJoerg Roedel 	struct vmcb_save_area *save;
12226ccbd29aSJoerg Roedel 	int ret;
1223cc440cdaSPaolo Bonzini 	u32 cr0;
1224cc440cdaSPaolo Bonzini 
12256ccbd29aSJoerg Roedel 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
12266ccbd29aSJoerg Roedel 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
12276ccbd29aSJoerg Roedel 
1228cc440cdaSPaolo Bonzini 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1229cc440cdaSPaolo Bonzini 		return -EINVAL;
1230cc440cdaSPaolo Bonzini 
1231cc440cdaSPaolo Bonzini 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1232cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_RUN_PENDING |
1233cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_GIF_SET))
1234cc440cdaSPaolo Bonzini 		return -EINVAL;
1235cc440cdaSPaolo Bonzini 
1236cc440cdaSPaolo Bonzini 	/*
1237cc440cdaSPaolo Bonzini 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1238cc440cdaSPaolo Bonzini 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1239cc440cdaSPaolo Bonzini 	 */
1240cc440cdaSPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME)) {
1241cc440cdaSPaolo Bonzini 		/* GIF=1 and no guest mode are required if SVME=0.  */
1242cc440cdaSPaolo Bonzini 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1243cc440cdaSPaolo Bonzini 			return -EINVAL;
1244cc440cdaSPaolo Bonzini 	}
1245cc440cdaSPaolo Bonzini 
1246cc440cdaSPaolo Bonzini 	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1247cc440cdaSPaolo Bonzini 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1248cc440cdaSPaolo Bonzini 		return -EINVAL;
1249cc440cdaSPaolo Bonzini 
1250cc440cdaSPaolo Bonzini 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1251cc440cdaSPaolo Bonzini 		svm_leave_nested(svm);
1252d5cd6f34SVitaly Kuznetsov 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1253d5cd6f34SVitaly Kuznetsov 		return 0;
1254cc440cdaSPaolo Bonzini 	}
1255cc440cdaSPaolo Bonzini 
1256cc440cdaSPaolo Bonzini 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1257cc440cdaSPaolo Bonzini 		return -EINVAL;
1258cc440cdaSPaolo Bonzini 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1259cc440cdaSPaolo Bonzini 		return -EINVAL;
1260cc440cdaSPaolo Bonzini 
12616ccbd29aSJoerg Roedel 	ret  = -ENOMEM;
1262eba04b20SSean Christopherson 	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1263eba04b20SSean Christopherson 	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
12646ccbd29aSJoerg Roedel 	if (!ctl || !save)
12656ccbd29aSJoerg Roedel 		goto out_free;
12666ccbd29aSJoerg Roedel 
12676ccbd29aSJoerg Roedel 	ret = -EFAULT;
12686ccbd29aSJoerg Roedel 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
12696ccbd29aSJoerg Roedel 		goto out_free;
12706ccbd29aSJoerg Roedel 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
12716ccbd29aSJoerg Roedel 		goto out_free;
12726ccbd29aSJoerg Roedel 
12736ccbd29aSJoerg Roedel 	ret = -EINVAL;
1274*ee695f22SKrish Sadhukhan 	if (!nested_vmcb_check_controls(vcpu, ctl))
12756ccbd29aSJoerg Roedel 		goto out_free;
1276cc440cdaSPaolo Bonzini 
1277cc440cdaSPaolo Bonzini 	/*
1278cc440cdaSPaolo Bonzini 	 * Processor state contains L2 state.  Check that it is
1279cb9b6a1bSPaolo Bonzini 	 * valid for guest mode (see nested_vmcb_check_save).
1280cc440cdaSPaolo Bonzini 	 */
1281cc440cdaSPaolo Bonzini 	cr0 = kvm_read_cr0(vcpu);
1282cc440cdaSPaolo Bonzini 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
12836ccbd29aSJoerg Roedel 		goto out_free;
1284cc440cdaSPaolo Bonzini 
1285cc440cdaSPaolo Bonzini 	/*
1286cc440cdaSPaolo Bonzini 	 * Validate host state saved from before VMRUN (see
1287cc440cdaSPaolo Bonzini 	 * nested_svm_check_permissions).
1288cc440cdaSPaolo Bonzini 	 */
12896906e06dSKrish Sadhukhan 	if (!(save->cr0 & X86_CR0_PG) ||
12906906e06dSKrish Sadhukhan 	    !(save->cr0 & X86_CR0_PE) ||
12916906e06dSKrish Sadhukhan 	    (save->rflags & X86_EFLAGS_VM) ||
129263129754SPaolo Bonzini 	    !nested_vmcb_valid_sregs(vcpu, save))
12936ccbd29aSJoerg Roedel 		goto out_free;
1294cc440cdaSPaolo Bonzini 
1295cc440cdaSPaolo Bonzini 	/*
12964995a368SCathy Avery 	 * All checks done, we can enter guest mode. Userspace provides
12974995a368SCathy Avery 	 * vmcb12.control, which will be combined with the L1 intercepts and
12984995a368SCathy Avery 	 * stored into vmcb02, and the L1 save state, which we store in vmcb01.
12994995a368SCathy Avery 	 * If needed, the L2 registers are moved from the current VMCB to vmcb02.
1300cc440cdaSPaolo Bonzini 	 */
130181f76adaSMaxim Levitsky 
130281f76adaSMaxim Levitsky 	svm->nested.nested_run_pending =
130381f76adaSMaxim Levitsky 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
130481f76adaSMaxim Levitsky 
13050dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
13064995a368SCathy Avery 	if (svm->current_vmcb == &svm->vmcb01)
13074995a368SCathy Avery 		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1308c08f390aSPaolo Bonzini 
1309c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.es = save->es;
1310c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.cs = save->cs;
1311c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.ss = save->ss;
1312c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.ds = save->ds;
1313c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.gdtr = save->gdtr;
1314c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.idtr = save->idtr;
1315c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
1316c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.efer = save->efer;
1317c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.cr0 = save->cr0;
1318c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.cr3 = save->cr3;
1319c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.cr4 = save->cr4;
1320c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.rax = save->rax;
1321c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.rsp = save->rsp;
1322c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.rip = save->rip;
1323c08f390aSPaolo Bonzini 	svm->vmcb01.ptr->save.cpl = 0;
1324c08f390aSPaolo Bonzini 
13259e8f0fbfSPaolo Bonzini 	nested_load_control_from_vmcb12(svm, ctl);
13264995a368SCathy Avery 
13274995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
13284995a368SCathy Avery 
13299e8f0fbfSPaolo Bonzini 	nested_vmcb02_prepare_control(svm);
1330cc440cdaSPaolo Bonzini 
1331a7d5c7ceSPaolo Bonzini 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
13326ccbd29aSJoerg Roedel 	ret = 0;
13336ccbd29aSJoerg Roedel out_free:
13346ccbd29aSJoerg Roedel 	kfree(save);
13356ccbd29aSJoerg Roedel 	kfree(ctl);
13366ccbd29aSJoerg Roedel 
13376ccbd29aSJoerg Roedel 	return ret;
1338cc440cdaSPaolo Bonzini }
1339cc440cdaSPaolo Bonzini 
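/*
 * Runs on KVM_REQ_GET_NESTED_STATE_PAGES before reentering L2: reload the
 * L2 CR3 and merge the nested MSR permission bitmap, failing the vCPU run
 * with an internal emulation error if the latter cannot be done.
 */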
1340232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1341232f75d3SMaxim Levitsky {
1342232f75d3SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
1343232f75d3SMaxim Levitsky 
1344232f75d3SMaxim Levitsky 	if (WARN_ON(!is_guest_mode(vcpu)))
1345232f75d3SMaxim Levitsky 		return true;
1346232f75d3SMaxim Levitsky 
1347232f75d3SMaxim Levitsky 	if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1348232f75d3SMaxim Levitsky 				nested_npt_enabled(svm)))
1349232f75d3SMaxim Levitsky 		return false;
1350232f75d3SMaxim Levitsky 
1351232f75d3SMaxim Levitsky 	if (!nested_svm_vmrun_msrpm(svm)) {
1352232f75d3SMaxim Levitsky 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1353232f75d3SMaxim Levitsky 		vcpu->run->internal.suberror =
1354232f75d3SMaxim Levitsky 			KVM_INTERNAL_ERROR_EMULATION;
1355232f75d3SMaxim Levitsky 		vcpu->run->internal.ndata = 0;
1356232f75d3SMaxim Levitsky 		return false;
1357232f75d3SMaxim Levitsky 	}
1358232f75d3SMaxim Levitsky 
1359232f75d3SMaxim Levitsky 	return true;
1360232f75d3SMaxim Levitsky }
1361232f75d3SMaxim Levitsky 
136233b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
136333b22172SPaolo Bonzini 	.check_events = svm_check_nested_events,
1364cb6a32c2SSean Christopherson 	.triple_fault = nested_svm_triple_fault,
1365a7d5c7ceSPaolo Bonzini 	.get_nested_state_pages = svm_get_nested_state_pages,
1366cc440cdaSPaolo Bonzini 	.get_state = svm_get_nested_state,
1367cc440cdaSPaolo Bonzini 	.set_state = svm_set_nested_state,
136833b22172SPaolo Bonzini };
1369