/* xref: /linux/arch/x86/kvm/svm/nested.c (revision e3cdaab5ff022874e65df80ae8b8382ccc0a4fe0) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

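/*
 * Reflect a nested page fault into L1.  This is installed as the
 * inject_page_fault callback of the nested NPT MMU (see
 * nested_svm_init_mmu_context() below): the current exit is rewritten
 * into an SVM_EXIT_NPF carrying the faulting address and error code,
 * and a nested #VMEXIT is synthesized so that L1 handles the fault.
 */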
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
						    struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb12_is_intercept(&svm->nested.ctl,
				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
		vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = fault->error_code;
		vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
		return true;
	}

	return false;
}

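/*
 * Read a PDPTE from the guest page that L1's nested_cr3 points at,
 * for the case where L1 runs its NPT in PAE mode.  A failed read is
 * reported as a zero (not-present) entry.
 */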
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!svm->v_vmload_vmsave_enabled)
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

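/*
 * Recompute the effective (vmcb02) intercepts for L2: start from L0's
 * intercepts in vmcb01, OR in everything the nested hypervisor
 * requested in the cached vmcb12 controls, and apply the few
 * adjustments KVM needs on top (CR8/VINTR under V_INTR_MASKING,
 * VMMCALL, SMI, VMLOAD/VMSAVE).
 */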
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If virtual VMLOAD/VMSAVE is not enabled for L2, we must
		 * intercept these instructions so that KVM can correctly
		 * emulate them in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}

/*
 * Merge L0's (KVM) and L1's (nested VMCB) MSR permission bitmaps.  The
 * function is optimized in that it only merges the parts where the KVM
 * MSR permission bitmap may contain zero bits.
 */
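/*
 * Each MSR's read/write permissions take two bits in the MSRPM, and
 * msrpm_offsets[] lists the u32-sized words that KVM's own bitmap may
 * leave clear (i.e. not intercepted); everywhere else KVM intercepts
 * unconditionally, so OR-ing in L1's bits there could not change the
 * result and those words are skipped.
 */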
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
	/* Nested FLUSHBYASID is not supported yet.  */
	switch (tlb_ctl) {
	case TLB_CONTROL_DO_NOTHING:
	case TLB_CONTROL_FLUSH_ALL_ASID:
		return true;
	default:
		return false;
	}
}

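/*
 * Consistency checks on the cached vmcb12 control area, mirroring the
 * checks hardware performs at VMRUN.  A failure makes the emulated
 * VMRUN fail with SVM_EXIT_ERR instead of entering L2; the CC()
 * wrapper records which check tripped.
 */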
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

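/*
 * Snapshot the vmcb12 control area into svm->nested.ctl.  All later
 * processing (consistency checks, vmcb02 construction) works on this
 * cache, so that L1 rewriting vmcb12 in guest memory after the checks
 * have run cannot bypass them.
 */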
static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa        = from->iopm_base_pa;
	to->msrpm_base_pa       = from->msrpm_base_pa;
	to->tsc_offset          = from->tsc_offset;
	to->tlb_ctl             = from->tlb_ctl;
	to->int_ctl             = from->int_ctl;
	to->int_vector          = from->int_vector;
	to->int_state           = from->int_state;
	to->exit_code           = from->exit_code;
	to->exit_code_hi        = from->exit_code_hi;
	to->exit_info_1         = from->exit_info_1;
	to->exit_info_2         = from->exit_info_2;
	to->exit_int_info       = from->exit_int_info;
	to->exit_int_info_err   = from->exit_int_info_err;
	to->nested_ctl          = from->nested_ctl;
	to->event_inj           = from->event_inj;
	to->event_inj_err       = from->event_inj_err;
	to->nested_cr3          = from->nested_cr3;
	to->virt_ext            = from->virt_ext;
	to->pause_filter_count  = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid           = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa  &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(to->reserved_sw, from->reserved_sw,
		       sizeof(struct hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them to avoid
	 * TOC/TOU (time-of-check to time-of-use) races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

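/*
 * Load L2 guest state from vmcb12 into vmcb02 and the vCPU.  Fields
 * that were validated by nested_vmcb_check_save() are taken from the
 * svm->nested.save cache; the rest comes straight from the
 * guest-writable vmcb12, with the VMCB clean bits letting unchanged
 * segment/descriptor/debug state be skipped.
 */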
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(&svm->vcpu, svm->nested.save.efer);

	svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
	svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}

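/*
 * Build the vmcb02 control area for the upcoming nested VMRUN: some
 * fields are inherited from vmcb01 (L1's context), most come from the
 * cached vmcb12 controls, and int_ctl, virt_ext and the PAUSE filter
 * settings are merged from both.
 */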
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		nested_svm_update_tsc_ratio_msr(vcpu);
	}

	vmcb02->control.int_ctl             =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector          = svm->nested.ctl.int_vector;
	vmcb02->control.int_state           = svm->nested.ctl.int_state;
	vmcb02->control.event_inj           = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
					      LBR_CTL_ENABLE_MASK;
	if (svm->lbrv_enabled)
		vmcb02->control.virt_ext  |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
	pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested.  */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

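/*
 * Common entry into guest mode, used by the VMRUN intercept
 * (from_vmrun == true) as well as by paths that resume an
 * already-running L2 such as KVM_SET_NESTED_STATE (from_vmrun ==
 * false, in which case reloading the MSR permission bitmap is
 * deferred via KVM_REQ_GET_NESTED_STATE_PAGES).
 */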
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	return 0;
}

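/*
 * Handle the VMRUN intercept: map vmcb12 from guest memory, cache and
 * validate its control and save areas, stash L1 state in vmcb01 and
 * switch to vmcb02.  A failed consistency check is reported back to
 * L1 as SVM_EXIT_ERR.
 */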
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	vmcb01->save.efer   = vcpu->arch.efer;
	vmcb01->save.cr0    = kvm_read_cr0(vcpu);
	vmcb01->save.cr4    = vcpu->arch.cr4;
	vmcb01->save.rflags = kvm_get_rflags(vcpu);
	vmcb01->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		vmcb01->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

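/*
 * Emulate #VMEXIT from L2 to L1: copy the exit state from vmcb02 into
 * the guest's vmcb12, switch back to vmcb01, restore the L1 state
 * that nested_svm_vmrun() stashed there, clear GIF and reload L1's
 * CR3/MMU context.
 */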
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int rc;

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb02->save.es;
	vmcb12->save.cs     = vmcb02->save.cs;
	vmcb12->save.ss     = vmcb02->save.ss;
	vmcb12->save.ds     = vmcb02->save.ds;
	vmcb12->save.gdtr   = vmcb02->save.gdtr;
	vmcb12->save.idtr   = vmcb02->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb02->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb02->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb02->save.cpl;

	vmcb12->control.int_state         = vmcb02->control.int_state;
	vmcb12->control.exit_code         = vmcb02->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb02->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb02->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb02->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb02->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		svm_copy_lbrs(vmcb12, vmcb02);
		svm_update_lbrv(vcpu);
	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb01, vmcb02);
		svm_update_lbrv(vcpu);
	}

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	vmcb01->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, vmcb01->save.rflags);
	svm_set_efer(vcpu, vmcb01->save.efer);
	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, vmcb01->save.cr4);
	kvm_rax_write(vcpu, vmcb01->save.rax);
	kvm_rsp_write(vcpu, vmcb01->save.rsp);
	kvm_rip_write(vcpu, vmcb01->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

10329a7de6ecSKrish Sadhukhan 	/*
10339a7de6ecSKrish Sadhukhan 	 * If we are here following the completion of a VMRUN that
10349a7de6ecSKrish Sadhukhan 	 * is being single-stepped, queue the pending #DB intercept
10359a7de6ecSKrish Sadhukhan 	 * right now so that it can be accounted for before we execute
10369a7de6ecSKrish Sadhukhan 	 * L1's next instruction.
10379a7de6ecSKrish Sadhukhan 	 */
1038db663af4SMaxim Levitsky 	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
10399a7de6ecSKrish Sadhukhan 		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
10409a7de6ecSKrish Sadhukhan 
1041f44509f8SMaxim Levitsky 	/*
1042f44509f8SMaxim Levitsky 	 * Un-inhibit the AVIC right away, so that other vCPUs can start
1043f44509f8SMaxim Levitsky 	 * to benefit from it immediately.
1044f44509f8SMaxim Levitsky 	 */
1045f44509f8SMaxim Levitsky 	if (kvm_apicv_activated(vcpu->kvm))
1046f44509f8SMaxim Levitsky 		kvm_vcpu_update_apicv(vcpu);
1047f44509f8SMaxim Levitsky 
1048883b0a91SJoerg Roedel 	return 0;
1049883b0a91SJoerg Roedel }
1050883b0a91SJoerg Roedel 
1051cb6a32c2SSean Christopherson static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1052cb6a32c2SSean Christopherson {
10533a87c7e0SSean Christopherson 	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1054cb6a32c2SSean Christopherson }
1055cb6a32c2SSean Christopherson 
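/*
 * Lazily allocate the state needed to run a nested guest: vmcb02 and the
 * nested MSR permission bitmap.  This is a no-op if the vCPU is already
 * initialized for nesting, so vCPUs that never run an L2 guest pay no
 * extra memory cost.
 */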
10562fcf4876SMaxim Levitsky int svm_allocate_nested(struct vcpu_svm *svm)
10572fcf4876SMaxim Levitsky {
10584995a368SCathy Avery 	struct page *vmcb02_page;
10592fcf4876SMaxim Levitsky 
10602fcf4876SMaxim Levitsky 	if (svm->nested.initialized)
10612fcf4876SMaxim Levitsky 		return 0;
10622fcf4876SMaxim Levitsky 
10634995a368SCathy Avery 	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
10644995a368SCathy Avery 	if (!vmcb02_page)
10652fcf4876SMaxim Levitsky 		return -ENOMEM;
10664995a368SCathy Avery 	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
10674995a368SCathy Avery 	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
10682fcf4876SMaxim Levitsky 
10692fcf4876SMaxim Levitsky 	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
10702fcf4876SMaxim Levitsky 	if (!svm->nested.msrpm)
10714995a368SCathy Avery 		goto err_free_vmcb02;
10722fcf4876SMaxim Levitsky 	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
10732fcf4876SMaxim Levitsky 
10742fcf4876SMaxim Levitsky 	svm->nested.initialized = true;
10752fcf4876SMaxim Levitsky 	return 0;
10762fcf4876SMaxim Levitsky 
10774995a368SCathy Avery err_free_vmcb02:
10784995a368SCathy Avery 	__free_page(vmcb02_page);
10792fcf4876SMaxim Levitsky 	return -ENOMEM;
10802fcf4876SMaxim Levitsky }
10812fcf4876SMaxim Levitsky 
10822fcf4876SMaxim Levitsky void svm_free_nested(struct vcpu_svm *svm)
10832fcf4876SMaxim Levitsky {
10842fcf4876SMaxim Levitsky 	if (!svm->nested.initialized)
10852fcf4876SMaxim Levitsky 		return;
10862fcf4876SMaxim Levitsky 
10872fcf4876SMaxim Levitsky 	svm_vcpu_free_msrpm(svm->nested.msrpm);
10882fcf4876SMaxim Levitsky 	svm->nested.msrpm = NULL;
10892fcf4876SMaxim Levitsky 
10904995a368SCathy Avery 	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
10914995a368SCathy Avery 	svm->nested.vmcb02.ptr = NULL;
10922fcf4876SMaxim Levitsky 
1093c74ad08fSMaxim Levitsky 	/*
1094c74ad08fSMaxim Levitsky 	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1095c74ad08fSMaxim Levitsky 	 * some vmcb12 fields are not loaded if they are marked clean
1096c74ad08fSMaxim Levitsky 	 * in the vmcb12, since in this case they are up to date already.
1097c74ad08fSMaxim Levitsky 	 *
1098c74ad08fSMaxim Levitsky 	 * When the vmcb02 is freed, this optimization becomes invalid.
1099c74ad08fSMaxim Levitsky 	 */
1100c74ad08fSMaxim Levitsky 	svm->nested.last_vmcb12_gpa = INVALID_GPA;
1101c74ad08fSMaxim Levitsky 
11022fcf4876SMaxim Levitsky 	svm->nested.initialized = false;
11032fcf4876SMaxim Levitsky }
11042fcf4876SMaxim Levitsky 
1105c513f484SPaolo Bonzini /*
1106c513f484SPaolo Bonzini  * Forcibly leave nested mode so that the vCPU can be reset later on.
1107c513f484SPaolo Bonzini  */
1108f7e57078SSean Christopherson void svm_leave_nested(struct kvm_vcpu *vcpu)
1109c513f484SPaolo Bonzini {
1110f7e57078SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
111163129754SPaolo Bonzini 
111263129754SPaolo Bonzini 	if (is_guest_mode(vcpu)) {
1113c513f484SPaolo Bonzini 		svm->nested.nested_run_pending = 0;
1114c74ad08fSMaxim Levitsky 		svm->nested.vmcb12_gpa = INVALID_GPA;
1115c74ad08fSMaxim Levitsky 
111663129754SPaolo Bonzini 		leave_guest_mode(vcpu);
11174995a368SCathy Avery 
1118deee59baSMaxim Levitsky 		svm_switch_vmcb(svm, &svm->vmcb01);
11194995a368SCathy Avery 
112063129754SPaolo Bonzini 		nested_svm_uninit_mmu_context(vcpu);
112156fe28deSMaxim Levitsky 		vmcb_mark_all_dirty(svm->vmcb);
1122c513f484SPaolo Bonzini 	}
1123a7d5c7ceSPaolo Bonzini 
112463129754SPaolo Bonzini 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1125c513f484SPaolo Bonzini }
1126c513f484SPaolo Bonzini 
1127883b0a91SJoerg Roedel static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1128883b0a91SJoerg Roedel {
1129883b0a91SJoerg Roedel 	u32 offset, msr, value;
1130883b0a91SJoerg Roedel 	int write, mask;
1131883b0a91SJoerg Roedel 
11328fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1133883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1134883b0a91SJoerg Roedel 
1135883b0a91SJoerg Roedel 	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1136883b0a91SJoerg Roedel 	offset = svm_msrpm_offset(msr);
1137883b0a91SJoerg Roedel 	write  = svm->vmcb->control.exit_info_1 & 1;
1138883b0a91SJoerg Roedel 	mask   = 1 << ((2 * (msr & 0xf)) + write);
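	/*
	 * Each MSR takes two permission bits in the MSRPM (the even bit for
	 * reads, the odd bit for writes), so sixteen MSRs share one 32-bit
	 * word.  As an illustration: a write to an MSR whose low nibble is
	 * 0x3 tests bit (2 * 3) + 1 = 7 of its word.
	 */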
1139883b0a91SJoerg Roedel 
1140883b0a91SJoerg Roedel 	if (offset == MSR_INVALID)
1141883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1142883b0a91SJoerg Roedel 
1143883b0a91SJoerg Roedel 	/* The offset is in 32-bit units, but we need it in byte (8-bit) units */
1144883b0a91SJoerg Roedel 	offset *= 4;
1145883b0a91SJoerg Roedel 
1146e670bf68SPaolo Bonzini 	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1147883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1148883b0a91SJoerg Roedel 
1149883b0a91SJoerg Roedel 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1150883b0a91SJoerg Roedel }
1151883b0a91SJoerg Roedel 
1152883b0a91SJoerg Roedel static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1153883b0a91SJoerg Roedel {
1154883b0a91SJoerg Roedel 	unsigned port, size, iopm_len;
1155883b0a91SJoerg Roedel 	u16 val, mask;
1156883b0a91SJoerg Roedel 	u8 start_bit;
1157883b0a91SJoerg Roedel 	u64 gpa;
1158883b0a91SJoerg Roedel 
11598fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1160883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1161883b0a91SJoerg Roedel 
1162883b0a91SJoerg Roedel 	port = svm->vmcb->control.exit_info_1 >> 16;
1163883b0a91SJoerg Roedel 	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1164883b0a91SJoerg Roedel 		SVM_IOIO_SIZE_SHIFT;
1165e670bf68SPaolo Bonzini 	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
1166883b0a91SJoerg Roedel 	start_bit = port % 8;
1167883b0a91SJoerg Roedel 	iopm_len = (start_bit + size > 8) ? 2 : 1;
1168883b0a91SJoerg Roedel 	mask = (0xf >> (4 - size)) << start_bit;
1169883b0a91SJoerg Roedel 	val = 0;
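	/*
	 * The IOPM holds one permission bit per I/O port, so an access that
	 * straddles a byte boundary must fetch two bytes.  As an
	 * illustration: a two-byte access to port 0x3f8 reads one byte at
	 * iopm_base_pa + 127 and tests mask 0x3 at start_bit 0.
	 */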
1170883b0a91SJoerg Roedel 
1171883b0a91SJoerg Roedel 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1172883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1173883b0a91SJoerg Roedel 
1174883b0a91SJoerg Roedel 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1175883b0a91SJoerg Roedel }
1176883b0a91SJoerg Roedel 
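/*
 * Decide who owns an intercepted event: NESTED_EXIT_DONE means the exit
 * must be reflected to the L1 hypervisor, NESTED_EXIT_HOST means KVM
 * (L0) handles it itself.
 */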
1177883b0a91SJoerg Roedel static int nested_svm_intercept(struct vcpu_svm *svm)
1178883b0a91SJoerg Roedel {
1179883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1180883b0a91SJoerg Roedel 	int vmexit = NESTED_EXIT_HOST;
1181883b0a91SJoerg Roedel 
1182883b0a91SJoerg Roedel 	switch (exit_code) {
1183883b0a91SJoerg Roedel 	case SVM_EXIT_MSR:
1184883b0a91SJoerg Roedel 		vmexit = nested_svm_exit_handled_msr(svm);
1185883b0a91SJoerg Roedel 		break;
1186883b0a91SJoerg Roedel 	case SVM_EXIT_IOIO:
1187883b0a91SJoerg Roedel 		vmexit = nested_svm_intercept_ioio(svm);
1188883b0a91SJoerg Roedel 		break;
1189883b0a91SJoerg Roedel 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
11908fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1191883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1192883b0a91SJoerg Roedel 		break;
1193883b0a91SJoerg Roedel 	}
1194883b0a91SJoerg Roedel 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
11958fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1196883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1197883b0a91SJoerg Roedel 		break;
1198883b0a91SJoerg Roedel 	}
1199883b0a91SJoerg Roedel 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
12007c86663bSPaolo Bonzini 		/*
12017c86663bSPaolo Bonzini 		 * Host-intercepted exceptions have been checked already in
12027c86663bSPaolo Bonzini 		 * nested_svm_exit_special.  There is nothing to do here,
12037c86663bSPaolo Bonzini 		 * the vmexit is injected by svm_check_nested_events.
12047c86663bSPaolo Bonzini 		 */
1205883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1206883b0a91SJoerg Roedel 		break;
1207883b0a91SJoerg Roedel 	}
1208883b0a91SJoerg Roedel 	case SVM_EXIT_ERR: {
1209883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1210883b0a91SJoerg Roedel 		break;
1211883b0a91SJoerg Roedel 	}
1212883b0a91SJoerg Roedel 	default: {
12138fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1214883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1215883b0a91SJoerg Roedel 	}
1216883b0a91SJoerg Roedel 	}
1217883b0a91SJoerg Roedel 
1218883b0a91SJoerg Roedel 	return vmexit;
1219883b0a91SJoerg Roedel }
1220883b0a91SJoerg Roedel 
1221883b0a91SJoerg Roedel int nested_svm_exit_handled(struct vcpu_svm *svm)
1222883b0a91SJoerg Roedel {
1223883b0a91SJoerg Roedel 	int vmexit;
1224883b0a91SJoerg Roedel 
1225883b0a91SJoerg Roedel 	vmexit = nested_svm_intercept(svm);
1226883b0a91SJoerg Roedel 
1227883b0a91SJoerg Roedel 	if (vmexit == NESTED_EXIT_DONE)
1228883b0a91SJoerg Roedel 		nested_svm_vmexit(svm);
1229883b0a91SJoerg Roedel 
1230883b0a91SJoerg Roedel 	return vmexit;
1231883b0a91SJoerg Roedel }
1232883b0a91SJoerg Roedel 
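/*
 * Common permission checks for the SVM instructions: EFER.SVME must be
 * set and paging must be enabled, else #UD is injected; the guest must
 * be running at CPL 0, else #GP is injected.
 */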
123363129754SPaolo Bonzini int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1234883b0a91SJoerg Roedel {
123563129754SPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
123663129754SPaolo Bonzini 		kvm_queue_exception(vcpu, UD_VECTOR);
1237883b0a91SJoerg Roedel 		return 1;
1238883b0a91SJoerg Roedel 	}
1239883b0a91SJoerg Roedel 
124063129754SPaolo Bonzini 	if (to_svm(vcpu)->vmcb->save.cpl) {
124163129754SPaolo Bonzini 		kvm_inject_gp(vcpu, 0);
1242883b0a91SJoerg Roedel 		return 1;
1243883b0a91SJoerg Roedel 	}
1244883b0a91SJoerg Roedel 
1245883b0a91SJoerg Roedel 	return 0;
1246883b0a91SJoerg Roedel }
1247883b0a91SJoerg Roedel 
12487c86663bSPaolo Bonzini static bool nested_exit_on_exception(struct vcpu_svm *svm)
1249883b0a91SJoerg Roedel {
12507c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1251883b0a91SJoerg Roedel 
12529780d51dSBabu Moger 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
12537c86663bSPaolo Bonzini }
1254883b0a91SJoerg Roedel 
12557c86663bSPaolo Bonzini static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
12567c86663bSPaolo Bonzini {
12577c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1258db663af4SMaxim Levitsky 	struct vmcb *vmcb = svm->vmcb;
1259883b0a91SJoerg Roedel 
1260db663af4SMaxim Levitsky 	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1261db663af4SMaxim Levitsky 	vmcb->control.exit_code_hi = 0;
12627c86663bSPaolo Bonzini 
12637c86663bSPaolo Bonzini 	if (svm->vcpu.arch.exception.has_error_code)
1264db663af4SMaxim Levitsky 		vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1265883b0a91SJoerg Roedel 
1266883b0a91SJoerg Roedel 	/*
1267883b0a91SJoerg Roedel 	 * EXITINFO2 is undefined for all exception intercepts other
1268883b0a91SJoerg Roedel 	 * than #PF.
1269883b0a91SJoerg Roedel 	 */
12707c86663bSPaolo Bonzini 	if (nr == PF_VECTOR) {
1271883b0a91SJoerg Roedel 		if (svm->vcpu.arch.exception.nested_apf)
1272db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1273883b0a91SJoerg Roedel 		else if (svm->vcpu.arch.exception.has_payload)
1274db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1275883b0a91SJoerg Roedel 		else
1276db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
12777c86663bSPaolo Bonzini 	} else if (nr == DB_VECTOR) {
12787c86663bSPaolo Bonzini 		/* See inject_pending_event.  */
12797c86663bSPaolo Bonzini 		kvm_deliver_exception_payload(&svm->vcpu);
12807c86663bSPaolo Bonzini 		if (svm->vcpu.arch.dr7 & DR7_GD) {
12817c86663bSPaolo Bonzini 			svm->vcpu.arch.dr7 &= ~DR7_GD;
12827c86663bSPaolo Bonzini 			kvm_update_dr7(&svm->vcpu);
12837c86663bSPaolo Bonzini 		}
12847c86663bSPaolo Bonzini 	} else
12857c86663bSPaolo Bonzini 		WARN_ON(svm->vcpu.arch.exception.has_payload);
1286883b0a91SJoerg Roedel 
12877c86663bSPaolo Bonzini 	nested_svm_vmexit(svm);
1288883b0a91SJoerg Roedel }
1289883b0a91SJoerg Roedel 
12905b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
12915b672408SPaolo Bonzini {
12928fc78909SEmanuele Giuseppe Esposito 	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
12935b672408SPaolo Bonzini }
12945b672408SPaolo Bonzini 
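/*
 * Pending events are considered in priority order: INIT, exceptions,
 * SMI, NMI, then external interrupts.  Each one either triggers a
 * synthesized vmexit if L1 intercepts it or is left for normal
 * delivery; -EBUSY means the event must wait until the pending
 * (re)injection or nested VMRUN has completed.
 */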
129533b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1296883b0a91SJoerg Roedel {
1297883b0a91SJoerg Roedel 	struct vcpu_svm *svm = to_svm(vcpu);
1298883b0a91SJoerg Roedel 	bool block_nested_events =
1299bd279629SPaolo Bonzini 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
13005b672408SPaolo Bonzini 	struct kvm_lapic *apic = vcpu->arch.apic;
13015b672408SPaolo Bonzini 
13025b672408SPaolo Bonzini 	if (lapic_in_kernel(vcpu) &&
13035b672408SPaolo Bonzini 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
13045b672408SPaolo Bonzini 		if (block_nested_events)
13055b672408SPaolo Bonzini 			return -EBUSY;
13065b672408SPaolo Bonzini 		if (!nested_exit_on_init(svm))
13075b672408SPaolo Bonzini 			return 0;
13083a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
13095b672408SPaolo Bonzini 		return 0;
13105b672408SPaolo Bonzini 	}
1311883b0a91SJoerg Roedel 
13127c86663bSPaolo Bonzini 	if (vcpu->arch.exception.pending) {
13134020da3bSMaxim Levitsky 		/*
13144020da3bSMaxim Levitsky 		 * Only a pending nested run can block a pending exception.
13154020da3bSMaxim Levitsky 		 * Otherwise an injected NMI/interrupt should either be
13164020da3bSMaxim Levitsky 		 * lost or delivered to the nested hypervisor in the EXITINTINFO
13174020da3bSMaxim Levitsky 		 * vmcb field, while delivering the pending exception.
13184020da3bSMaxim Levitsky 		 */
13194020da3bSMaxim Levitsky 		if (svm->nested.nested_run_pending)
13207c86663bSPaolo Bonzini 			return -EBUSY;
13217c86663bSPaolo Bonzini 		if (!nested_exit_on_exception(svm))
13227c86663bSPaolo Bonzini 			return 0;
13237c86663bSPaolo Bonzini 		nested_svm_inject_exception_vmexit(svm);
13247c86663bSPaolo Bonzini 		return 0;
13257c86663bSPaolo Bonzini 	}
13267c86663bSPaolo Bonzini 
1327221e7610SPaolo Bonzini 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
132855714cddSPaolo Bonzini 		if (block_nested_events)
132955714cddSPaolo Bonzini 			return -EBUSY;
1330221e7610SPaolo Bonzini 		if (!nested_exit_on_smi(svm))
1331221e7610SPaolo Bonzini 			return 0;
13323a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
133355714cddSPaolo Bonzini 		return 0;
133455714cddSPaolo Bonzini 	}
133555714cddSPaolo Bonzini 
1336221e7610SPaolo Bonzini 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
13379c3d370aSCathy Avery 		if (block_nested_events)
13389c3d370aSCathy Avery 			return -EBUSY;
1339221e7610SPaolo Bonzini 		if (!nested_exit_on_nmi(svm))
1340221e7610SPaolo Bonzini 			return 0;
13413a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
13429c3d370aSCathy Avery 		return 0;
13439c3d370aSCathy Avery 	}
13449c3d370aSCathy Avery 
1345221e7610SPaolo Bonzini 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1346883b0a91SJoerg Roedel 		if (block_nested_events)
1347883b0a91SJoerg Roedel 			return -EBUSY;
1348221e7610SPaolo Bonzini 		if (!nested_exit_on_intr(svm))
1349221e7610SPaolo Bonzini 			return 0;
13503a87c7e0SSean Christopherson 		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
13513a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1352883b0a91SJoerg Roedel 		return 0;
1353883b0a91SJoerg Roedel 	}
1354883b0a91SJoerg Roedel 
1355883b0a91SJoerg Roedel 	return 0;
1356883b0a91SJoerg Roedel }
1357883b0a91SJoerg Roedel 
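/*
 * Filter out exits that L0 must always handle itself before reflection
 * to L1 is even considered: physical INTR, NMI and NPF go to the host,
 * as do exceptions that KVM itself intercepts via vmcb01 and #PF exits
 * that carry a host async page fault.
 */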
1358883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1359883b0a91SJoerg Roedel {
1360883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1361883b0a91SJoerg Roedel 
1362883b0a91SJoerg Roedel 	switch (exit_code) {
1363883b0a91SJoerg Roedel 	case SVM_EXIT_INTR:
1364883b0a91SJoerg Roedel 	case SVM_EXIT_NMI:
1365883b0a91SJoerg Roedel 	case SVM_EXIT_NPF:
1366883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
13677c86663bSPaolo Bonzini 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
13687c86663bSPaolo Bonzini 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
13697c86663bSPaolo Bonzini 
13704995a368SCathy Avery 		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
13719780d51dSBabu Moger 		    excp_bits)
13727c86663bSPaolo Bonzini 			return NESTED_EXIT_HOST;
13737c86663bSPaolo Bonzini 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
137468fd66f1SVitaly Kuznetsov 			 svm->vcpu.arch.apf.host_apf_flags)
1375a3535be7SPaolo Bonzini 			/* Trap async PF even if not shadowing */
1376883b0a91SJoerg Roedel 			return NESTED_EXIT_HOST;
1377883b0a91SJoerg Roedel 		break;
13787c86663bSPaolo Bonzini 	}
1379883b0a91SJoerg Roedel 	default:
1380883b0a91SJoerg Roedel 		break;
1381883b0a91SJoerg Roedel 	}
1382883b0a91SJoerg Roedel 
1383883b0a91SJoerg Roedel 	return NESTED_EXIT_CONTINUE;
1384883b0a91SJoerg Roedel }
138533b22172SPaolo Bonzini 
13865228eb96SMaxim Levitsky void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
13875228eb96SMaxim Levitsky {
13885228eb96SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
13895228eb96SMaxim Levitsky 
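	/*
	 * Combine the L1 and L2 ratios in fixed-point arithmetic; with
	 * SVM's 8.32 TSC_RATIO format this works out to roughly
	 * (l1_ratio * l2_ratio) >> 32.
	 */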
13905228eb96SMaxim Levitsky 	vcpu->arch.tsc_scaling_ratio =
13915228eb96SMaxim Levitsky 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
13925228eb96SMaxim Levitsky 					       svm->tsc_ratio_msr);
139311d39e8cSMaxim Levitsky 	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
13945228eb96SMaxim Levitsky }
13955228eb96SMaxim Levitsky 
13968fc78909SEmanuele Giuseppe Esposito /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
13978fc78909SEmanuele Giuseppe Esposito static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
13988fc78909SEmanuele Giuseppe Esposito 					      struct vmcb_ctrl_area_cached *from)
13998fc78909SEmanuele Giuseppe Esposito {
14008fc78909SEmanuele Giuseppe Esposito 	unsigned int i;
14018fc78909SEmanuele Giuseppe Esposito 
14028fc78909SEmanuele Giuseppe Esposito 	memset(dst, 0, sizeof(struct vmcb_control_area));
14038fc78909SEmanuele Giuseppe Esposito 
14048fc78909SEmanuele Giuseppe Esposito 	for (i = 0; i < MAX_INTERCEPT; i++)
14058fc78909SEmanuele Giuseppe Esposito 		dst->intercepts[i] = from->intercepts[i];
14068fc78909SEmanuele Giuseppe Esposito 
14078fc78909SEmanuele Giuseppe Esposito 	dst->iopm_base_pa         = from->iopm_base_pa;
14088fc78909SEmanuele Giuseppe Esposito 	dst->msrpm_base_pa        = from->msrpm_base_pa;
14098fc78909SEmanuele Giuseppe Esposito 	dst->tsc_offset           = from->tsc_offset;
14108fc78909SEmanuele Giuseppe Esposito 	dst->asid                 = from->asid;
14118fc78909SEmanuele Giuseppe Esposito 	dst->tlb_ctl              = from->tlb_ctl;
14128fc78909SEmanuele Giuseppe Esposito 	dst->int_ctl              = from->int_ctl;
14138fc78909SEmanuele Giuseppe Esposito 	dst->int_vector           = from->int_vector;
14148fc78909SEmanuele Giuseppe Esposito 	dst->int_state            = from->int_state;
14158fc78909SEmanuele Giuseppe Esposito 	dst->exit_code            = from->exit_code;
14168fc78909SEmanuele Giuseppe Esposito 	dst->exit_code_hi         = from->exit_code_hi;
14178fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_1          = from->exit_info_1;
14188fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_2          = from->exit_info_2;
14198fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info        = from->exit_int_info;
14208fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info_err    = from->exit_int_info_err;
14218fc78909SEmanuele Giuseppe Esposito 	dst->nested_ctl           = from->nested_ctl;
14228fc78909SEmanuele Giuseppe Esposito 	dst->event_inj            = from->event_inj;
14238fc78909SEmanuele Giuseppe Esposito 	dst->event_inj_err        = from->event_inj_err;
14248fc78909SEmanuele Giuseppe Esposito 	dst->nested_cr3           = from->nested_cr3;
14258fc78909SEmanuele Giuseppe Esposito 	dst->virt_ext             = from->virt_ext;
14268fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_count   = from->pause_filter_count;
14278fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_thresh  = from->pause_filter_thresh;
142866c03a92SVitaly Kuznetsov 	/* 'clean' and 'reserved_sw' are not changed by KVM */
14298fc78909SEmanuele Giuseppe Esposito }
14308fc78909SEmanuele Giuseppe Esposito 
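/*
 * KVM_GET_NESTED_STATE: the fixed-size header is followed by a single
 * KVM_STATE_NESTED_SVM_VMCB_SIZE blob at data.svm[0], holding the cached
 * vmcb12 control area and the L1 save area taken from vmcb01.
 */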
1431cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1432cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1433cc440cdaSPaolo Bonzini 				u32 user_data_size)
1434cc440cdaSPaolo Bonzini {
1435cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm;
14368fc78909SEmanuele Giuseppe Esposito 	struct vmcb_control_area *ctl;
14378fc78909SEmanuele Giuseppe Esposito 	unsigned long r;
1438cc440cdaSPaolo Bonzini 	struct kvm_nested_state kvm_state = {
1439cc440cdaSPaolo Bonzini 		.flags = 0,
1440cc440cdaSPaolo Bonzini 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1441cc440cdaSPaolo Bonzini 		.size = sizeof(kvm_state),
1442cc440cdaSPaolo Bonzini 	};
1443cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1444cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
1445cc440cdaSPaolo Bonzini 
1446cc440cdaSPaolo Bonzini 	if (!vcpu)
1447cc440cdaSPaolo Bonzini 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1448cc440cdaSPaolo Bonzini 
1449cc440cdaSPaolo Bonzini 	svm = to_svm(vcpu);
1450cc440cdaSPaolo Bonzini 
1451cc440cdaSPaolo Bonzini 	if (user_data_size < kvm_state.size)
1452cc440cdaSPaolo Bonzini 		goto out;
1453cc440cdaSPaolo Bonzini 
1454cc440cdaSPaolo Bonzini 	/* First fill in the header and copy it out.  */
1455cc440cdaSPaolo Bonzini 	if (is_guest_mode(vcpu)) {
14560dd16b5bSMaxim Levitsky 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1457cc440cdaSPaolo Bonzini 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1458cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1459cc440cdaSPaolo Bonzini 
1460cc440cdaSPaolo Bonzini 		if (svm->nested.nested_run_pending)
1461cc440cdaSPaolo Bonzini 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1462cc440cdaSPaolo Bonzini 	}
1463cc440cdaSPaolo Bonzini 
1464cc440cdaSPaolo Bonzini 	if (gif_set(svm))
1465cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1466cc440cdaSPaolo Bonzini 
1467cc440cdaSPaolo Bonzini 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1468cc440cdaSPaolo Bonzini 		return -EFAULT;
1469cc440cdaSPaolo Bonzini 
1470cc440cdaSPaolo Bonzini 	if (!is_guest_mode(vcpu))
1471cc440cdaSPaolo Bonzini 		goto out;
1472cc440cdaSPaolo Bonzini 
1473cc440cdaSPaolo Bonzini 	/*
1474cc440cdaSPaolo Bonzini 	 * Copy over the full size of the VMCB rather than just the size
1475cc440cdaSPaolo Bonzini 	 * of the structs.
1476cc440cdaSPaolo Bonzini 	 */
1477cc440cdaSPaolo Bonzini 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1478cc440cdaSPaolo Bonzini 		return -EFAULT;
14798fc78909SEmanuele Giuseppe Esposito 
14808fc78909SEmanuele Giuseppe Esposito 	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
14818fc78909SEmanuele Giuseppe Esposito 	if (!ctl)
14828fc78909SEmanuele Giuseppe Esposito 		return -ENOMEM;
14838fc78909SEmanuele Giuseppe Esposito 
14848fc78909SEmanuele Giuseppe Esposito 	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
14858fc78909SEmanuele Giuseppe Esposito 	r = copy_to_user(&user_vmcb->control, ctl,
14868fc78909SEmanuele Giuseppe Esposito 			 sizeof(user_vmcb->control));
14878fc78909SEmanuele Giuseppe Esposito 	kfree(ctl);
14888fc78909SEmanuele Giuseppe Esposito 	if (r)
1489cc440cdaSPaolo Bonzini 		return -EFAULT;
14908fc78909SEmanuele Giuseppe Esposito 
14914995a368SCathy Avery 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1492cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->save)))
1493cc440cdaSPaolo Bonzini 		return -EFAULT;
1494cc440cdaSPaolo Bonzini out:
1495cc440cdaSPaolo Bonzini 	return kvm_state.size;
1496cc440cdaSPaolo Bonzini }
1497cc440cdaSPaolo Bonzini 
1498cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1499cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1500cc440cdaSPaolo Bonzini 				struct kvm_nested_state *kvm_state)
1501cc440cdaSPaolo Bonzini {
1502cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
1503cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1504cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
15056ccbd29aSJoerg Roedel 	struct vmcb_control_area *ctl;
15066ccbd29aSJoerg Roedel 	struct vmcb_save_area *save;
1507b7a3d8b6SEmanuele Giuseppe Esposito 	struct vmcb_save_area_cached save_cached;
15088fc78909SEmanuele Giuseppe Esposito 	struct vmcb_ctrl_area_cached ctl_cached;
1509dbc4739bSSean Christopherson 	unsigned long cr0;
15106ccbd29aSJoerg Roedel 	int ret;
1511cc440cdaSPaolo Bonzini 
15126ccbd29aSJoerg Roedel 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
15136ccbd29aSJoerg Roedel 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
15146ccbd29aSJoerg Roedel 
1515cc440cdaSPaolo Bonzini 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1516cc440cdaSPaolo Bonzini 		return -EINVAL;
1517cc440cdaSPaolo Bonzini 
1518cc440cdaSPaolo Bonzini 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1519cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_RUN_PENDING |
1520cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_GIF_SET))
1521cc440cdaSPaolo Bonzini 		return -EINVAL;
1522cc440cdaSPaolo Bonzini 
1523cc440cdaSPaolo Bonzini 	/*
1524cc440cdaSPaolo Bonzini 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1525cc440cdaSPaolo Bonzini 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1526cc440cdaSPaolo Bonzini 	 */
1527cc440cdaSPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME)) {
1528cc440cdaSPaolo Bonzini 		/* GIF=1 and no guest mode are required if SVME=0.  */
1529cc440cdaSPaolo Bonzini 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1530cc440cdaSPaolo Bonzini 			return -EINVAL;
1531cc440cdaSPaolo Bonzini 	}
1532cc440cdaSPaolo Bonzini 
1533cc440cdaSPaolo Bonzini 	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1534cc440cdaSPaolo Bonzini 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1535cc440cdaSPaolo Bonzini 		return -EINVAL;
1536cc440cdaSPaolo Bonzini 
1537cc440cdaSPaolo Bonzini 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1538f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
1539d5cd6f34SVitaly Kuznetsov 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1540d5cd6f34SVitaly Kuznetsov 		return 0;
1541cc440cdaSPaolo Bonzini 	}
1542cc440cdaSPaolo Bonzini 
1543cc440cdaSPaolo Bonzini 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1544cc440cdaSPaolo Bonzini 		return -EINVAL;
1545cc440cdaSPaolo Bonzini 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1546cc440cdaSPaolo Bonzini 		return -EINVAL;
1547cc440cdaSPaolo Bonzini 
15486ccbd29aSJoerg Roedel 	ret  = -ENOMEM;
1549eba04b20SSean Christopherson 	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1550eba04b20SSean Christopherson 	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
15516ccbd29aSJoerg Roedel 	if (!ctl || !save)
15526ccbd29aSJoerg Roedel 		goto out_free;
15536ccbd29aSJoerg Roedel 
15546ccbd29aSJoerg Roedel 	ret = -EFAULT;
15556ccbd29aSJoerg Roedel 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
15566ccbd29aSJoerg Roedel 		goto out_free;
15576ccbd29aSJoerg Roedel 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
15586ccbd29aSJoerg Roedel 		goto out_free;
15596ccbd29aSJoerg Roedel 
15606ccbd29aSJoerg Roedel 	ret = -EINVAL;
156166c03a92SVitaly Kuznetsov 	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
15628fc78909SEmanuele Giuseppe Esposito 	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
15636ccbd29aSJoerg Roedel 		goto out_free;
1564cc440cdaSPaolo Bonzini 
1565cc440cdaSPaolo Bonzini 	/*
1566cc440cdaSPaolo Bonzini 	 * Processor state contains L2 state.  Check that it is
1567cb9b6a1bSPaolo Bonzini 	 * valid for guest mode (see nested_vmcb_check_save).
1568cc440cdaSPaolo Bonzini 	 */
1569cc440cdaSPaolo Bonzini 	cr0 = kvm_read_cr0(vcpu);
1570cc440cdaSPaolo Bonzini 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
15716ccbd29aSJoerg Roedel 		goto out_free;
1572cc440cdaSPaolo Bonzini 
1573cc440cdaSPaolo Bonzini 	/*
1574cc440cdaSPaolo Bonzini 	 * Validate host state saved from before VMRUN (see
1575cc440cdaSPaolo Bonzini 	 * nested_svm_check_permissions).
1576cc440cdaSPaolo Bonzini 	 */
1577b7a3d8b6SEmanuele Giuseppe Esposito 	__nested_copy_vmcb_save_to_cache(&save_cached, save);
15786906e06dSKrish Sadhukhan 	if (!(save->cr0 & X86_CR0_PG) ||
15796906e06dSKrish Sadhukhan 	    !(save->cr0 & X86_CR0_PE) ||
15806906e06dSKrish Sadhukhan 	    (save->rflags & X86_EFLAGS_VM) ||
1581b7a3d8b6SEmanuele Giuseppe Esposito 	    !__nested_vmcb_check_save(vcpu, &save_cached))
15826ccbd29aSJoerg Roedel 		goto out_free;
1584b222b0b8SMaxim Levitsky 
1585b222b0b8SMaxim Levitsky 	/*
15864995a368SCathy Avery 	 * All checks done, we can enter guest mode. Userspace provides
15874995a368SCathy Avery 	 * vmcb12.control, which will be combined with L1's controls and stored
15884995a368SCathy Avery 	 * into vmcb02, and the L1 save state which we store in vmcb01.
15894995a368SCathy Avery 	 * If needed, L2 registers are moved from the current VMCB to vmcb02.
1590cc440cdaSPaolo Bonzini 	 */
159181f76adaSMaxim Levitsky 
15929d290e16SMaxim Levitsky 	if (is_guest_mode(vcpu))
1593f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
15949d290e16SMaxim Levitsky 	else
15959d290e16SMaxim Levitsky 		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
15969d290e16SMaxim Levitsky 
1597063ab16cSMaxim Levitsky 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1598063ab16cSMaxim Levitsky 
159981f76adaSMaxim Levitsky 	svm->nested.nested_run_pending =
160081f76adaSMaxim Levitsky 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
160181f76adaSMaxim Levitsky 
16020dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1603c08f390aSPaolo Bonzini 
16042bb16beaSVitaly Kuznetsov 	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
16057907160dSEmanuele Giuseppe Esposito 	nested_copy_vmcb_control_to_cache(svm, ctl);
16064995a368SCathy Avery 
16074995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
16089e8f0fbfSPaolo Bonzini 	nested_vmcb02_prepare_control(svm);
1609e1779c27SMaxim Levitsky 
1610e1779c27SMaxim Levitsky 	/*
1611e1779c27SMaxim Levitsky 	 * While the nested guest CR3 is already checked and set by
1612e1779c27SMaxim Levitsky 	 * KVM_SET_SREGS, it was set before the nested state was loaded,
1613e1779c27SMaxim Levitsky 	 * and thus the MMU might not be initialized correctly.
1614e1779c27SMaxim Levitsky 	 * Set it again to fix this.
1615e1779c27SMaxim Levitsky 	 */
1616e1779c27SMaxim Levitsky 
1617e1779c27SMaxim Levitsky 	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1618e1779c27SMaxim Levitsky 				  nested_npt_enabled(svm), false);
1619e1779c27SMaxim Levitsky 	if (WARN_ON_ONCE(ret))
1620e1779c27SMaxim Levitsky 		goto out_free;
1621e1779c27SMaxim Levitsky 
162273c25546SVitaly Kuznetsov 	svm->nested.force_msr_bitmap_recalc = true;
1623e1779c27SMaxim Levitsky 
1624a7d5c7ceSPaolo Bonzini 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
16256ccbd29aSJoerg Roedel 	ret = 0;
16266ccbd29aSJoerg Roedel out_free:
16276ccbd29aSJoerg Roedel 	kfree(save);
16286ccbd29aSJoerg Roedel 	kfree(ctl);
16296ccbd29aSJoerg Roedel 
16306ccbd29aSJoerg Roedel 	return ret;
1631cc440cdaSPaolo Bonzini }
1632cc440cdaSPaolo Bonzini 
1633232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1634232f75d3SMaxim Levitsky {
1635232f75d3SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
1636232f75d3SMaxim Levitsky 
1637232f75d3SMaxim Levitsky 	if (WARN_ON(!is_guest_mode(vcpu)))
1638232f75d3SMaxim Levitsky 		return true;
1639232f75d3SMaxim Levitsky 
1640158a48ecSMaxim Levitsky 	if (!vcpu->arch.pdptrs_from_userspace &&
1641158a48ecSMaxim Levitsky 	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1642b222b0b8SMaxim Levitsky 		/*
1643b222b0b8SMaxim Levitsky 		 * Reload the guest's PDPTRs since after a migration
1644b222b0b8SMaxim Levitsky 		 * the guest CR3 might be restored prior to setting the nested
1645b222b0b8SMaxim Levitsky 		 * state which can lead to a load of wrong PDPTRs.
1646b222b0b8SMaxim Levitsky 		 * state, which can lead to loading the wrong PDPTRs.
16472df4a5ebSLai Jiangshan 		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1648232f75d3SMaxim Levitsky 			return false;
1649232f75d3SMaxim Levitsky 
1650232f75d3SMaxim Levitsky 	if (!nested_svm_vmrun_msrpm(svm)) {
1651232f75d3SMaxim Levitsky 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1652232f75d3SMaxim Levitsky 		vcpu->run->internal.suberror =
1653232f75d3SMaxim Levitsky 			KVM_INTERNAL_ERROR_EMULATION;
1654232f75d3SMaxim Levitsky 		vcpu->run->internal.ndata = 0;
1655232f75d3SMaxim Levitsky 		return false;
1656232f75d3SMaxim Levitsky 	}
1657232f75d3SMaxim Levitsky 
1658232f75d3SMaxim Levitsky 	return true;
1659232f75d3SMaxim Levitsky }
1660232f75d3SMaxim Levitsky 
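/* Callbacks wired into the common x86 nested-virtualization machinery. */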
166133b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
1662f7e57078SSean Christopherson 	.leave_nested = svm_leave_nested,
166333b22172SPaolo Bonzini 	.check_events = svm_check_nested_events,
16646819af75SSean Christopherson 	.handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
1665cb6a32c2SSean Christopherson 	.triple_fault = nested_svm_triple_fault,
1666a7d5c7ceSPaolo Bonzini 	.get_nested_state_pages = svm_get_nested_state_pages,
1667cc440cdaSPaolo Bonzini 	.get_state = svm_get_nested_state,
1668cc440cdaSPaolo Bonzini 	.set_state = svm_set_nested_state,
166933b22172SPaolo Bonzini };
1670