// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        if (vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                vmcb->control.exit_code = SVM_EXIT_NPF;
                vmcb->control.exit_code_hi = 0;
                vmcb->control.exit_info_1 = (1ULL << 32);
                vmcb->control.exit_info_2 = fault->address;
        }

        vmcb->control.exit_info_1 &= ~0xffffffffULL;
        vmcb->control.exit_info_1 |= fault->error_code;

        nested_svm_vmexit(svm);
}
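
/*
 * Explanatory note on the encoding above: for a nested #NPF vmexit the
 * low 32 bits of exit_info_1 carry the page-fault error code, while the
 * high bits are meant to describe how the nested fault arose.  KVM does
 * not track the latter yet (see the TODO), so a fixed bit 32 is reported.
 */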

static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
                                                    struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        WARN_ON(!is_guest_mode(vcpu));

        if (vmcb12_is_intercept(&svm->nested.ctl,
                                INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
            !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
                vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
                vmcb->control.exit_code_hi = 0;
                vmcb->control.exit_info_1 = fault->error_code;
                vmcb->control.exit_info_2 = fault->address;
                nested_svm_vmexit(svm);
                return true;
        }

        return false;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;

        /*
         * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.
         * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
         * match current vCPU state.  CR0.WP is explicitly ignored, while
         * CR0.PG is required.
         */
        kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
                                svm->vmcb01.ptr->save.efer,
                                svm->nested.ctl.nested_cr3);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
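
/*
 * Explanatory note: with nested NPT active, walk_mmu (nested_mmu)
 * translates L2 linear addresses using L2's own paging, while guest_mmu
 * shadows L1's nested page tables rooted at nested_cr3.  The get_pdptr
 * hook is needed because L1 may build PAE-format nested page tables,
 * whose four PDPTEs have to be read from guest memory.
 */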

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
        if (!svm->v_vmload_vmsave_enabled)
                return true;

        if (!nested_npt_enabled(svm))
                return true;

        if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
                return true;

        return false;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct vmcb_ctrl_area_cached *g;
        unsigned int i;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->vmcb01.ptr->control;
        g = &svm->nested.ctl;

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] = h->intercepts[i];

        if (g->int_ctl & V_INTR_MASKING_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
                vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];

        /* If SMI is not intercepted, ignore guest SMI intercept as well */
        if (!intercept_smi)
                vmcb_clr_intercept(c, INTERCEPT_SMI);

        if (nested_vmcb_needs_vls_intercept(svm)) {
                /*
                 * If virtual VMLOAD/VMSAVE is not enabled for L2, we must
                 * intercept these instructions to correctly emulate them
                 * in case L1 doesn't intercept them.
                 */
                vmcb_set_intercept(c, INTERCEPT_VMLOAD);
                vmcb_set_intercept(c, INTERCEPT_VMSAVE);
        } else {
                WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
        }
}
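
/*
 * Net effect of recalc_intercepts(): the active vmcb02 intercepts are
 * the union of L0's intercepts (vmcb01) and L1's intercepts (the cached
 * vmcb12 controls), minus the adjustments above.  Every vmexit is first
 * taken by L0, which then consults the cached L1 intercepts to decide
 * whether the exit must be reflected into L1.
 */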

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps.  The
 * function is optimized in that it only merges the parts where the KVM
 * MSR permission bitmap may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        struct hv_enlightenments *hve =
                (struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
        int i;

        /*
         * MSR bitmap update can be skipped when:
         * - MSR bitmap for L1 hasn't changed.
         * - Nested hypervisor (L1) is attempting to launch the same L2 as
         *   before.
         * - Nested hypervisor (L1) is using Hyper-V emulation interface and
         *   tells KVM (L0) there were no changes in MSR bitmap for L2.
         */
        if (!svm->nested.force_msr_bitmap_recalc &&
            kvm_hv_hypercall_enabled(&svm->vcpu) &&
            hve->hv_enlightenments_control.msr_bitmap &&
            (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
                goto set_msrpm_base_pa;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];
                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}
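
/*
 * Illustration of the merge above: each MSR owns two intercept bits in
 * the MSRPM (read and write), so ORing a 32-bit chunk of L0's bitmap
 * with the same chunk of L1's bitmap intercepts an access if *either*
 * hypervisor wants it intercepted.  msrpm_offsets[] enumerates only the
 * chunks in which L0 may leave an MSR unintercepted; all other chunks
 * of KVM's bitmap are already all ones, so merging them would be a nop.
 */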

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
        u64 addr = PAGE_ALIGN(pa);

        return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
            kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
        /* Nested FLUSHBYASID is not supported yet.  */
        switch (tlb_ctl) {
                case TLB_CONTROL_DO_NOTHING:
                case TLB_CONTROL_FLUSH_ALL_ASID:
                        return true;
                default:
                        return false;
        }
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
                                         struct vmcb_ctrl_area_cached *control)
{
        if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
                return false;

        if (CC(control->asid == 0))
                return false;

        if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
                return false;

        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
                                           MSRPM_SIZE)))
                return false;
        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
                                           IOPM_SIZE)))
                return false;

        if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
                return false;

        return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
                                     struct vmcb_save_area_cached *save)
{
        if (CC(!(save->efer & EFER_SVME)))
                return false;

        if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
            CC(save->cr0 & ~0xffffffffULL))
                return false;

        if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
                return false;

        /*
         * These checks are also performed by KVM_SET_SREGS,
         * except that EFER.LMA is not checked by SVM against
         * CR0.PG && EFER.LME.
         */
        if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
                if (CC(!(save->cr4 & X86_CR4_PAE)) ||
                    CC(!(save->cr0 & X86_CR0_PE)) ||
                    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
                        return false;
        }

        if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
                return false;

        if (CC(!kvm_valid_efer(vcpu, save->efer)))
                return false;

        return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_save_area_cached *save = &svm->nested.save;

        return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

        return __nested_vmcb_check_controls(vcpu, ctl);
}
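
/*
 * Note on the CC() wrapper used in the checks above: it expands to
 * KVM_NESTED_VMENTER_CONSISTENCY_CHECK, which traces the failing
 * condition so it is visible *why* a nested VMRUN was rejected.  A
 * rejected VMRUN is reported to L1 as SVM_EXIT_ERR (see
 * nested_svm_vmrun()) rather than being silently dropped.
 */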

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
                                         struct vmcb_ctrl_area_cached *to,
                                         struct vmcb_control_area *from)
{
        unsigned int i;

        for (i = 0; i < MAX_INTERCEPT; i++)
                to->intercepts[i] = from->intercepts[i];

        to->iopm_base_pa        = from->iopm_base_pa;
        to->msrpm_base_pa       = from->msrpm_base_pa;
        to->tsc_offset          = from->tsc_offset;
        to->tlb_ctl             = from->tlb_ctl;
        to->int_ctl             = from->int_ctl;
        to->int_vector          = from->int_vector;
        to->int_state           = from->int_state;
        to->exit_code           = from->exit_code;
        to->exit_code_hi        = from->exit_code_hi;
        to->exit_info_1         = from->exit_info_1;
        to->exit_info_2         = from->exit_info_2;
        to->exit_int_info       = from->exit_int_info;
        to->exit_int_info_err   = from->exit_int_info_err;
        to->nested_ctl          = from->nested_ctl;
        to->event_inj           = from->event_inj;
        to->event_inj_err       = from->event_inj_err;
        to->next_rip            = from->next_rip;
        to->nested_cr3          = from->nested_cr3;
        to->virt_ext            = from->virt_ext;
        to->pause_filter_count  = from->pause_filter_count;
        to->pause_filter_thresh = from->pause_filter_thresh;

        /* Copy asid here because nested_vmcb_check_controls will check it.  */
        to->asid           = from->asid;
        to->msrpm_base_pa &= ~0x0fffULL;
        to->iopm_base_pa  &= ~0x0fffULL;

        /* Hyper-V extensions (Enlightened VMCB) */
        if (kvm_hv_hypercall_enabled(vcpu)) {
                to->clean = from->clean;
                memcpy(to->reserved_sw, from->reserved_sw,
                       sizeof(struct hv_enlightenments));
        }
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control)
{
        __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
                                             struct vmcb_save_area *from)
{
        /*
         * Copy only fields that are validated, as we need them
         * to avoid TOC/TOU races.
         */
        to->efer = from->efer;
        to->cr0 = from->cr0;
        to->cr3 = from->cr3;
        to->cr4 = from->cr4;

        to->dr6 = from->dr6;
        to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save)
{
        __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}
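
/*
 * Example of the TOC/TOU race the caches close: vmcb12 lives in guest
 * memory that L1 can write, so another L1 vCPU could rewrite, say,
 * vmcb12->save.efer after nested_vmcb_check_save() has passed.  All
 * checks and subsequent state loads therefore operate on the
 * svm->nested.{ctl,save} snapshots, never on the vmcb12 fields
 * themselves once they have been validated.
 */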

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
        u32 mask;
        svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
        svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

        /* Only a few fields of int_ctl are written by the processor.  */
        mask = V_IRQ_MASK | V_TPR_MASK;
        if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
            svm_is_intercept(svm, INTERCEPT_VINTR)) {
                /*
                 * In order to request an interrupt window, L0 is usurping
                 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
                 * even if it was clear in L1's VMCB.  Restoring it would be
                 * wrong.  However, in this case V_IRQ will remain true until
                 * interrupt_window_interception calls svm_clear_vintr and
                 * restores int_ctl.  We can just leave it aside.
                 */
                mask &= ~V_IRQ_MASK;
        }

        if (nested_vgif_enabled(svm))
                mask |= V_GIF_MASK;

        svm->nested.ctl.int_ctl        &= ~mask;
        svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
                                                struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 exit_int_info = 0;
        unsigned int nr;

        if (vcpu->arch.exception.injected) {
                nr = vcpu->arch.exception.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

                if (vcpu->arch.exception.has_error_code) {
                        exit_int_info |= SVM_EVTINJ_VALID_ERR;
                        vmcb12->control.exit_int_info_err =
                                vcpu->arch.exception.error_code;
                }

        } else if (vcpu->arch.nmi_injected) {
                exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

        } else if (vcpu->arch.interrupt.injected) {
                nr = vcpu->arch.interrupt.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID;

                if (vcpu->arch.interrupt.soft)
                        exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
                else
                        exit_int_info |= SVM_EVTINJ_TYPE_INTR;
        }

        vmcb12->control.exit_int_info = exit_int_info;
}
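
/*
 * Architectural background: on real hardware EXITINTINFO tells the
 * hypervisor that the vmexit interrupted delivery of an event, so the
 * event can be re-injected on the next VMRUN.  The function above
 * emulates that for L1: whatever event KVM was still delivering to L2
 * becomes visible to L1 in vmcb12.
 */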

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
        /*
         * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
         * things to fix before this can be conditional:
         *
         *  - Flush TLBs for both L1 and L2 remote TLB flush
         *  - Honor L1's request to flush an ASID on nested VMRUN
         *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
         *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
         *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
         *
         * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
         *     NPT guest-physical mappings on VMRUN.
         */
        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit.  @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt, bool reload_pdptrs)
{
        if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
                return -EINVAL;

        if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
            CC(!load_pdptrs(vcpu, cr3)))
                return -EINVAL;

        vcpu->arch.cr3 = cr3;

        /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
        kvm_init_mmu(vcpu);

        if (!nested_npt)
                kvm_mmu_new_pgd(vcpu, cr3);

        return 0;
}
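
/*
 * Note on the !nested_npt case above: without NPT for L2, KVM shadows
 * L2's page tables directly, so a new CR3 implies a new shadow root and
 * kvm_mmu_new_pgd() must run.  With nested NPT, L2's CR3 is plain guest
 * state consumed by the nested walk and no new root is needed here.
 */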

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
        if (!svm->nested.vmcb02.ptr)
                return;

        /* FIXME: merge g_pat from vmcb01 and vmcb12.  */
        svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        bool new_vmcb12 = false;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

        nested_vmcb02_compute_g_pat(svm);

        /* Load the nested guest state */
        if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
                new_vmcb12 = true;
                svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
                svm->nested.force_msr_bitmap_recalc = true;
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
                vmcb02->save.es = vmcb12->save.es;
                vmcb02->save.cs = vmcb12->save.cs;
                vmcb02->save.ss = vmcb12->save.ss;
                vmcb02->save.ds = vmcb12->save.ds;
                vmcb02->save.cpl = vmcb12->save.cpl;
                vmcb_mark_dirty(vmcb02, VMCB_SEG);
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
                vmcb02->save.gdtr = vmcb12->save.gdtr;
                vmcb02->save.idtr = vmcb12->save.idtr;
                vmcb_mark_dirty(vmcb02, VMCB_DT);
        }

        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

        svm_set_efer(&svm->vcpu, svm->nested.save.efer);

        svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
        svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

        svm->vcpu.arch.cr2 = vmcb12->save.cr2;

        kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
        kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
        kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        vmcb02->save.rax = vmcb12->save.rax;
        vmcb02->save.rsp = vmcb12->save.rsp;
        vmcb02->save.rip = vmcb12->save.rip;

        /* These bits will be set properly on the first execution when new_vmcb12 is true */
        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
                vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
                svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
                vmcb_mark_dirty(vmcb02, VMCB_DR);
        }

        if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                /*
                 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
                 * svm_set_msr's definition of reserved bits.
                 */
                svm_copy_lbrs(vmcb02, vmcb12);
                vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
                svm_update_lbrv(&svm->vcpu);

        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb02, vmcb01);
        }
}

static inline bool is_evtinj_soft(u32 evtinj)
{
        u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
        u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

        if (!(evtinj & SVM_EVTINJ_VALID))
                return false;

        if (type == SVM_EVTINJ_TYPE_SOFT)
                return true;

        return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
        u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

        if (!(evtinj & SVM_EVTINJ_VALID))
                return false;

        return type == SVM_EVTINJ_TYPE_NMI;
}
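
/*
 * "Soft" events are those whose delivery pushes a return RIP pointing
 * past the injected instruction: INTn, plus the INT3/INTO flavors of
 * #BP and #OF.  They are exactly the cases for which the next_rip
 * handling in nested_vmcb02_prepare_control() below matters.
 */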

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
                                          unsigned long vmcb12_rip)
{
        u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
        u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

        /*
         * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
         * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
         */

        if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
                int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
        else
                int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

        /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
        vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
        vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
        vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

        /* Done at vmrun: asid.  */

        /* Also overwritten later if necessary.  */
        vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

        /* nested_cr3.  */
        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(vcpu);

        vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
                        vcpu->arch.l1_tsc_offset,
                        svm->nested.ctl.tsc_offset,
                        svm->tsc_ratio_msr);

        vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

        if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
                WARN_ON(!svm->tsc_scaling_enabled);
                nested_svm_update_tsc_ratio_msr(vcpu);
        }

        vmcb02->control.int_ctl =
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

        vmcb02->control.int_vector = svm->nested.ctl.int_vector;
        vmcb02->control.int_state = svm->nested.ctl.int_state;
        vmcb02->control.event_inj = svm->nested.ctl.event_inj;
        vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

        /*
         * next_rip is consumed on VMRUN as the return address pushed on the
         * stack for injected soft exceptions/interrupts.  If nrips is exposed
         * to L1, take it verbatim from vmcb12.  If nrips is supported in
         * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
         * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
         * prior to injecting the event).
         */
        if (svm->nrips_enabled)
                vmcb02->control.next_rip = svm->nested.ctl.next_rip;
        else if (boot_cpu_has(X86_FEATURE_NRIPS))
                vmcb02->control.next_rip = vmcb12_rip;

        svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
        if (is_evtinj_soft(vmcb02->control.event_inj)) {
                svm->soft_int_injected = true;
                svm->soft_int_csbase = svm->vmcb->save.cs.base;
                svm->soft_int_old_rip = vmcb12_rip;
                if (svm->nrips_enabled)
                        svm->soft_int_next_rip = svm->nested.ctl.next_rip;
                else
                        svm->soft_int_next_rip = vmcb12_rip;
        }
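
        /*
         * The soft_int_* bookkeeping above lets KVM re-inject the event
         * correctly if this injection is cut short by an intercept: the
         * saved CS base and RIP values allow the event re-injection logic
         * in svm.c to recompute or advance RIP as a nrips=0 CPU would.
         */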

        vmcb02->control.virt_ext = vmcb01->control.virt_ext &
                                   LBR_CTL_ENABLE_MASK;
        if (svm->lbrv_enabled)
                vmcb02->control.virt_ext |=
                        (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

        if (!nested_vmcb_needs_vls_intercept(svm))
                vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

        if (kvm_pause_in_guest(svm->vcpu.kvm)) {
                /* use guest values since host doesn't use them */
                vmcb02->control.pause_filter_count =
                                svm->pause_filter_enabled ?
                                svm->nested.ctl.pause_filter_count : 0;

                vmcb02->control.pause_filter_thresh =
                                svm->pause_threshold_enabled ?
                                svm->nested.ctl.pause_filter_thresh : 0;

        } else if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
                /* use host values when guest doesn't use them */
                vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
                vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
        } else {
                /*
                 * Intercept every PAUSE otherwise and
                 * ignore both host and guest values
                 */
                vmcb02->control.pause_filter_count = 0;
                vmcb02->control.pause_filter_thresh = 0;
        }

        nested_svm_transition_tlb_flush(vcpu);

        /* Enter Guest-Mode */
        enter_guest_mode(vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect.
         */
        recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        /*
         * Some VMCB state is shared between L1 and L2 and thus has to be
         * moved at the time of nested vmrun and vmexit.
         *
         * VMLOAD/VMSAVE state would also belong in this category, but KVM
         * always performs VMLOAD and VMSAVE from the VMCB01.
         */
        to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
                         struct vmcb *vmcb12, bool from_vmrun)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
                               vmcb12->save.rip,
                               vmcb12->control.int_ctl,
                               vmcb12->control.event_inj,
                               vmcb12->control.nested_ctl);

        trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
                                    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
                                    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
                                    vmcb12->control.intercepts[INTERCEPT_WORD3],
                                    vmcb12->control.intercepts[INTERCEPT_WORD4],
                                    vmcb12->control.intercepts[INTERCEPT_WORD5]);

        svm->nested.vmcb12_gpa = vmcb12_gpa;

        WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

        nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm, vmcb12->save.rip);
        nested_vmcb02_prepare_save(svm, vmcb12);

        ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
                                  nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;

        if (!from_vmrun)
                kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        svm_set_gif(svm, true);

        if (kvm_vcpu_apicv_active(vcpu))
                kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

        return 0;
}
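
/*
 * enter_svm_guest_mode() has two callers: the VMRUN intercept below
 * (from_vmrun == true) and nested state restore via KVM_SET_NESTED_STATE
 * (from_vmrun == false).  In the latter case guest memory may not be
 * accessible yet, so loading the nested state pages is deferred through
 * KVM_REQ_GET_NESTED_STATE_PAGES instead of being done synchronously.
 */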

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        u64 vmcb12_gpa;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;

        if (!svm->nested.hsave_msr) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        if (is_smm(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        vmcb12_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(vcpu);
        }

        ret = kvm_skip_emulated_instruction(vcpu);

        vmcb12 = map.hva;

        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;

        nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
        nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

        if (!nested_vmcb_check_save(vcpu) ||
            !nested_vmcb_check_controls(vcpu)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
                vmcb12->control.exit_info_2  = 0;
                goto out;
        }

        /*
         * Since vmcb01 is not in use, we can use it to store some of the L1
         * state.
         */
        vmcb01->save.efer   = vcpu->arch.efer;
        vmcb01->save.cr0    = kvm_read_cr0(vcpu);
        vmcb01->save.cr4    = vcpu->arch.cr4;
        vmcb01->save.rflags = kvm_get_rflags(vcpu);
        vmcb01->save.rip    = kvm_rip_read(vcpu);

        if (!npt_enabled)
                vmcb01->save.cr3 = kvm_read_cr3(vcpu);

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

out_exit_err:
        svm->nested.nested_run_pending = 0;
        svm->nmi_l1_to_l2 = false;
        svm->soft_int_injected = false;

        svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1  = 0;
        svm->vmcb->control.exit_info_2  = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(vcpu, &map, true);

        return ret;
}
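
/*
 * Note that nested_svm_vmrun() advances RIP past the VMRUN instruction
 * (kvm_skip_emulated_instruction()) before entering L2, so the L1 RIP
 * stashed in vmcb01 already points at the next instruction and a later
 * #VMEXIT resumes L1 in the right place.
 */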

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save)
{
        to_save->es = from_save->es;
        to_save->cs = from_save->cs;
        to_save->ss = from_save->ss;
        to_save->ds = from_save->ds;
        to_save->gdtr = from_save->gdtr;
        to_save->idtr = from_save->idtr;
        to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
        to_save->efer = from_save->efer;
        to_save->cr0 = from_save->cr0;
        to_save->cr3 = from_save->cr3;
        to_save->cr4 = from_save->cr4;
        to_save->rax = from_save->rax;
        to_save->rsp = from_save->rsp;
        to_save->rip = from_save->rip;
        to_save->cpl = 0;
}

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
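
/*
 * The split between the two helpers above mirrors the hardware split:
 * VMRUN and #VMEXIT swap the state needed to start fetching code in the
 * new context (ES/CS/SS/DS, the descriptor tables, control registers,
 * RIP/RSP/RAX), while VMLOAD/VMSAVE handle the remaining hidden state
 * (FS/GS/TR/LDTR and the SYSCALL/SYSENTER MSRs) that VMRUN deliberately
 * leaves alone.
 */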

int nested_svm_vmexit(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        int rc;

        rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */

        vmcb12->save.es     = vmcb02->save.es;
        vmcb12->save.cs     = vmcb02->save.cs;
        vmcb12->save.ss     = vmcb02->save.ss;
        vmcb12->save.ds     = vmcb02->save.ds;
        vmcb12->save.gdtr   = vmcb02->save.gdtr;
        vmcb12->save.idtr   = vmcb02->save.idtr;
        vmcb12->save.efer   = svm->vcpu.arch.efer;
        vmcb12->save.cr0    = kvm_read_cr0(vcpu);
        vmcb12->save.cr3    = kvm_read_cr3(vcpu);
        vmcb12->save.cr2    = vmcb02->save.cr2;
        vmcb12->save.cr4    = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(vcpu);
        vmcb12->save.rip    = kvm_rip_read(vcpu);
        vmcb12->save.rsp    = kvm_rsp_read(vcpu);
        vmcb12->save.rax    = kvm_rax_read(vcpu);
        vmcb12->save.dr7    = vmcb02->save.dr7;
        vmcb12->save.dr6    = svm->vcpu.arch.dr6;
        vmcb12->save.cpl    = vmcb02->save.cpl;

        vmcb12->control.int_state    = vmcb02->control.int_state;
        vmcb12->control.exit_code    = vmcb02->control.exit_code;
        vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
        vmcb12->control.exit_info_1  = vmcb02->control.exit_info_1;
        vmcb12->control.exit_info_2  = vmcb02->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_save_pending_event_to_vmcb12(svm, vmcb12);

        if (svm->nrips_enabled)
                vmcb12->control.next_rip = vmcb02->control.next_rip;

        vmcb12->control.int_ctl       = svm->nested.ctl.int_ctl;
        vmcb12->control.tlb_ctl       = svm->nested.ctl.tlb_ctl;
        vmcb12->control.event_inj     = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

        if (!kvm_pause_in_guest(vcpu->kvm) && vmcb02->control.pause_filter_count)
                vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;

        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
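
        /*
         * Note on the writeback above: save-area fields are taken from
         * vmcb02 or current vCPU state, while control fields such as
         * int_ctl and event_inj come from the cached nested.ctl, which
         * nested_sync_control_from_vmcb02() keeps in sync with what the
         * processor actually wrote.  This is what makes the vmexit
         * architecturally visible to L1 through its vmcb12.
         */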

        svm_switch_vmcb(svm, &svm->vmcb01);

        if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                svm_copy_lbrs(vmcb12, vmcb02);
                svm_update_lbrv(vcpu);
        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb01, vmcb02);
                svm_update_lbrv(vcpu);
        }

        /*
         * On vmexit the GIF is set to false and
         * no event can be injected in L1.
         */
        svm_set_gif(svm, false);
        vmcb01->control.exit_int_info = 0;

        svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
        if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
                vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
                vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
        }

        if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
                WARN_ON(!svm->tsc_scaling_enabled);
                vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
                __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
        }

        svm->nested.ctl.nested_cr3 = 0;

        /*
         * Restore processor state that had been saved in vmcb01
         */
        kvm_set_rflags(vcpu, vmcb01->save.rflags);
        svm_set_efer(vcpu, vmcb01->save.efer);
        svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
        svm_set_cr4(vcpu, vmcb01->save.cr4);
        kvm_rax_write(vcpu, vmcb01->save.rax);
        kvm_rsp_write(vcpu, vmcb01->save.rsp);
        kvm_rip_write(vcpu, vmcb01->save.rip);

        svm->vcpu.arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(&svm->vcpu);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(vcpu, &map, true);

        nested_svm_transition_tlb_flush(vcpu);

        nested_svm_uninit_mmu_context(vcpu);

        rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
        if (rc)
                return 1;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(vcpu);
        kvm_clear_interrupt_queue(vcpu);

        /*
         * If we are here following the completion of a VMRUN that
         * is being single-stepped, queue the pending #DB intercept
         * right now so that it can be accounted for before we execute
         * L1's next instruction.
         */
        if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
                kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

        /*
         * Un-inhibit the AVIC right away, so that other vCPUs can start
         * to benefit from it immediately.
         */
        if (kvm_apicv_activated(vcpu->kvm))
                kvm_vcpu_update_apicv(vcpu);

        return 0;
}
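
/*
 * A triple fault in L2 does not shut down the whole VM; it is reflected
 * into L1 as a synthesized SVM_EXIT_SHUTDOWN vmexit (see
 * nested_svm_triple_fault() below), matching what hardware would report
 * to a hypervisor whose guest shut down.
 */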
10287ca62d13SPaolo Bonzini 	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1029db663af4SMaxim Levitsky 	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1030db663af4SMaxim Levitsky 		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1031db663af4SMaxim Levitsky 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
10327ca62d13SPaolo Bonzini 	}
103318fc6c55SPaolo Bonzini 
10345228eb96SMaxim Levitsky 	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
10355228eb96SMaxim Levitsky 		WARN_ON(!svm->tsc_scaling_enabled);
10365228eb96SMaxim Levitsky 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
103711d39e8cSMaxim Levitsky 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
10385228eb96SMaxim Levitsky 	}
10395228eb96SMaxim Levitsky 
1040e670bf68SPaolo Bonzini 	svm->nested.ctl.nested_cr3 = 0;
1041883b0a91SJoerg Roedel 
10424995a368SCathy Avery 	/*
10434995a368SCathy Avery 	 * Restore processor state that had been saved in vmcb01
10444995a368SCathy Avery 	 */
1045db663af4SMaxim Levitsky 	kvm_set_rflags(vcpu, vmcb01->save.rflags);
1046db663af4SMaxim Levitsky 	svm_set_efer(vcpu, vmcb01->save.efer);
1047db663af4SMaxim Levitsky 	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1048db663af4SMaxim Levitsky 	svm_set_cr4(vcpu, vmcb01->save.cr4);
1049db663af4SMaxim Levitsky 	kvm_rax_write(vcpu, vmcb01->save.rax);
1050db663af4SMaxim Levitsky 	kvm_rsp_write(vcpu, vmcb01->save.rsp);
1051db663af4SMaxim Levitsky 	kvm_rip_write(vcpu, vmcb01->save.rip);
10524995a368SCathy Avery 
10534995a368SCathy Avery 	svm->vcpu.arch.dr7 = DR7_FIXED_1;
10544995a368SCathy Avery 	kvm_update_dr7(&svm->vcpu);
1055883b0a91SJoerg Roedel 
10560dd16b5bSMaxim Levitsky 	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
10570dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_info_1,
10580dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_info_2,
10590dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_int_info,
10600dd16b5bSMaxim Levitsky 				       vmcb12->control.exit_int_info_err,
106136e2e983SPaolo Bonzini 				       KVM_ISA_SVM);
106236e2e983SPaolo Bonzini 
106363129754SPaolo Bonzini 	kvm_vcpu_unmap(vcpu, &map, true);
1064883b0a91SJoerg Roedel 
1065d2e56019SSean Christopherson 	nested_svm_transition_tlb_flush(vcpu);
1066d2e56019SSean Christopherson 
106763129754SPaolo Bonzini 	nested_svm_uninit_mmu_context(vcpu);
1068bf7dea42SVitaly Kuznetsov 
1069db663af4SMaxim Levitsky 	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1070d82aaef9SVitaly Kuznetsov 	if (rc)
1071d82aaef9SVitaly Kuznetsov 		return 1;
1072bf7dea42SVitaly Kuznetsov 
1073883b0a91SJoerg Roedel 	/*
1074883b0a91SJoerg Roedel 	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1075883b0a91SJoerg Roedel 	 * doesn't end up in L1.
1076883b0a91SJoerg Roedel 	 */
1077883b0a91SJoerg Roedel 	svm->vcpu.arch.nmi_injected = false;
107863129754SPaolo Bonzini 	kvm_clear_exception_queue(vcpu);
107963129754SPaolo Bonzini 	kvm_clear_interrupt_queue(vcpu);
1080883b0a91SJoerg Roedel 
10819a7de6ecSKrish Sadhukhan 	/*
10829a7de6ecSKrish Sadhukhan 	 * If we are here following the completion of a VMRUN that
10839a7de6ecSKrish Sadhukhan 	 * is being single-stepped, queue the pending #DB intercept
10849a7de6ecSKrish Sadhukhan 	 * right now so that it can be accounted for before we execute
10859a7de6ecSKrish Sadhukhan 	 * L1's next instruction.
10869a7de6ecSKrish Sadhukhan 	 */
1087db663af4SMaxim Levitsky 	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
10889a7de6ecSKrish Sadhukhan 		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
10899a7de6ecSKrish Sadhukhan 
1090f44509f8SMaxim Levitsky 	/*
1091f44509f8SMaxim Levitsky 	 * Un-inhibit the AVIC right away, so that other vCPUs can
1092f44509f8SMaxim Levitsky 	 * start to benefit from it.
1093f44509f8SMaxim Levitsky 	 */
1094f44509f8SMaxim Levitsky 	if (kvm_apicv_activated(vcpu->kvm))
1095f44509f8SMaxim Levitsky 		kvm_vcpu_update_apicv(vcpu);
1096f44509f8SMaxim Levitsky 
1097883b0a91SJoerg Roedel 	return 0;
1098883b0a91SJoerg Roedel }
1099883b0a91SJoerg Roedel 
1100cb6a32c2SSean Christopherson static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1101cb6a32c2SSean Christopherson {
11023a87c7e0SSean Christopherson 	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1103cb6a32c2SSean Christopherson }
1104cb6a32c2SSean Christopherson 
11052fcf4876SMaxim Levitsky int svm_allocate_nested(struct vcpu_svm *svm)
11062fcf4876SMaxim Levitsky {
11074995a368SCathy Avery 	struct page *vmcb02_page;
11082fcf4876SMaxim Levitsky 
11092fcf4876SMaxim Levitsky 	if (svm->nested.initialized)
11102fcf4876SMaxim Levitsky 		return 0;
11112fcf4876SMaxim Levitsky 
11124995a368SCathy Avery 	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
11134995a368SCathy Avery 	if (!vmcb02_page)
11142fcf4876SMaxim Levitsky 		return -ENOMEM;
11154995a368SCathy Avery 	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
11164995a368SCathy Avery 	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
11172fcf4876SMaxim Levitsky 
11182fcf4876SMaxim Levitsky 	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
11192fcf4876SMaxim Levitsky 	if (!svm->nested.msrpm)
11204995a368SCathy Avery 		goto err_free_vmcb02;
11212fcf4876SMaxim Levitsky 	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
11222fcf4876SMaxim Levitsky 
11232fcf4876SMaxim Levitsky 	svm->nested.initialized = true;
11242fcf4876SMaxim Levitsky 	return 0;
11252fcf4876SMaxim Levitsky 
11264995a368SCathy Avery err_free_vmcb02:
11274995a368SCathy Avery 	__free_page(vmcb02_page);
11282fcf4876SMaxim Levitsky 	return -ENOMEM;
11292fcf4876SMaxim Levitsky }
11302fcf4876SMaxim Levitsky 
11312fcf4876SMaxim Levitsky void svm_free_nested(struct vcpu_svm *svm)
11322fcf4876SMaxim Levitsky {
11332fcf4876SMaxim Levitsky 	if (!svm->nested.initialized)
11342fcf4876SMaxim Levitsky 		return;
11352fcf4876SMaxim Levitsky 
11362fcf4876SMaxim Levitsky 	svm_vcpu_free_msrpm(svm->nested.msrpm);
11372fcf4876SMaxim Levitsky 	svm->nested.msrpm = NULL;
11382fcf4876SMaxim Levitsky 
11394995a368SCathy Avery 	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
11404995a368SCathy Avery 	svm->nested.vmcb02.ptr = NULL;
11412fcf4876SMaxim Levitsky 
1142c74ad08fSMaxim Levitsky 	/*
1143c74ad08fSMaxim Levitsky 	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1144c74ad08fSMaxim Levitsky 	 * some vmcb12 fields are not loaded if they are marked clean
1145c74ad08fSMaxim Levitsky 	 * in the vmcb12, since in this case they are up to date already.
1146c74ad08fSMaxim Levitsky 	 *
1147c74ad08fSMaxim Levitsky 	 * When the vmcb02 is freed, this optimization becomes invalid.
1148c74ad08fSMaxim Levitsky 	 */
1149c74ad08fSMaxim Levitsky 	svm->nested.last_vmcb12_gpa = INVALID_GPA;
1150c74ad08fSMaxim Levitsky 
11512fcf4876SMaxim Levitsky 	svm->nested.initialized = false;
11522fcf4876SMaxim Levitsky }
11532fcf4876SMaxim Levitsky 
1154c513f484SPaolo Bonzini /*
1155c513f484SPaolo Bonzini  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
1156c513f484SPaolo Bonzini  */
1157f7e57078SSean Christopherson void svm_leave_nested(struct kvm_vcpu *vcpu)
1158c513f484SPaolo Bonzini {
1159f7e57078SSean Christopherson 	struct vcpu_svm *svm = to_svm(vcpu);
116063129754SPaolo Bonzini 
116163129754SPaolo Bonzini 	if (is_guest_mode(vcpu)) {
1162c513f484SPaolo Bonzini 		svm->nested.nested_run_pending = 0;
1163c74ad08fSMaxim Levitsky 		svm->nested.vmcb12_gpa = INVALID_GPA;
1164c74ad08fSMaxim Levitsky 
116563129754SPaolo Bonzini 		leave_guest_mode(vcpu);
11664995a368SCathy Avery 
1167deee59baSMaxim Levitsky 		svm_switch_vmcb(svm, &svm->vmcb01);
11684995a368SCathy Avery 
116963129754SPaolo Bonzini 		nested_svm_uninit_mmu_context(vcpu);
117056fe28deSMaxim Levitsky 		vmcb_mark_all_dirty(svm->vmcb);
1171c513f484SPaolo Bonzini 	}
1172a7d5c7ceSPaolo Bonzini 
117363129754SPaolo Bonzini 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1174c513f484SPaolo Bonzini }
1175c513f484SPaolo Bonzini 
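/*
 * Check L1's MSR permission map to decide whether an MSR intercept that
 * fired while L2 was running is L1's to handle.  Each MSR has two
 * consecutive bits in the MSRPM (read, then write), so the bit to test
 * within the 32-bit word located by svm_msrpm_offset() is
 * 2 * (msr & 0xf) + write; e.g. a write to MSR 0xc0000082 (LSTAR) tests
 * bit 5 (2 * 2 + 1) of its word.
 */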
1176883b0a91SJoerg Roedel static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1177883b0a91SJoerg Roedel {
1178883b0a91SJoerg Roedel 	u32 offset, msr, value;
1179883b0a91SJoerg Roedel 	int write, mask;
1180883b0a91SJoerg Roedel 
11818fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1182883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1183883b0a91SJoerg Roedel 
1184883b0a91SJoerg Roedel 	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1185883b0a91SJoerg Roedel 	offset = svm_msrpm_offset(msr);
1186883b0a91SJoerg Roedel 	write = svm->vmcb->control.exit_info_1 & 1;
1187883b0a91SJoerg Roedel 	mask = 1 << ((2 * (msr & 0xf)) + write);
1188883b0a91SJoerg Roedel 
1189883b0a91SJoerg Roedel 	if (offset == MSR_INVALID)
1190883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1191883b0a91SJoerg Roedel 
1192883b0a91SJoerg Roedel 	/* The offset is in 32-bit units, but we need it in byte units */
1193883b0a91SJoerg Roedel 	offset *= 4;
1194883b0a91SJoerg Roedel 
1195e670bf68SPaolo Bonzini 	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1196883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1197883b0a91SJoerg Roedel 
1198883b0a91SJoerg Roedel 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1199883b0a91SJoerg Roedel }
1200883b0a91SJoerg Roedel 
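/*
 * Check L1's I/O permission map for an I/O access intercepted in L2.  The
 * IOPM has one bit per port, and an access of N bytes must test N bits
 * starting at the port's bit; e.g. a two-byte access to port 0x3f9 tests
 * bits 1-2 (mask 0x6) of IOPM byte 0x7f, and an access whose bits extend
 * past bit 7 straddles two bytes (iopm_len == 2).
 */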
1201883b0a91SJoerg Roedel static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1202883b0a91SJoerg Roedel {
1203883b0a91SJoerg Roedel 	unsigned port, size, iopm_len;
1204883b0a91SJoerg Roedel 	u16 val, mask;
1205883b0a91SJoerg Roedel 	u8 start_bit;
1206883b0a91SJoerg Roedel 	u64 gpa;
1207883b0a91SJoerg Roedel 
12088fc78909SEmanuele Giuseppe Esposito 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1209883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
1210883b0a91SJoerg Roedel 
1211883b0a91SJoerg Roedel 	port = svm->vmcb->control.exit_info_1 >> 16;
1212883b0a91SJoerg Roedel 	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1213883b0a91SJoerg Roedel 		SVM_IOIO_SIZE_SHIFT;
1214e670bf68SPaolo Bonzini 	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1215883b0a91SJoerg Roedel 	start_bit = port % 8;
1216883b0a91SJoerg Roedel 	iopm_len = (start_bit + size > 8) ? 2 : 1;
1217883b0a91SJoerg Roedel 	mask = (0xf >> (4 - size)) << start_bit;
1218883b0a91SJoerg Roedel 	val = 0;
1219883b0a91SJoerg Roedel 
1220883b0a91SJoerg Roedel 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1221883b0a91SJoerg Roedel 		return NESTED_EXIT_DONE;
1222883b0a91SJoerg Roedel 
1223883b0a91SJoerg Roedel 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1224883b0a91SJoerg Roedel }
1225883b0a91SJoerg Roedel 
1226883b0a91SJoerg Roedel static int nested_svm_intercept(struct vcpu_svm *svm)
1227883b0a91SJoerg Roedel {
1228883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1229883b0a91SJoerg Roedel 	int vmexit = NESTED_EXIT_HOST;
1230883b0a91SJoerg Roedel 
1231883b0a91SJoerg Roedel 	switch (exit_code) {
1232883b0a91SJoerg Roedel 	case SVM_EXIT_MSR:
1233883b0a91SJoerg Roedel 		vmexit = nested_svm_exit_handled_msr(svm);
1234883b0a91SJoerg Roedel 		break;
1235883b0a91SJoerg Roedel 	case SVM_EXIT_IOIO:
1236883b0a91SJoerg Roedel 		vmexit = nested_svm_intercept_ioio(svm);
1237883b0a91SJoerg Roedel 		break;
1238883b0a91SJoerg Roedel 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
12398fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1240883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1241883b0a91SJoerg Roedel 		break;
1242883b0a91SJoerg Roedel 	}
1243883b0a91SJoerg Roedel 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
12448fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1245883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1246883b0a91SJoerg Roedel 		break;
1247883b0a91SJoerg Roedel 	}
1248883b0a91SJoerg Roedel 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
12497c86663bSPaolo Bonzini 		/*
12507c86663bSPaolo Bonzini 		 * Host-intercepted exceptions have been checked already in
12517c86663bSPaolo Bonzini 		 * nested_svm_exit_special. There is nothing to do here;
12527c86663bSPaolo Bonzini 		 * the vmexit is injected by svm_check_nested_events.
12537c86663bSPaolo Bonzini 		 */
1254883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1255883b0a91SJoerg Roedel 		break;
1256883b0a91SJoerg Roedel 	}
1257883b0a91SJoerg Roedel 	case SVM_EXIT_ERR: {
1258883b0a91SJoerg Roedel 		vmexit = NESTED_EXIT_DONE;
1259883b0a91SJoerg Roedel 		break;
1260883b0a91SJoerg Roedel 	}
1261883b0a91SJoerg Roedel 	default: {
12628fc78909SEmanuele Giuseppe Esposito 		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1263883b0a91SJoerg Roedel 			vmexit = NESTED_EXIT_DONE;
1264883b0a91SJoerg Roedel 	}
1265883b0a91SJoerg Roedel 	}
1266883b0a91SJoerg Roedel 
1267883b0a91SJoerg Roedel 	return vmexit;
1268883b0a91SJoerg Roedel }
1269883b0a91SJoerg Roedel 
1270883b0a91SJoerg Roedel int nested_svm_exit_handled(struct vcpu_svm *svm)
1271883b0a91SJoerg Roedel {
1272883b0a91SJoerg Roedel 	int vmexit;
1273883b0a91SJoerg Roedel 
1274883b0a91SJoerg Roedel 	vmexit = nested_svm_intercept(svm);
1275883b0a91SJoerg Roedel 
1276883b0a91SJoerg Roedel 	if (vmexit == NESTED_EXIT_DONE)
1277883b0a91SJoerg Roedel 		nested_svm_vmexit(svm);
1278883b0a91SJoerg Roedel 
1279883b0a91SJoerg Roedel 	return vmexit;
1280883b0a91SJoerg Roedel }
1281883b0a91SJoerg Roedel 
128263129754SPaolo Bonzini int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1283883b0a91SJoerg Roedel {
128463129754SPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
128563129754SPaolo Bonzini 		kvm_queue_exception(vcpu, UD_VECTOR);
1286883b0a91SJoerg Roedel 		return 1;
1287883b0a91SJoerg Roedel 	}
1288883b0a91SJoerg Roedel 
128963129754SPaolo Bonzini 	if (to_svm(vcpu)->vmcb->save.cpl) {
129063129754SPaolo Bonzini 		kvm_inject_gp(vcpu, 0);
1291883b0a91SJoerg Roedel 		return 1;
1292883b0a91SJoerg Roedel 	}
1293883b0a91SJoerg Roedel 
1294883b0a91SJoerg Roedel 	return 0;
1295883b0a91SJoerg Roedel }
1296883b0a91SJoerg Roedel 
12977c86663bSPaolo Bonzini static bool nested_exit_on_exception(struct vcpu_svm *svm)
1298883b0a91SJoerg Roedel {
12997c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1300883b0a91SJoerg Roedel 
13019780d51dSBabu Moger 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
13027c86663bSPaolo Bonzini }
1303883b0a91SJoerg Roedel 
13047c86663bSPaolo Bonzini static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
13057c86663bSPaolo Bonzini {
13067c86663bSPaolo Bonzini 	unsigned int nr = svm->vcpu.arch.exception.nr;
1307db663af4SMaxim Levitsky 	struct vmcb *vmcb = svm->vmcb;
1308883b0a91SJoerg Roedel 
1309db663af4SMaxim Levitsky 	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1310db663af4SMaxim Levitsky 	vmcb->control.exit_code_hi = 0;
13117c86663bSPaolo Bonzini 
13127c86663bSPaolo Bonzini 	if (svm->vcpu.arch.exception.has_error_code)
1313db663af4SMaxim Levitsky 		vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1314883b0a91SJoerg Roedel 
1315883b0a91SJoerg Roedel 	/*
1316883b0a91SJoerg Roedel 	 * EXITINFO2 is undefined for all exception intercepts other
1317883b0a91SJoerg Roedel 	 * than #PF.
1318883b0a91SJoerg Roedel 	 */
13197c86663bSPaolo Bonzini 	if (nr == PF_VECTOR) {
1320883b0a91SJoerg Roedel 		if (svm->vcpu.arch.exception.nested_apf)
1321db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1322883b0a91SJoerg Roedel 		else if (svm->vcpu.arch.exception.has_payload)
1323db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1324883b0a91SJoerg Roedel 		else
1325db663af4SMaxim Levitsky 			vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
13267c86663bSPaolo Bonzini 	} else if (nr == DB_VECTOR) {
13277c86663bSPaolo Bonzini 		/* See inject_pending_event.  */
13287c86663bSPaolo Bonzini 		kvm_deliver_exception_payload(&svm->vcpu);
13297c86663bSPaolo Bonzini 		if (svm->vcpu.arch.dr7 & DR7_GD) {
13307c86663bSPaolo Bonzini 			svm->vcpu.arch.dr7 &= ~DR7_GD;
13317c86663bSPaolo Bonzini 			kvm_update_dr7(&svm->vcpu);
13327c86663bSPaolo Bonzini 		}
13337c86663bSPaolo Bonzini 	} else
13347c86663bSPaolo Bonzini 		WARN_ON(svm->vcpu.arch.exception.has_payload);
1335883b0a91SJoerg Roedel 
13367c86663bSPaolo Bonzini 	nested_svm_vmexit(svm);
1337883b0a91SJoerg Roedel }
1338883b0a91SJoerg Roedel 
13395b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
13405b672408SPaolo Bonzini {
13418fc78909SEmanuele Giuseppe Esposito 	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
13425b672408SPaolo Bonzini }
13435b672408SPaolo Bonzini 
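/*
 * Pending events are processed in priority order: INIT, exceptions, SMI,
 * NMI, then external interrupts.  Each event either becomes a synthetic
 * vmexit to L1 (if L1 intercepts it) or is left to the normal injection
 * path; -EBUSY asks the caller to retry once the blocking condition (a
 * pending nested VMRUN, or an event awaiting reinjection) has cleared.
 */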
134433b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1345883b0a91SJoerg Roedel {
1346883b0a91SJoerg Roedel 	struct vcpu_svm *svm = to_svm(vcpu);
1347883b0a91SJoerg Roedel 	bool block_nested_events =
1348bd279629SPaolo Bonzini 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
13495b672408SPaolo Bonzini 	struct kvm_lapic *apic = vcpu->arch.apic;
13505b672408SPaolo Bonzini 
13515b672408SPaolo Bonzini 	if (lapic_in_kernel(vcpu) &&
13525b672408SPaolo Bonzini 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
13535b672408SPaolo Bonzini 		if (block_nested_events)
13545b672408SPaolo Bonzini 			return -EBUSY;
13555b672408SPaolo Bonzini 		if (!nested_exit_on_init(svm))
13565b672408SPaolo Bonzini 			return 0;
13573a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
13585b672408SPaolo Bonzini 		return 0;
13595b672408SPaolo Bonzini 	}
1360883b0a91SJoerg Roedel 
13617c86663bSPaolo Bonzini 	if (vcpu->arch.exception.pending) {
13624020da3bSMaxim Levitsky 		/*
13634020da3bSMaxim Levitsky 		 * Only a pending nested run can block a pending exception.
13644020da3bSMaxim Levitsky 		 * Otherwise an injected NMI/interrupt should either be
13654020da3bSMaxim Levitsky 		 * lost or delivered to the nested hypervisor in the EXITINTINFO
13664020da3bSMaxim Levitsky 		 * vmcb field, while delivering the pending exception.
13674020da3bSMaxim Levitsky 		 */
13684020da3bSMaxim Levitsky 		if (svm->nested.nested_run_pending)
13697c86663bSPaolo Bonzini 			return -EBUSY;
13707c86663bSPaolo Bonzini 		if (!nested_exit_on_exception(svm))
13717c86663bSPaolo Bonzini 			return 0;
13727c86663bSPaolo Bonzini 		nested_svm_inject_exception_vmexit(svm);
13737c86663bSPaolo Bonzini 		return 0;
13747c86663bSPaolo Bonzini 	}
13757c86663bSPaolo Bonzini 
1376221e7610SPaolo Bonzini 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
137755714cddSPaolo Bonzini 		if (block_nested_events)
137855714cddSPaolo Bonzini 			return -EBUSY;
1379221e7610SPaolo Bonzini 		if (!nested_exit_on_smi(svm))
1380221e7610SPaolo Bonzini 			return 0;
13813a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
138255714cddSPaolo Bonzini 		return 0;
138355714cddSPaolo Bonzini 	}
138455714cddSPaolo Bonzini 
1385221e7610SPaolo Bonzini 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
13869c3d370aSCathy Avery 		if (block_nested_events)
13879c3d370aSCathy Avery 			return -EBUSY;
1388221e7610SPaolo Bonzini 		if (!nested_exit_on_nmi(svm))
1389221e7610SPaolo Bonzini 			return 0;
13903a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
13919c3d370aSCathy Avery 		return 0;
13929c3d370aSCathy Avery 	}
13939c3d370aSCathy Avery 
1394221e7610SPaolo Bonzini 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1395883b0a91SJoerg Roedel 		if (block_nested_events)
1396883b0a91SJoerg Roedel 			return -EBUSY;
1397221e7610SPaolo Bonzini 		if (!nested_exit_on_intr(svm))
1398221e7610SPaolo Bonzini 			return 0;
13993a87c7e0SSean Christopherson 		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
14003a87c7e0SSean Christopherson 		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1401883b0a91SJoerg Roedel 		return 0;
1402883b0a91SJoerg Roedel 	}
1403883b0a91SJoerg Roedel 
1404883b0a91SJoerg Roedel 	return 0;
1405883b0a91SJoerg Roedel }
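
/*
 * Decide which exits L0 must handle itself regardless of what L1
 * intercepts: physical INTR/NMI and NPT faults always go to L0, as do
 * exceptions that L0 itself intercepts, including the synthetic #PF used
 * for async page faults.
 */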
1407883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1408883b0a91SJoerg Roedel {
1409883b0a91SJoerg Roedel 	u32 exit_code = svm->vmcb->control.exit_code;
1410883b0a91SJoerg Roedel 
1411883b0a91SJoerg Roedel 	switch (exit_code) {
1412883b0a91SJoerg Roedel 	case SVM_EXIT_INTR:
1413883b0a91SJoerg Roedel 	case SVM_EXIT_NMI:
1414883b0a91SJoerg Roedel 	case SVM_EXIT_NPF:
1415883b0a91SJoerg Roedel 		return NESTED_EXIT_HOST;
14167c86663bSPaolo Bonzini 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
14177c86663bSPaolo Bonzini 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
14187c86663bSPaolo Bonzini 
14194995a368SCathy Avery 		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
14209780d51dSBabu Moger 		    excp_bits)
14217c86663bSPaolo Bonzini 			return NESTED_EXIT_HOST;
14227c86663bSPaolo Bonzini 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
142368fd66f1SVitaly Kuznetsov 			 svm->vcpu.arch.apf.host_apf_flags)
1424a3535be7SPaolo Bonzini 			/* Trap async PF even if not shadowing */
1425883b0a91SJoerg Roedel 			return NESTED_EXIT_HOST;
1426883b0a91SJoerg Roedel 		break;
14277c86663bSPaolo Bonzini 	}
1428883b0a91SJoerg Roedel 	default:
1429883b0a91SJoerg Roedel 		break;
1430883b0a91SJoerg Roedel 	}
1431883b0a91SJoerg Roedel 
1432883b0a91SJoerg Roedel 	return NESTED_EXIT_CONTINUE;
1433883b0a91SJoerg Roedel }
143433b22172SPaolo Bonzini 
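/*
 * On SVM the TSC ratio is a fixed-point value with 32 fractional bits, so
 * the combined ratio computed by kvm_calc_nested_tsc_multiplier() is, in
 * effect, (l1_ratio * l2_ratio) >> 32.
 */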
14355228eb96SMaxim Levitsky void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
14365228eb96SMaxim Levitsky {
14375228eb96SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
14385228eb96SMaxim Levitsky 
14395228eb96SMaxim Levitsky 	vcpu->arch.tsc_scaling_ratio =
14405228eb96SMaxim Levitsky 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
14415228eb96SMaxim Levitsky 					       svm->tsc_ratio_msr);
144211d39e8cSMaxim Levitsky 	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
14435228eb96SMaxim Levitsky }
14445228eb96SMaxim Levitsky 
14458fc78909SEmanuele Giuseppe Esposito /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
14468fc78909SEmanuele Giuseppe Esposito static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
14478fc78909SEmanuele Giuseppe Esposito 					      struct vmcb_ctrl_area_cached *from)
14488fc78909SEmanuele Giuseppe Esposito {
14498fc78909SEmanuele Giuseppe Esposito 	unsigned int i;
14508fc78909SEmanuele Giuseppe Esposito 
14518fc78909SEmanuele Giuseppe Esposito 	memset(dst, 0, sizeof(struct vmcb_control_area));
14528fc78909SEmanuele Giuseppe Esposito 
14538fc78909SEmanuele Giuseppe Esposito 	for (i = 0; i < MAX_INTERCEPT; i++)
14548fc78909SEmanuele Giuseppe Esposito 		dst->intercepts[i] = from->intercepts[i];
14558fc78909SEmanuele Giuseppe Esposito 
14568fc78909SEmanuele Giuseppe Esposito 	dst->iopm_base_pa = from->iopm_base_pa;
14578fc78909SEmanuele Giuseppe Esposito 	dst->msrpm_base_pa = from->msrpm_base_pa;
14588fc78909SEmanuele Giuseppe Esposito 	dst->tsc_offset = from->tsc_offset;
14598fc78909SEmanuele Giuseppe Esposito 	dst->asid = from->asid;
14608fc78909SEmanuele Giuseppe Esposito 	dst->tlb_ctl = from->tlb_ctl;
14618fc78909SEmanuele Giuseppe Esposito 	dst->int_ctl = from->int_ctl;
14628fc78909SEmanuele Giuseppe Esposito 	dst->int_vector = from->int_vector;
14638fc78909SEmanuele Giuseppe Esposito 	dst->int_state = from->int_state;
14648fc78909SEmanuele Giuseppe Esposito 	dst->exit_code = from->exit_code;
14658fc78909SEmanuele Giuseppe Esposito 	dst->exit_code_hi = from->exit_code_hi;
14668fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_1 = from->exit_info_1;
14678fc78909SEmanuele Giuseppe Esposito 	dst->exit_info_2 = from->exit_info_2;
14688fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info = from->exit_int_info;
14698fc78909SEmanuele Giuseppe Esposito 	dst->exit_int_info_err = from->exit_int_info_err;
14708fc78909SEmanuele Giuseppe Esposito 	dst->nested_ctl = from->nested_ctl;
14718fc78909SEmanuele Giuseppe Esposito 	dst->event_inj = from->event_inj;
14728fc78909SEmanuele Giuseppe Esposito 	dst->event_inj_err = from->event_inj_err;
147300f08d99SMaciej S. Szmigiero 	dst->next_rip = from->next_rip;
14748fc78909SEmanuele Giuseppe Esposito 	dst->nested_cr3 = from->nested_cr3;
14758fc78909SEmanuele Giuseppe Esposito 	dst->virt_ext = from->virt_ext;
14768fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_count = from->pause_filter_count;
14778fc78909SEmanuele Giuseppe Esposito 	dst->pause_filter_thresh = from->pause_filter_thresh;
147866c03a92SVitaly Kuznetsov 	/* 'clean' and 'reserved_sw' are not changed by KVM */
14798fc78909SEmanuele Giuseppe Esposito }
14808fc78909SEmanuele Giuseppe Esposito 
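/*
 * The nested-state blob exposed to userspace is the kvm_nested_state
 * header followed by KVM_STATE_NESTED_SVM_VMCB_SIZE bytes that carry the
 * vmcb12 control and save areas in their architectural layout.
 */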
1481cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1482cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1483cc440cdaSPaolo Bonzini 				u32 user_data_size)
1484cc440cdaSPaolo Bonzini {
1485cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm;
14868fc78909SEmanuele Giuseppe Esposito 	struct vmcb_control_area *ctl;
14878fc78909SEmanuele Giuseppe Esposito 	unsigned long r;
1488cc440cdaSPaolo Bonzini 	struct kvm_nested_state kvm_state = {
1489cc440cdaSPaolo Bonzini 		.flags = 0,
1490cc440cdaSPaolo Bonzini 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1491cc440cdaSPaolo Bonzini 		.size = sizeof(kvm_state),
1492cc440cdaSPaolo Bonzini 	};
1493cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1494cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
1495cc440cdaSPaolo Bonzini 
1496cc440cdaSPaolo Bonzini 	if (!vcpu)
1497cc440cdaSPaolo Bonzini 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1498cc440cdaSPaolo Bonzini 
1499cc440cdaSPaolo Bonzini 	svm = to_svm(vcpu);
1500cc440cdaSPaolo Bonzini 
1501cc440cdaSPaolo Bonzini 	if (user_data_size < kvm_state.size)
1502cc440cdaSPaolo Bonzini 		goto out;
1503cc440cdaSPaolo Bonzini 
1504cc440cdaSPaolo Bonzini 	/* First fill in the header and copy it out. */
1505cc440cdaSPaolo Bonzini 	if (is_guest_mode(vcpu)) {
15060dd16b5bSMaxim Levitsky 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1507cc440cdaSPaolo Bonzini 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1508cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1509cc440cdaSPaolo Bonzini 
1510cc440cdaSPaolo Bonzini 		if (svm->nested.nested_run_pending)
1511cc440cdaSPaolo Bonzini 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1512cc440cdaSPaolo Bonzini 	}
1513cc440cdaSPaolo Bonzini 
1514cc440cdaSPaolo Bonzini 	if (gif_set(svm))
1515cc440cdaSPaolo Bonzini 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1516cc440cdaSPaolo Bonzini 
1517cc440cdaSPaolo Bonzini 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1518cc440cdaSPaolo Bonzini 		return -EFAULT;
1519cc440cdaSPaolo Bonzini 
1520cc440cdaSPaolo Bonzini 	if (!is_guest_mode(vcpu))
1521cc440cdaSPaolo Bonzini 		goto out;
1522cc440cdaSPaolo Bonzini 
1523cc440cdaSPaolo Bonzini 	/*
1524cc440cdaSPaolo Bonzini 	 * Copy over the full size of the VMCB rather than just the size
1525cc440cdaSPaolo Bonzini 	 * of the structs.
1526cc440cdaSPaolo Bonzini 	 */
1527cc440cdaSPaolo Bonzini 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1528cc440cdaSPaolo Bonzini 		return -EFAULT;
15298fc78909SEmanuele Giuseppe Esposito 
15308fc78909SEmanuele Giuseppe Esposito 	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
15318fc78909SEmanuele Giuseppe Esposito 	if (!ctl)
15328fc78909SEmanuele Giuseppe Esposito 		return -ENOMEM;
15338fc78909SEmanuele Giuseppe Esposito 
15348fc78909SEmanuele Giuseppe Esposito 	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
15358fc78909SEmanuele Giuseppe Esposito 	r = copy_to_user(&user_vmcb->control, ctl,
15368fc78909SEmanuele Giuseppe Esposito 			 sizeof(user_vmcb->control));
15378fc78909SEmanuele Giuseppe Esposito 	kfree(ctl);
15388fc78909SEmanuele Giuseppe Esposito 	if (r)
1539cc440cdaSPaolo Bonzini 		return -EFAULT;
15408fc78909SEmanuele Giuseppe Esposito 
15414995a368SCathy Avery 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1542cc440cdaSPaolo Bonzini 			 sizeof(user_vmcb->save)))
1543cc440cdaSPaolo Bonzini 		return -EFAULT;
1544cc440cdaSPaolo Bonzini out:
1545cc440cdaSPaolo Bonzini 	return kvm_state.size;
1546cc440cdaSPaolo Bonzini }
1547cc440cdaSPaolo Bonzini 
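/*
 * Restore nested state from userspace: validate the header and the vmcb12
 * control/save areas, rebuild the cached control state and the
 * vmcb01/vmcb02 split, and re-enter guest mode if the saved state was in
 * guest mode.
 */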
1548cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1549cc440cdaSPaolo Bonzini 				struct kvm_nested_state __user *user_kvm_nested_state,
1550cc440cdaSPaolo Bonzini 				struct kvm_nested_state *kvm_state)
1551cc440cdaSPaolo Bonzini {
1552cc440cdaSPaolo Bonzini 	struct vcpu_svm *svm = to_svm(vcpu);
1553cc440cdaSPaolo Bonzini 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1554cc440cdaSPaolo Bonzini 		&user_kvm_nested_state->data.svm[0];
15556ccbd29aSJoerg Roedel 	struct vmcb_control_area *ctl;
15566ccbd29aSJoerg Roedel 	struct vmcb_save_area *save;
1557b7a3d8b6SEmanuele Giuseppe Esposito 	struct vmcb_save_area_cached save_cached;
15588fc78909SEmanuele Giuseppe Esposito 	struct vmcb_ctrl_area_cached ctl_cached;
1559dbc4739bSSean Christopherson 	unsigned long cr0;
15606ccbd29aSJoerg Roedel 	int ret;
1561cc440cdaSPaolo Bonzini 
15626ccbd29aSJoerg Roedel 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
15636ccbd29aSJoerg Roedel 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
15646ccbd29aSJoerg Roedel 
1565cc440cdaSPaolo Bonzini 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1566cc440cdaSPaolo Bonzini 		return -EINVAL;
1567cc440cdaSPaolo Bonzini 
1568cc440cdaSPaolo Bonzini 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1569cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_RUN_PENDING |
1570cc440cdaSPaolo Bonzini 				 KVM_STATE_NESTED_GIF_SET))
1571cc440cdaSPaolo Bonzini 		return -EINVAL;
1572cc440cdaSPaolo Bonzini 
1573cc440cdaSPaolo Bonzini 	/*
1574cc440cdaSPaolo Bonzini 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1575cc440cdaSPaolo Bonzini 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1576cc440cdaSPaolo Bonzini 	 */
1577cc440cdaSPaolo Bonzini 	if (!(vcpu->arch.efer & EFER_SVME)) {
1578cc440cdaSPaolo Bonzini 		/* GIF=1 and no guest mode are required if SVME=0.  */
1579cc440cdaSPaolo Bonzini 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1580cc440cdaSPaolo Bonzini 			return -EINVAL;
1581cc440cdaSPaolo Bonzini 	}
1582cc440cdaSPaolo Bonzini 
1583cc440cdaSPaolo Bonzini 	/* SMM temporarily disables SVM, so we cannot be in guest mode.
 */
1584cc440cdaSPaolo Bonzini 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1585cc440cdaSPaolo Bonzini 		return -EINVAL;
1586cc440cdaSPaolo Bonzini 
1587cc440cdaSPaolo Bonzini 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1588f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
1589d5cd6f34SVitaly Kuznetsov 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1590d5cd6f34SVitaly Kuznetsov 		return 0;
1591cc440cdaSPaolo Bonzini 	}
1592cc440cdaSPaolo Bonzini 
1593cc440cdaSPaolo Bonzini 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1594cc440cdaSPaolo Bonzini 		return -EINVAL;
1595cc440cdaSPaolo Bonzini 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1596cc440cdaSPaolo Bonzini 		return -EINVAL;
1597cc440cdaSPaolo Bonzini 
15986ccbd29aSJoerg Roedel 	ret = -ENOMEM;
1599eba04b20SSean Christopherson 	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1600eba04b20SSean Christopherson 	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
16016ccbd29aSJoerg Roedel 	if (!ctl || !save)
16026ccbd29aSJoerg Roedel 		goto out_free;
16036ccbd29aSJoerg Roedel 
16046ccbd29aSJoerg Roedel 	ret = -EFAULT;
16056ccbd29aSJoerg Roedel 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
16066ccbd29aSJoerg Roedel 		goto out_free;
16076ccbd29aSJoerg Roedel 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
16086ccbd29aSJoerg Roedel 		goto out_free;
16096ccbd29aSJoerg Roedel 
16106ccbd29aSJoerg Roedel 	ret = -EINVAL;
161166c03a92SVitaly Kuznetsov 	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
16128fc78909SEmanuele Giuseppe Esposito 	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
16136ccbd29aSJoerg Roedel 		goto out_free;
1614cc440cdaSPaolo Bonzini 
1615cc440cdaSPaolo Bonzini 	/*
1616cc440cdaSPaolo Bonzini 	 * Processor state contains L2 state. Check that it is
1617cb9b6a1bSPaolo Bonzini 	 * valid for guest mode (see nested_vmcb_check_save).
1618cc440cdaSPaolo Bonzini 	 */
1619cc440cdaSPaolo Bonzini 	cr0 = kvm_read_cr0(vcpu);
1620cc440cdaSPaolo Bonzini 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
16216ccbd29aSJoerg Roedel 		goto out_free;
1622cc440cdaSPaolo Bonzini 
1623cc440cdaSPaolo Bonzini 	/*
1624cc440cdaSPaolo Bonzini 	 * Validate host state saved from before VMRUN (see
1625cc440cdaSPaolo Bonzini 	 * nested_svm_check_permissions).
1626cc440cdaSPaolo Bonzini 	 */
1627b7a3d8b6SEmanuele Giuseppe Esposito 	__nested_copy_vmcb_save_to_cache(&save_cached, save);
16286906e06dSKrish Sadhukhan 	if (!(save->cr0 & X86_CR0_PG) ||
16296906e06dSKrish Sadhukhan 	    !(save->cr0 & X86_CR0_PE) ||
16306906e06dSKrish Sadhukhan 	    (save->rflags & X86_EFLAGS_VM) ||
1631b7a3d8b6SEmanuele Giuseppe Esposito 	    !__nested_vmcb_check_save(vcpu, &save_cached))
16326ccbd29aSJoerg Roedel 		goto out_free;
1633cc440cdaSPaolo Bonzini 
1634b222b0b8SMaxim Levitsky 
1635b222b0b8SMaxim Levitsky 	/*
16364995a368SCathy Avery 	 * All checks done, we can enter guest mode. Userspace provides
16374995a368SCathy Avery 	 * vmcb12.control, which will be combined with L1 and stored into
16384995a368SCathy Avery 	 * vmcb02, and the L1 save state which we store in vmcb01.
16394995a368SCathy Avery 	 * If needed, the L2 registers are moved from the current VMCB to vmcb02.
1640cc440cdaSPaolo Bonzini 	 */
164181f76adaSMaxim Levitsky 
16429d290e16SMaxim Levitsky 	if (is_guest_mode(vcpu))
1643f7e57078SSean Christopherson 		svm_leave_nested(vcpu);
16449d290e16SMaxim Levitsky 	else
16459d290e16SMaxim Levitsky 		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
16469d290e16SMaxim Levitsky 
1647063ab16cSMaxim Levitsky 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1648063ab16cSMaxim Levitsky 
164981f76adaSMaxim Levitsky 	svm->nested.nested_run_pending =
165081f76adaSMaxim Levitsky 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
165181f76adaSMaxim Levitsky 
16520dd16b5bSMaxim Levitsky 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1653c08f390aSPaolo Bonzini 
16542bb16beaSVitaly Kuznetsov 	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
16557907160dSEmanuele Giuseppe Esposito 	nested_copy_vmcb_control_to_cache(svm, ctl);
16564995a368SCathy Avery 
16574995a368SCathy Avery 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
165800f08d99SMaciej S. Szmigiero 	nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip);
1659e1779c27SMaxim Levitsky 
1660e1779c27SMaxim Levitsky 	/*
1661e1779c27SMaxim Levitsky 	 * While the nested guest CR3 is already checked and set by
1662e1779c27SMaxim Levitsky 	 * KVM_SET_SREGS, it was set before the nested state was loaded, so
1663e1779c27SMaxim Levitsky 	 * the MMU might not be initialized correctly.
1664e1779c27SMaxim Levitsky 	 * Set it again to fix this.
1665e1779c27SMaxim Levitsky 	 */
1666e1779c27SMaxim Levitsky 
1667e1779c27SMaxim Levitsky 	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1668e1779c27SMaxim Levitsky 				  nested_npt_enabled(svm), false);
1669e1779c27SMaxim Levitsky 	if (WARN_ON_ONCE(ret))
1670e1779c27SMaxim Levitsky 		goto out_free;
1671e1779c27SMaxim Levitsky 
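	/*
	 * The merged L0+L1 MSR permission bitmap will have to be rebuilt
	 * from the newly loaded vmcb12 before L2 runs again.
	 */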
167273c25546SVitaly Kuznetsov 	svm->nested.force_msr_bitmap_recalc = true;
1673e1779c27SMaxim Levitsky 
1674a7d5c7ceSPaolo Bonzini 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
16756ccbd29aSJoerg Roedel 	ret = 0;
16766ccbd29aSJoerg Roedel out_free:
16776ccbd29aSJoerg Roedel 	kfree(save);
16786ccbd29aSJoerg Roedel 	kfree(ctl);
16796ccbd29aSJoerg Roedel 
16806ccbd29aSJoerg Roedel 	return ret;
1681cc440cdaSPaolo Bonzini }
1682cc440cdaSPaolo Bonzini 
1683232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1684232f75d3SMaxim Levitsky {
1685232f75d3SMaxim Levitsky 	struct vcpu_svm *svm = to_svm(vcpu);
1686232f75d3SMaxim Levitsky 
1687232f75d3SMaxim Levitsky 	if (WARN_ON(!is_guest_mode(vcpu)))
1688232f75d3SMaxim Levitsky 		return true;
1689232f75d3SMaxim Levitsky 
1690158a48ecSMaxim Levitsky 	if (!vcpu->arch.pdptrs_from_userspace &&
1691158a48ecSMaxim Levitsky 	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1692b222b0b8SMaxim Levitsky 		/*
1693b222b0b8SMaxim Levitsky 		 * Reload the guest's PDPTRs since after a migration
1694b222b0b8SMaxim Levitsky 		 * the guest CR3 might be restored prior to setting the nested
1695b222b0b8SMaxim Levitsky 		 * state, which can lead to loading the wrong PDPTRs.
1696b222b0b8SMaxim Levitsky 		 */
16972df4a5ebSLai Jiangshan 		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1698232f75d3SMaxim Levitsky 			return false;
1699232f75d3SMaxim Levitsky 
1700232f75d3SMaxim Levitsky 	if (!nested_svm_vmrun_msrpm(svm)) {
1701232f75d3SMaxim Levitsky 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1702232f75d3SMaxim Levitsky 		vcpu->run->internal.suberror =
1703232f75d3SMaxim Levitsky 			KVM_INTERNAL_ERROR_EMULATION;
1704232f75d3SMaxim Levitsky 		vcpu->run->internal.ndata = 0;
1705232f75d3SMaxim Levitsky 		return false;
1706232f75d3SMaxim Levitsky 	}
1707232f75d3SMaxim Levitsky 
1708232f75d3SMaxim Levitsky 	return true;
1709232f75d3SMaxim Levitsky }
1710232f75d3SMaxim Levitsky 
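/*
 * Nested callbacks invoked from common x86 code; the SVM counterpart of
 * VMX's vmx_nested_ops.
 */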
171133b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
1712f7e57078SSean Christopherson 	.leave_nested = svm_leave_nested,
171333b22172SPaolo Bonzini 	.check_events = svm_check_nested_events,
17146819af75SSean Christopherson 	.handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
1715cb6a32c2SSean Christopherson 	.triple_fault = nested_svm_triple_fault,
1716a7d5c7ceSPaolo Bonzini 	.get_nested_state_pages = svm_get_nested_state_pages,
1717cc440cdaSPaolo Bonzini 	.get_state = svm_get_nested_state,
1718cc440cdaSPaolo Bonzini 	.set_state = svm_set_nested_state,
171933b22172SPaolo Bonzini };
1720