// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}
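
/*
 * Rough summary of the exit_info_1 encoding used above for nested page
 * faults (the AMD APM has the authoritative layout):
 *
 *	bits 0..31: #PF-style error code (P, RW, US, RSV, ID, ...)
 *	bit     32: the fault was on the final guest physical address
 *	bit     33: the fault happened while walking the guest's own
 *	            page tables
 *
 * When the original exit was not already an NPF, bit 32 is synthesized
 * and only the low 32 bits are filled in from the emulated error code.
 */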

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}
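
/*
 * Note the direction of the merge above: the host intercepts saved in
 * hsave (i.e. the ones L0 uses while running L1) form the baseline and
 * the cached L1->L2 intercepts are OR'd on top, so an event is
 * intercepted while L2 runs if either L0 or L1 asked for it.  Only the
 * CR8 and VINTR/VMMCALL adjustments deviate from that plain union.
 */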

static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
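
/*
 * Illustration of the merge above: msrpm_offsets[] lists the 32-bit
 * words of the 8K MSR permission bitmap in which L0's copy may contain
 * zero (pass-through) bits.  Each word covers 16 MSRs at two bits per
 * MSR (read, then write), and for word p the guest's value is read
 * from L1's bitmap at vmcb_msrpm + p * 4, so
 *
 *	nested.msrpm[p] = svm->msrpm[p] | guest_value;
 *
 * leaves an MSR unintercepted only if both L0 and L1 allow it.  Words
 * not listed in msrpm_offsets[] have no zero bits in L0's bitmap, so
 * merging them could never clear anything.
 */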

static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}
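
/*
 * The checks above mirror a subset of the consistency checks that
 * hardware performs at VMRUN: EFER.SVME must be set, CR0.CD=0 with
 * CR0.NW=1 is an illegal combination, the VMRUN intercept must be
 * active, and ASID 0 is reserved for the host.  On failure the caller
 * reports SVM_EXIT_ERR to L1 rather than raising a fault.
 */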

static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	svm->nested.nested_cr3 = control->nested_cr3;

	svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm = control->iopm_base_pa & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr = control->intercept_cr;
	svm->nested.intercept_dr = control->intercept_dr;
	svm->nested.intercept_exceptions = control->intercept_exceptions;
	svm->nested.intercept = control->intercept;

	svm->vcpu.arch.tsc_offset += control->tsc_offset;
}

static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm_flush_tlb(&svm->vcpu);
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	mark_all_dirty(svm->vmcb);
}

void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	svm->nested.vmcb = vmcb_gpa;
	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm, nested_vmcb);

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}

int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
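
/*
 * VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT deliberately
 * leave alone: the hidden parts of FS, GS, TR and LDTR, plus
 * KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs.  The helper above is
 * used for both emulated instructions; only the direction of the copy
 * differs.
 */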

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * exit_int_info and event_inj cannot both be valid, because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset is in 32 bit units, but we need it in byte units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
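
/*
 * Worked example for the lookup above: for ECX = 0xc0000080 (EFER),
 * svm_msrpm_offset() returns the 32-bit-word offset of the chunk
 * covering that MSR.  Each MSR owns two adjacent bits in the word,
 * read then write, so
 *
 *	mask = 1 << ((2 * (0xc0000080 & 0xf)) + write) = 1 << write
 *
 * tests the read (bit 0) or write (bit 1) permission of EFER, the
 * first MSR of its group of 16.  "offset *= 4" turns the word offset
 * into the byte offset used for the four byte read from L1's bitmap.
 */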

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
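
/*
 * Worked example for the IOPM lookup above, using a two byte access to
 * (hypothetical) port 0x3f9: the IOPM holds one intercept bit per
 * port, so
 *
 *	gpa       = vmcb_iopm + 0x3f9 / 8  (byte 0x7f)
 *	start_bit = 0x3f9 % 8 = 1
 *	iopm_len  = (1 + 2 > 8) ? 2 : 1 = 1
 *	mask      = (0xf >> (4 - 2)) << 1 = 0x6
 *
 * i.e. the bits for ports 0x3f9 and 0x3fa are tested, and a two byte
 * read is only needed when the bit range would cross a byte boundary.
 */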

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}
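
/*
 * The order of the checks above encodes the priority of nested events:
 * INIT, then pending exceptions, then SMI, NMI and finally maskable
 * interrupts.  For each class the event either forces a synthesized
 * #VMEXIT to L1 (if L1 intercepts it), is left for normal delivery to
 * L2, or, while an entry to L2 is still in flight (nested_run_pending),
 * makes the function return -EBUSY so the request is re-evaluated after
 * the entry completes.
 */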

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};