// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

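/*
 * Read one PDPTE from the nested page table that L1 provided through the
 * nested VMCB's nested_cr3.  Used as the ->get_pdptr() callback of the
 * nested MMU, which is only consulted when those tables are in PAE format.
 */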
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

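/*
 * Recompute the intercept vectors that are active while L2 runs: start
 * from what L1's host context needs (copied from the hsave area), drop
 * the few intercepts that only matter when running L1 directly (CR8 and
 * VINTR under V_INTR_MASKING, VMMCALL), then OR in everything that L1
 * requested for L2 in the nested VMCB.
 */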
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

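/*
 * Consistency checks that real hardware performs on VMRUN: EFER.SVME must
 * be set, CR0.CD=0/NW=1 is an illegal combination, the VMRUN intercept
 * must be active, ASID 0 is reserved for the host, and nested paging can
 * only be requested when NPT is enabled in L0.
 */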
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

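/*
 * Copy the L2 guest state out of the nested VMCB into the VMCB that will
 * actually be executed, going through the usual KVM setters so that the
 * register caches and dependent state stay consistent.
 */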
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm_flush_tlb(&svm->vcpu);
	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl = svm->nested.ctl.int_ctl | V_INTR_MASKING_MASK;
	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	mark_all_dirty(svm->vmcb);
}

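/*
 * Switch the vCPU to guest (L2) mode: latch the nested VMCB's control
 * area, load its save state and derive the control fields of the VMCB
 * that will be run.  The VMRUN path saves L1's state to the hsave area
 * before calling this.
 */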
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	svm->nested.vmcb = vmcb_gpa;
	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm);

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}

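/*
 * Emulate VMRUN.  The guest physical address of the nested VMCB is taken
 * from RAX; it is mapped and validated, the current L1 context is stashed
 * in the host-save area, and the vCPU enters guest mode.  An inconsistent
 * nested VMCB is reported back to L1 with SVM_EXIT_ERR.
 */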
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

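/*
 * Copy the state that the VMLOAD and VMSAVE instructions transfer:
 * FS/GS/TR/LDTR, KERNEL_GS_BASE, and the SYSCALL and SYSENTER MSRs.
 */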
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

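/*
 * Emulate #VMEXIT from L2 to L1: propagate the current VMCB state back
 * into the nested VMCB, restore the control area and selected save state
 * that were stashed in hsave at VMRUN time, and leave guest mode.
 */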
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

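/*
 * Check L1's MSR permission bitmap to decide whether an RDMSR/WRMSR
 * intercept taken while running L2 must be forwarded to L1
 * (NESTED_EXIT_DONE) or handled by KVM itself (NESTED_EXIT_HOST).
 */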
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

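/*
 * Check L1's I/O permission bitmap to decide whether an IOIO intercept
 * taken while running L2 belongs to L1.
 */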
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

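/*
 * Map the exit code of the current vmexit onto the intercept vectors that
 * L1 programmed in the nested VMCB and return NESTED_EXIT_DONE if the exit
 * should be reflected to L1.
 */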
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.ctl.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.ctl.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
}

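/*
 * Build a synthetic SVM_EXIT_EXCP_BASE + N vmexit for an exception that
 * L1 intercepts.  EXITINFO1 carries the error code when one exists;
 * EXITINFO2 is only defined for #PF, where it holds the faulting address.
 * For #DB the pending payload is delivered into DR6 and DR7.GD is
 * cleared, mirroring what hardware does before taking the intercept.
 */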
static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

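/*
 * Installed as svm_nested_ops.check_events and called while L2 is active.
 * For each pending event class (INIT, exception, SMI, NMI, external
 * interrupt) decide whether it must cause a vmexit to L1, and return
 * -EBUSY when events cannot be evaluated yet, e.g. right after an
 * emulated VMRUN or while an event is being reinjected.
 */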
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};