/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/reg.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>

#include <dev/vmm/vmm_ktr.h>

#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"
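/*
 * The *_ONE_SETTING macros below name control bits that must be set to 1
 * and the *_ZERO_SETTING macros name control bits that must be cleared to
 * 0 in the corresponding VMCS control field.  vmx_set_ctlreg() validates
 * both sets against the VMX capability MSRs and fails if the CPU cannot
 * honor them.
 */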
#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING	|	\
	 PINBASED_NMI_EXITING		|	\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING	|	\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS	|	\
	 PROCBASED_MWAIT_EXITING	|	\
	 PROCBASED_MONITOR_EXITING	|	\
	 PROCBASED_IO_EXITING		|	\
	 PROCBASED_MSR_BITMAPS		|	\
	 PROCBASED_CTLS_WINDOW_SETTING	|	\
	 PROCBASED_CR8_LOAD_EXITING	|	\
	 PROCBASED_CR8_STORE_EXITING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING	|	\
	 PROCBASED_CR3_STORE_EXITING	|	\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_SAVE_DEBUG_CONTROLS	|	\
	 VM_EXIT_HOST_LMA		|	\
	 VM_EXIT_SAVE_EFER		|	\
	 VM_EXIT_LOAD_EFER		|	\
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS	|	\
	 VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_INTO_SMM		|	\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

bool vmx_have_msr_tsc_aux;

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

int vmxon_enabled[MAXCPU];
static uint8_t *vmxon_region;

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap,
    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static int cap_halt_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
    "HLT triggers a VM-exit");

static int cap_pause_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
    0, "PAUSE triggers a VM-exit");
static int cap_wbinvd_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, wbinvd_exit, CTLFLAG_RD, &cap_wbinvd_exit,
    0, "WBINVD triggers a VM-exit");

static int cap_rdpid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdpid, CTLFLAG_RD, &cap_rdpid, 0,
    "Guests are allowed to use RDPID");

static int cap_rdtscp;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdtscp, CTLFLAG_RD, &cap_rdtscp, 0,
    "Guests are allowed to use RDTSCP");

static int cap_unrestricted_guest;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
    &cap_unrestricted_guest, 0, "Unrestricted guests");

static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");

static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");

static int tpr_shadowing;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &tpr_shadowing, 0, "TPR shadowing support");

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec = -1;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

int guest_l1d_flush;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &guest_l1d_flush, 0, NULL);
int guest_l1d_flush_sw;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &guest_l1d_flush_sw, 0, NULL);
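/*
 * Single-entry VM-entry MSR-load list: when the hardware L1D flush is
 * available, loading IA32_FLUSH_CMD with the L1D bit on every VM entry
 * flushes the L1 data cache without the software fallback (set up in
 * vmx_modinit() and wired into each vCPU's VMCS in vmx_vcpu_init()).
 */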
static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
 */

SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
*"); 2886ac73777STycho Nightingale 2896ac73777STycho Nightingale SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 2906ac73777STycho Nightingale "struct vmx *", "int", "struct vm_exit *"); 2916ac73777STycho Nightingale 2926ac73777STycho Nightingale SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 2936ac73777STycho Nightingale "struct vmx *", "int", "struct vm_exit *"); 2946ac73777STycho Nightingale 2956ac73777STycho Nightingale SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 2966ac73777STycho Nightingale "struct vmx *", "int", "struct vm_exit *"); 2976ac73777STycho Nightingale 29827d26457SAndrew Turner SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 29927d26457SAndrew Turner "struct vmx *", "int", "struct vm_exit *"); 30027d26457SAndrew Turner 3016ac73777STycho Nightingale SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 3026ac73777STycho Nightingale "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 3036ac73777STycho Nightingale 3046ac73777STycho Nightingale SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 3056ac73777STycho Nightingale "struct vmx *", "int", "struct vm_exit *", "int"); 3066ac73777STycho Nightingale 3076ac73777STycho Nightingale /* 30888c4b8d1SNeel Natu * Use the last page below 4GB as the APIC access address. This address is 30988c4b8d1SNeel Natu * occupied by the boot firmware so it is guaranteed that it will not conflict 31088c4b8d1SNeel Natu * with a page in system memory. 31188c4b8d1SNeel Natu */ 31288c4b8d1SNeel Natu #define APIC_ACCESS_ADDRESS 0xFFFFF000 31388c4b8d1SNeel Natu 314869c8d19SJohn Baldwin static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc); 315869c8d19SJohn Baldwin static int vmx_getreg(void *vcpui, int reg, uint64_t *retval); 316c3498942SNeel Natu static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); 31788c4b8d1SNeel Natu static void vmx_inject_pir(struct vlapic *vlapic); 318483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 319869c8d19SJohn Baldwin static int vmx_restore_tsc(void *vcpui, uint64_t now); 320483d953aSJohn Baldwin #endif 32188c4b8d1SNeel Natu 322f5f5f1e7SPeter Grehan static inline bool 323f5f5f1e7SPeter Grehan host_has_rdpid(void) 324f5f5f1e7SPeter Grehan { 325f5f5f1e7SPeter Grehan return ((cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0); 326f5f5f1e7SPeter Grehan } 327f5f5f1e7SPeter Grehan 328f5f5f1e7SPeter Grehan static inline bool 329f5f5f1e7SPeter Grehan host_has_rdtscp(void) 330f5f5f1e7SPeter Grehan { 331f5f5f1e7SPeter Grehan return ((amd_feature & AMDID_RDTSCP) != 0); 332f5f5f1e7SPeter Grehan } 333f5f5f1e7SPeter Grehan 334366f6083SPeter Grehan #ifdef KTR 335366f6083SPeter Grehan static const char * 336366f6083SPeter Grehan exit_reason_to_str(int reason) 337366f6083SPeter Grehan { 338366f6083SPeter Grehan static char reasonbuf[32]; 339366f6083SPeter Grehan 340366f6083SPeter Grehan switch (reason) { 341366f6083SPeter Grehan case EXIT_REASON_EXCEPTION: 342366f6083SPeter Grehan return "exception"; 343366f6083SPeter Grehan case EXIT_REASON_EXT_INTR: 344366f6083SPeter Grehan return "extint"; 345366f6083SPeter Grehan case EXIT_REASON_TRIPLE_FAULT: 346366f6083SPeter Grehan return "triplefault"; 347366f6083SPeter Grehan case EXIT_REASON_INIT: 348366f6083SPeter Grehan return "init"; 349366f6083SPeter Grehan case EXIT_REASON_SIPI: 350366f6083SPeter Grehan return "sipi"; 351366f6083SPeter Grehan case EXIT_REASON_IO_SMI: 352366f6083SPeter Grehan return "iosmi"; 353366f6083SPeter Grehan case EXIT_REASON_SMI: 354366f6083SPeter Grehan return "smi"; 355366f6083SPeter Grehan case EXIT_REASON_INTR_WINDOW: 356366f6083SPeter Grehan return "intrwindow"; 
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE_DURING_ENTRY:
		return "mce-during-entry";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */
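/*
 * Configure the MSR bitmap so that a guest running in x2APIC mode can
 * access its local APIC registers via MSRs without triggering a VM-exit.
 * The accumulated return value is nonzero if any bitmap update failed.
 */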
static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,vm_maxcpu] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > vm_maxcpu)
		free_unr(vpid_unr, vpid);
}

static uint16_t
vpid_alloc(int vcpuid)
{
	int x;

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0)
		return (0);

	/*
	 * Try to allocate a unique VPID for each vcpu from the unit number
	 * allocator.
	 */
	x = alloc_unr(vpid_unr);

	if (x == -1) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,vm_maxcpu] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		return (vcpuid + 1);
	}

	return (x);
}
static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,vm_maxcpu] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(vm_maxcpu + 1, 0xffff, NULL);
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_modcleanup(void)
{

	if (pirvec >= 0)
		lapic_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	if (nmi_flush_l1d_sw == 1)
		nmi_flush_l1d_sw = 0;

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	if (vmxon_region != NULL)
		kmem_free(vmxon_region, (mp_maxid + 1) * PAGE_SIZE);

	return (0);
}
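/*
 * Per-CPU VMX enable: set the VMX-enable and lock bits in
 * IA32_FEATURE_CONTROL if the BIOS left the MSR unlocked, set CR4.VMXE,
 * and execute VMXON with this CPU's revision-tagged VMXON region.
 */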
static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)&vmxon_region[curcpu * PAGE_SIZE] = vmx_revision();
	error = vmxon(&vmxon_region[curcpu * PAGE_SIZE]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_modsuspend(void)
{

	if (vmxon_enabled[curcpu])
		vmx_disable(NULL);
}

static void
vmx_modresume(void)
{

	if (vmxon_enabled[curcpu])
		vmx_enable(NULL);
}

static int
vmx_modinit(int ipinum)
{
	int error;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_modinit: processor does not support VMX "
		    "operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_modinit: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/*
	 * Verify capabilities MSR_VMX_BASIC:
	 * - bit 54 indicates support for INS/OUTS decoding
	 */
	basic = rdmsr(MSR_VMX_BASIC);
	if ((basic & (1UL << 54)) == 0) {
		printf("vmx_modinit: processor does not support desired basic "
		    "capabilities\n");
		return (EINVAL);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_modinit: processor does not support desired "
		    "primary processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_modinit: processor does not support desired "
		    "secondary processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_modinit: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_modinit: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_modinit: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_WBINVD_EXITING,
	    0,
	    &tmp) == 0);
	/*
	 * Check support for RDPID and/or RDTSCP.
	 *
	 * Support a pass-through-based implementation of these via the
	 * "enable RDTSCP" VM-execution control and the "RDTSC exiting"
	 * VM-execution control.
	 *
	 * The "enable RDTSCP" VM-execution control applies to both RDPID
	 * and RDTSCP (see SDM volume 3, section 25.3, "Changes to
	 * Instruction Behavior in VMX Non-root operation"); this is why
	 * only this VM-execution control needs to be enabled in order to
	 * enable passing through whichever of RDPID and/or RDTSCP are
	 * supported by the host.
	 *
	 * The "RDTSC exiting" VM-execution control applies to both RDTSC
	 * and RDTSCP (again, per SDM volume 3, section 25.3), and is
	 * already set up for RDTSC and RDTSCP pass-through by the current
	 * implementation of RDTSC.
	 *
	 * Although RDPID and RDTSCP are optional capabilities, since there
	 * does not currently seem to be a use case for enabling/disabling
	 * these via libvmmapi, choose not to support this and, instead,
	 * just statically always enable or always disable this support
	 * across all vCPUs on all VMs. (Note that there may be some
	 * complications to providing this functionality, e.g., the MSR
	 * bitmap is currently per-VM rather than per-vCPU while the
	 * capability API wants to be able to control capabilities on a
	 * per-vCPU basis).
	 */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_RDTSCP, 0, &tmp);
	cap_rdpid = error == 0 && host_has_rdpid();
	cap_rdtscp = error == 0 && host_has_rdtscp();
	if (cap_rdpid || cap_rdtscp) {
		procbased_ctls2 |= PROCBASED2_ENABLE_RDTSCP;
		vmx_have_msr_tsc_aux = true;
	}

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for TPR shadow.
	 */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp);
	if (error == 0) {
		tpr_shadowing = 1;
#ifndef BURN_BRIDGES
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing",
		    &tpr_shadowing);
#endif
		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.tpr_shadowing",
		    &tpr_shadowing);
	}
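	/*
	 * With the TPR shadow enabled, guest CR8 accesses are satisfied
	 * from the virtual-APIC page, so the CR8 load/store exiting
	 * controls are no longer needed.
	 */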
	if (tpr_shadowing) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && tpr_shadowing) {
		virtual_interrupt_delivery = 1;
#ifndef BURN_BRIDGES
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
#endif
		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.virtual_interrupt_delivery",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
			    &IDTVEC(justreturn));
			if (pirvec < 0) {
				if (bootverbose) {
					printf("vmx_modinit: unable to "
					    "allocate posted interrupt "
					    "vector\n");
				}
			} else {
				posted_interrupts = 1;
#ifndef BURN_BRIDGES
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
#endif
				TUNABLE_INT_FETCH("hw.vmm.vmx.cap.posted_interrupts",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_modinit: ept initialization failed (%d)\n", error);
		return (error);
	}

	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
#ifndef BURN_BRIDGES
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
#endif
	TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
#ifndef BURN_BRIDGES
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
#endif
			TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
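	/*
	 * In the FIXED0/FIXED1 MSR pairs a CR bit may be 0 only if it is
	 * clear in FIXED0 and may be 1 only if it is set in FIXED1, so the
	 * must-be-one mask is (fixed0 & fixed1) and the must-be-zero mask
	 * is (~fixed0 & ~fixed1).
	 */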
	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	vmx_msr_init();

	/* enable VMX operation */
	vmxon_region = kmem_malloc((mp_maxid + 1) * PAGE_SIZE,
	    M_WAITOK | M_ZERO);
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}
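/*
 * Dispatch the host interrupt handler for the given vector by recovering
 * the handler address from its IDT gate and calling it directly.  This is
 * needed because, with the "acknowledge interrupt on exit" control set,
 * the CPU acks an external interrupt that caused a VM-exit without
 * invoking the host handler automatically.
 */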
static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}
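/*
 * Set up the CR0/CR4 guest/host mask and read shadow.  Guest reads of
 * masked bits return the shadow value, and guest attempts to change a
 * masked bit to a value different from the shadow cause a VM-exit;
 * unmasked bits are read and written directly.
 */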
static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_init(struct vm *vm, pmap_t pmap)
{
	int error __diagused;
	struct vmx *vmx;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	vmx->msr_bitmap = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX,
	    M_WAITOK | M_ZERO);
	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as
	 * that will impact the host TSC.  If the guest does a write
	 * the "use TSC offsetting" execution control is enabled and the
	 * difference between the host TSC and the guest TSC is written
	 * into the TSC offset in the VMCS.
	 *
	 * Guest TSC_AUX support is enabled if any of guest RDPID and/or
	 * guest RDTSCP support are enabled (since, as per Table 2-2 in SDM
	 * volume 4, TSC_AUX is supported if any of RDPID and/or RDTSCP are
If guest TSC_AUX support is enabled, TSC_AUX is 1103f5f5f1e7SPeter Grehan * exposed read-only so that the VMM can do one fewer MSR read per 1104f5f5f1e7SPeter Grehan * exit than if this register were exposed read-write; the guest 1105f5f5f1e7SPeter Grehan * restore value can be updated during guest writes (expected to be 1106f5f5f1e7SPeter Grehan * rare) instead of during all exits (common). 1107366f6083SPeter Grehan */ 1108366f6083SPeter Grehan if (guest_msr_rw(vmx, MSR_GSBASE) || 1109366f6083SPeter Grehan guest_msr_rw(vmx, MSR_FSBASE) || 11101fb0ea3fSPeter Grehan guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 11111fb0ea3fSPeter Grehan guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 11121fb0ea3fSPeter Grehan guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 11138d1d7a9eSPeter Grehan guest_msr_rw(vmx, MSR_EFER) || 1114f5f5f1e7SPeter Grehan guest_msr_ro(vmx, MSR_TSC) || 1115f5f5f1e7SPeter Grehan ((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX))) 111615add60dSPeter Grehan panic("vmx_init: error setting guest msr access"); 1117366f6083SPeter Grehan 111888c4b8d1SNeel Natu if (virtual_interrupt_delivery) { 111988c4b8d1SNeel Natu error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 112088c4b8d1SNeel Natu APIC_ACCESS_ADDRESS); 112188c4b8d1SNeel Natu /* XXX this should really return an error to the caller */ 112288c4b8d1SNeel Natu KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 112388c4b8d1SNeel Natu } 112488c4b8d1SNeel Natu 11251aa51504SJohn Baldwin vmx->pmap = pmap; 11261aa51504SJohn Baldwin return (vmx); 11271aa51504SJohn Baldwin } 11280f00260cSJohn Baldwin 11291aa51504SJohn Baldwin static void * 1130950af9ffSJohn Baldwin vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 11311aa51504SJohn Baldwin { 1132869c8d19SJohn Baldwin struct vmx *vmx = vmi; 11331aa51504SJohn Baldwin struct vmcs *vmcs; 11341aa51504SJohn Baldwin struct vmx_vcpu *vcpu; 11351aa51504SJohn Baldwin uint32_t exc_bitmap; 113658eefc67SJohn Baldwin uint16_t vpid; 11371aa51504SJohn Baldwin int error; 11381aa51504SJohn Baldwin 113958eefc67SJohn Baldwin vpid = vpid_alloc(vcpuid); 114058eefc67SJohn Baldwin 11411aa51504SJohn Baldwin vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); 1142869c8d19SJohn Baldwin vcpu->vmx = vmx; 1143950af9ffSJohn Baldwin vcpu->vcpu = vcpu1; 11441aa51504SJohn Baldwin vcpu->vcpuid = vcpuid; 11450f00260cSJohn Baldwin vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, 11460f00260cSJohn Baldwin M_WAITOK | M_ZERO); 11470f00260cSJohn Baldwin vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, 11480f00260cSJohn Baldwin M_WAITOK | M_ZERO); 11491aa51504SJohn Baldwin vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, 11501aa51504SJohn Baldwin M_WAITOK | M_ZERO); 11510f00260cSJohn Baldwin 11520f00260cSJohn Baldwin vmcs = vcpu->vmcs; 1153c847a506SNeel Natu vmcs->identifier = vmx_revision(); 1154c847a506SNeel Natu error = vmclear(vmcs); 1155366f6083SPeter Grehan if (error != 0) { 115615add60dSPeter Grehan panic("vmx_init: vmclear error %d on vcpu %d\n", 11571aa51504SJohn Baldwin error, vcpuid); 1158366f6083SPeter Grehan } 1159366f6083SPeter Grehan 11601aa51504SJohn Baldwin vmx_msr_guest_init(vmx, vcpu); 1161c3498942SNeel Natu 1162c847a506SNeel Natu error = vmcs_init(vmcs); 1163c847a506SNeel Natu KASSERT(error == 0, ("vmcs_init error %d", error)); 1164366f6083SPeter Grehan 1165c847a506SNeel Natu VMPTRLD(vmcs); 1166c847a506SNeel Natu error = 0; 11670f00260cSJohn Baldwin error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); 1168c847a506SNeel Natu error += 
vmwrite(VMCS_EPTP, vmx->eptp); 1169c847a506SNeel Natu error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 1170c847a506SNeel Natu error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 117180cb5d84SJohn Baldwin if (vcpu_trap_wbinvd(vcpu->vcpu)) { 11723ba952e1SCorvin Köhne KASSERT(cap_wbinvd_exit, ("WBINVD trap not available")); 11733ba952e1SCorvin Köhne procbased_ctls2 |= PROCBASED2_WBINVD_EXITING; 11743ba952e1SCorvin Köhne } 1175c847a506SNeel Natu error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 1176c847a506SNeel Natu error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 1177c847a506SNeel Natu error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 1178c847a506SNeel Natu error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 117958eefc67SJohn Baldwin error += vmwrite(VMCS_VPID, vpid); 1180b0538143SNeel Natu 1181c1141fbaSKonstantin Belousov if (guest_l1d_flush && !guest_l1d_flush_sw) { 1182c1141fbaSKonstantin Belousov vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1183c1141fbaSKonstantin Belousov (vm_offset_t)&msr_load_list[0])); 1184c1141fbaSKonstantin Belousov vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1185c1141fbaSKonstantin Belousov nitems(msr_load_list)); 1186c1141fbaSKonstantin Belousov vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1187c1141fbaSKonstantin Belousov vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1188c1141fbaSKonstantin Belousov } 1189c1141fbaSKonstantin Belousov 1190b0538143SNeel Natu /* exception bitmap */ 119180cb5d84SJohn Baldwin if (vcpu_trace_exceptions(vcpu->vcpu)) 1192b0538143SNeel Natu exc_bitmap = 0xffffffff; 1193b0538143SNeel Natu else 1194b0538143SNeel Natu exc_bitmap = 1 << IDT_MC; 1195b0538143SNeel Natu error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1196b0538143SNeel Natu 11970f00260cSJohn Baldwin vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; 11989e2154ffSJohn Baldwin error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 119965eefbe4SJohn Baldwin 12001bc51badSMichael Reifenberger if (tpr_shadowing) { 12011aa51504SJohn Baldwin error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); 12021bc51badSMichael Reifenberger } 12031bc51badSMichael Reifenberger 12041bc51badSMichael Reifenberger if (virtual_interrupt_delivery) { 12051bc51badSMichael Reifenberger error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 120688c4b8d1SNeel Natu error += vmwrite(VMCS_EOI_EXIT0, 0); 120788c4b8d1SNeel Natu error += vmwrite(VMCS_EOI_EXIT1, 0); 120888c4b8d1SNeel Natu error += vmwrite(VMCS_EOI_EXIT2, 0); 120988c4b8d1SNeel Natu error += vmwrite(VMCS_EOI_EXIT3, 0); 121088c4b8d1SNeel Natu } 1211176666c2SNeel Natu if (posted_interrupts) { 1212176666c2SNeel Natu error += vmwrite(VMCS_PIR_VECTOR, pirvec); 12131aa51504SJohn Baldwin error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); 1214176666c2SNeel Natu } 1215c847a506SNeel Natu VMCLEAR(vmcs); 121615add60dSPeter Grehan KASSERT(error == 0, ("vmx_init: error customizing the vmcs")); 1217366f6083SPeter Grehan 12180f00260cSJohn Baldwin vcpu->cap.set = 0; 12190f00260cSJohn Baldwin vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; 12200f00260cSJohn Baldwin vcpu->cap.set |= cap_rdtscp != 0 ? 
1 << VM_CAP_RDTSCP : 0;
12210f00260cSJohn Baldwin 	vcpu->cap.proc_ctls = procbased_ctls;
12220f00260cSJohn Baldwin 	vcpu->cap.proc_ctls2 = procbased_ctls2;
12230f00260cSJohn Baldwin 	vcpu->cap.exc_bitmap = exc_bitmap;
1224366f6083SPeter Grehan 
12250f00260cSJohn Baldwin 	vcpu->state.nextrip = ~0;
12260f00260cSJohn Baldwin 	vcpu->state.lastcpu = NOCPU;
122758eefc67SJohn Baldwin 	vcpu->state.vpid = vpid;
1228366f6083SPeter Grehan 
1229aaaa0656SPeter Grehan 	/*
1230aaaa0656SPeter Grehan 	 * Set up the CR0/4 shadows and initialize the read shadows to
1231aaaa0656SPeter Grehan 	 * the power-on register values given in the Intel SDM:
1232aaaa0656SPeter Grehan 	 * CR0 - 0x60000010
1233aaaa0656SPeter Grehan 	 * CR4 - 0
1234aaaa0656SPeter Grehan 	 */
1235c847a506SNeel Natu 	error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
123639c21c2dSNeel Natu 	if (error != 0)
123739c21c2dSNeel Natu 		panic("vmx_setup_cr0_shadow %d", error);
123839c21c2dSNeel Natu 
1239c847a506SNeel Natu 	error = vmx_setup_cr4_shadow(vmcs, 0);
124039c21c2dSNeel Natu 	if (error != 0)
124139c21c2dSNeel Natu 		panic("vmx_setup_cr4_shadow %d", error);
1242318224bbSNeel Natu 
12431aa51504SJohn Baldwin 	vcpu->ctx.pmap = vmx->pmap;
1244366f6083SPeter Grehan 
12451aa51504SJohn Baldwin 	return (vcpu);
1246366f6083SPeter Grehan }
1247366f6083SPeter Grehan 
1248366f6083SPeter Grehan static int
124980cb5d84SJohn Baldwin vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx)
1250366f6083SPeter Grehan {
1251a3f2a9c5SJohn Baldwin 	int handled;
1252366f6083SPeter Grehan 
125380cb5d84SJohn Baldwin 	handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax,
1254a3f2a9c5SJohn Baldwin 	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
1255a3f2a9c5SJohn Baldwin 	    (uint64_t *)&vmxctx->guest_rdx);
1256366f6083SPeter Grehan 	return (handled);
1257366f6083SPeter Grehan }
1258366f6083SPeter Grehan 
1259366f6083SPeter Grehan static __inline void
1260869c8d19SJohn Baldwin vmx_run_trace(struct vmx_vcpu *vcpu)
1261366f6083SPeter Grehan {
126257e0119eSJohn Baldwin 	VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1263366f6083SPeter Grehan }
1264366f6083SPeter Grehan 
1265366f6083SPeter Grehan static __inline void
1266869c8d19SJohn Baldwin vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason,
1267869c8d19SJohn Baldwin     int handled)
1268366f6083SPeter Grehan {
126957e0119eSJohn Baldwin 	VMX_CTR3(vcpu, "%s %s vmexit at 0x%lx",
1270366f6083SPeter Grehan 	    handled ? "handled" : "unhandled",
1271366f6083SPeter Grehan 	    exit_reason_to_str(exit_reason), rip);
1272eeefa4e4SNeel Natu }
1273366f6083SPeter Grehan 
1274eeefa4e4SNeel Natu static __inline void
1275869c8d19SJohn Baldwin vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip)
1276eeefa4e4SNeel Natu {
127757e0119eSJohn Baldwin 	VMX_CTR1(vcpu, "astpending vmexit at 0x%lx", rip);
1278366f6083SPeter Grehan }
1279366f6083SPeter Grehan 
1280953c2c47SNeel Natu static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
12813527963bSNeel Natu static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1282953c2c47SNeel Natu 
12833527963bSNeel Natu /*
12843527963bSNeel Natu  * Invalidate guest mappings identified by the vcpu's vpid from the TLB.
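 *
 * As a sketch of the hardware rule being honored (a paraphrase, not
 * new logic): a TLB entry created while a given vpid was active may be
 * reused whenever that vpid runs again, so a potentially stale entry
 * must either be flushed explicitly, e.g.
 *
 *	invvpid_desc.vpid = vmxstate->vpid;
 *	invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
 *
 * or the vcpu must be routed through the "lastcpu = NOCPU" path below
 * so that the flush happens before it next runs.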
12853527963bSNeel Natu */ 12863527963bSNeel Natu static __inline void 12871aa51504SJohn Baldwin vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running) 1288366f6083SPeter Grehan { 1289366f6083SPeter Grehan struct vmxstate *vmxstate; 1290953c2c47SNeel Natu struct invvpid_desc invvpid_desc; 1291366f6083SPeter Grehan 12921aa51504SJohn Baldwin vmxstate = &vcpu->state; 12933527963bSNeel Natu if (vmxstate->vpid == 0) 12943de83862SNeel Natu return; 1295366f6083SPeter Grehan 12963527963bSNeel Natu if (!running) { 12973527963bSNeel Natu /* 12983527963bSNeel Natu * Set the 'lastcpu' to an invalid host cpu. 12993527963bSNeel Natu * 13003527963bSNeel Natu * This will invalidate TLB entries tagged with the vcpu's 13013527963bSNeel Natu * vpid the next time it runs via vmx_set_pcpu_defaults(). 13023527963bSNeel Natu */ 13033527963bSNeel Natu vmxstate->lastcpu = NOCPU; 13043527963bSNeel Natu return; 13053527963bSNeel Natu } 1306953c2c47SNeel Natu 13073527963bSNeel Natu KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 13081aa51504SJohn Baldwin "critical section", __func__, vcpu->vcpuid)); 1309366f6083SPeter Grehan 1310366f6083SPeter Grehan /* 13113527963bSNeel Natu * Invalidate all mappings tagged with 'vpid' 1312366f6083SPeter Grehan * 1313366f6083SPeter Grehan * We do this because this vcpu was executing on a different host 1314366f6083SPeter Grehan * cpu when it last ran. We do not track whether it invalidated 1315366f6083SPeter Grehan * mappings associated with its 'vpid' during that run. So we must 1316366f6083SPeter Grehan * assume that the mappings associated with 'vpid' on 'curcpu' are 1317366f6083SPeter Grehan * stale and invalidate them. 1318366f6083SPeter Grehan * 1319366f6083SPeter Grehan * Note that we incur this penalty only when the scheduler chooses to 1320366f6083SPeter Grehan * move the thread associated with this vcpu between host cpus. 1321366f6083SPeter Grehan * 1322366f6083SPeter Grehan * Note also that this will invalidate mappings tagged with 'vpid' 1323366f6083SPeter Grehan * for "all" EP4TAs. 1324366f6083SPeter Grehan */ 13256f5a9606SMark Johnston if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) { 1326953c2c47SNeel Natu invvpid_desc._res1 = 0; 1327953c2c47SNeel Natu invvpid_desc._res2 = 0; 1328366f6083SPeter Grehan invvpid_desc.vpid = vmxstate->vpid; 13290e30c5c0SWarner Losh invvpid_desc.linear_addr = 0; 1330366f6083SPeter Grehan invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 13313dc3d32aSJohn Baldwin vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); 1332953c2c47SNeel Natu } else { 1333953c2c47SNeel Natu /* 1334953c2c47SNeel Natu * The invvpid can be skipped if an invept is going to 1335953c2c47SNeel Natu * be performed before entering the guest. The invept 1336953c2c47SNeel Natu * will invalidate combined mappings tagged with 1337953c2c47SNeel Natu * 'vmx->eptp' for all vpids. 
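 *
 * Restating the eptgen test above as a small decision table (a
 * summary, not additional code):
 *
 *	pm_eptgen == eptgen[curcpu]  =>  no invept pending: invvpid now
 *	pm_eptgen != eptgen[curcpu]  =>  invept imminent: skip invvpid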
1338953c2c47SNeel Natu */ 13393dc3d32aSJohn Baldwin vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); 1340953c2c47SNeel Natu } 1341366f6083SPeter Grehan } 13423527963bSNeel Natu 13433527963bSNeel Natu static void 13441aa51504SJohn Baldwin vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap) 13453527963bSNeel Natu { 13463527963bSNeel Natu struct vmxstate *vmxstate; 13473527963bSNeel Natu 13481aa51504SJohn Baldwin vmxstate = &vcpu->state; 13493527963bSNeel Natu if (vmxstate->lastcpu == curcpu) 13503527963bSNeel Natu return; 13513527963bSNeel Natu 13523527963bSNeel Natu vmxstate->lastcpu = curcpu; 13533527963bSNeel Natu 13543dc3d32aSJohn Baldwin vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); 13553527963bSNeel Natu 13563527963bSNeel Natu vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 13573527963bSNeel Natu vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 13583527963bSNeel Natu vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 13593527963bSNeel Natu vmx_invvpid(vmx, vcpu, pmap, 1); 1360366f6083SPeter Grehan } 1361366f6083SPeter Grehan 1362366f6083SPeter Grehan /* 1363366f6083SPeter Grehan * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1364366f6083SPeter Grehan */ 1365366f6083SPeter Grehan CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1366366f6083SPeter Grehan 1367366f6083SPeter Grehan static void __inline 1368869c8d19SJohn Baldwin vmx_set_int_window_exiting(struct vmx_vcpu *vcpu) 1369366f6083SPeter Grehan { 1370366f6083SPeter Grehan 13711aa51504SJohn Baldwin if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 13721aa51504SJohn Baldwin vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 13731aa51504SJohn Baldwin vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 137457e0119eSJohn Baldwin VMX_CTR0(vcpu, "Enabling interrupt window exiting"); 137548b2d828SNeel Natu } 1376366f6083SPeter Grehan } 1377366f6083SPeter Grehan 1378366f6083SPeter Grehan static void __inline 1379869c8d19SJohn Baldwin vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu) 1380366f6083SPeter Grehan { 1381366f6083SPeter Grehan 13821aa51504SJohn Baldwin KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 13831aa51504SJohn Baldwin ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); 13841aa51504SJohn Baldwin vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 13851aa51504SJohn Baldwin vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 138657e0119eSJohn Baldwin VMX_CTR0(vcpu, "Disabling interrupt window exiting"); 1387366f6083SPeter Grehan } 1388366f6083SPeter Grehan 1389366f6083SPeter Grehan static void __inline 1390869c8d19SJohn Baldwin vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu) 1391366f6083SPeter Grehan { 1392366f6083SPeter Grehan 13931aa51504SJohn Baldwin if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 13941aa51504SJohn Baldwin vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 13951aa51504SJohn Baldwin vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 139657e0119eSJohn Baldwin VMX_CTR0(vcpu, "Enabling NMI window exiting"); 139748b2d828SNeel Natu } 1398366f6083SPeter Grehan } 1399366f6083SPeter Grehan 1400366f6083SPeter Grehan static void __inline 1401869c8d19SJohn Baldwin vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu) 1402366f6083SPeter Grehan { 1403366f6083SPeter Grehan 14041aa51504SJohn Baldwin KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 14051aa51504SJohn Baldwin ("nmi_window_exiting not set %#x", 
vcpu->cap.proc_ctls)); 14061aa51504SJohn Baldwin vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 14071aa51504SJohn Baldwin vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 140857e0119eSJohn Baldwin VMX_CTR0(vcpu, "Disabling NMI window exiting"); 1409366f6083SPeter Grehan } 1410366f6083SPeter Grehan 1411277bdd99STycho Nightingale int 141280cb5d84SJohn Baldwin vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset) 1413277bdd99STycho Nightingale { 1414277bdd99STycho Nightingale int error; 1415277bdd99STycho Nightingale 14161aa51504SJohn Baldwin if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { 14171aa51504SJohn Baldwin vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; 14181aa51504SJohn Baldwin vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 141957e0119eSJohn Baldwin VMX_CTR0(vcpu, "Enabling TSC offsetting"); 1420277bdd99STycho Nightingale } 1421277bdd99STycho Nightingale 1422277bdd99STycho Nightingale error = vmwrite(VMCS_TSC_OFFSET, offset); 1423483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 1424483d953aSJohn Baldwin if (error == 0) 142580cb5d84SJohn Baldwin vm_set_tsc_offset(vcpu->vcpu, offset); 1426483d953aSJohn Baldwin #endif 1427277bdd99STycho Nightingale return (error); 1428277bdd99STycho Nightingale } 1429277bdd99STycho Nightingale 143048b2d828SNeel Natu #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 143148b2d828SNeel Natu VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 143248b2d828SNeel Natu #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 143348b2d828SNeel Natu VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 143448b2d828SNeel Natu 143548b2d828SNeel Natu static void 143680cb5d84SJohn Baldwin vmx_inject_nmi(struct vmx_vcpu *vcpu) 1437366f6083SPeter Grehan { 14385c272efaSRobert Wing uint32_t gi __diagused, info; 1439366f6083SPeter Grehan 144048b2d828SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 144148b2d828SNeel Natu KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 144248b2d828SNeel Natu "interruptibility-state %#x", gi)); 1443366f6083SPeter Grehan 144448b2d828SNeel Natu info = vmcs_read(VMCS_ENTRY_INTR_INFO); 144548b2d828SNeel Natu KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 144648b2d828SNeel Natu "VM-entry interruption information %#x", info)); 1447366f6083SPeter Grehan 1448366f6083SPeter Grehan /* 1449366f6083SPeter Grehan * Inject the virtual NMI. The vector must be the NMI IDT entry 1450366f6083SPeter Grehan * or the VMCS entry check will fail. 
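 *
 * As a sketch, the interruption-information word written below
 * decomposes as (layout paraphrased from the VT-x definition):
 *
 *	bits  7:0  vector = IDT_NMI (2)
 *	bits 10:8  type   = VMCS_INTR_T_NMI
 *	bit    31  valid  = VMCS_INTR_VALID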
1451366f6083SPeter Grehan */ 145248b2d828SNeel Natu info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 14533de83862SNeel Natu vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1454366f6083SPeter Grehan 145557e0119eSJohn Baldwin VMX_CTR0(vcpu, "Injecting vNMI"); 1456366f6083SPeter Grehan 1457366f6083SPeter Grehan /* Clear the request */ 145880cb5d84SJohn Baldwin vm_nmi_clear(vcpu->vcpu); 1459366f6083SPeter Grehan } 1460366f6083SPeter Grehan 1461366f6083SPeter Grehan static void 146280cb5d84SJohn Baldwin vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic, 146380cb5d84SJohn Baldwin uint64_t guestrip) 1464366f6083SPeter Grehan { 14650775fbb4STycho Nightingale int vector, need_nmi_exiting, extint_pending; 1466091d4532SNeel Natu uint64_t rflags, entryinfo; 146748b2d828SNeel Natu uint32_t gi, info; 1468366f6083SPeter Grehan 1469fefac543SBojan Novković if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { 1470fefac543SBojan Novković return; 1471fefac543SBojan Novković } 1472fefac543SBojan Novković 14731aa51504SJohn Baldwin if (vcpu->state.nextrip != guestrip) { 14742ce12423SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 14752ce12423SNeel Natu if (gi & HWINTR_BLOCKING) { 147657e0119eSJohn Baldwin VMX_CTR2(vcpu, "Guest interrupt blocking " 14772ce12423SNeel Natu "cleared due to rip change: %#lx/%#lx", 14781aa51504SJohn Baldwin vcpu->state.nextrip, guestrip); 14792ce12423SNeel Natu gi &= ~HWINTR_BLOCKING; 14802ce12423SNeel Natu vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 14812ce12423SNeel Natu } 14822ce12423SNeel Natu } 14832ce12423SNeel Natu 148480cb5d84SJohn Baldwin if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { 1485091d4532SNeel Natu KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1486091d4532SNeel Natu "intinfo is not valid: %#lx", __func__, entryinfo)); 1487dc506506SNeel Natu 1488dc506506SNeel Natu info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1489dc506506SNeel Natu KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1490019008ebSNeel Natu "pending exception: %#lx/%#x", __func__, entryinfo, info)); 1491dc506506SNeel Natu 1492091d4532SNeel Natu info = entryinfo; 1493091d4532SNeel Natu vector = info & 0xff; 1494091d4532SNeel Natu if (vector == IDT_BP || vector == IDT_OF) { 1495091d4532SNeel Natu /* 1496091d4532SNeel Natu * VT-x requires #BP and #OF to be injected as software 1497091d4532SNeel Natu * exceptions. 1498091d4532SNeel Natu */ 1499091d4532SNeel Natu info &= ~VMCS_INTR_T_MASK; 1500091d4532SNeel Natu info |= VMCS_INTR_T_SWEXCEPTION; 1501dc506506SNeel Natu } 1502091d4532SNeel Natu 1503091d4532SNeel Natu if (info & VMCS_INTR_DEL_ERRCODE) 1504091d4532SNeel Natu vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); 1505091d4532SNeel Natu 1506dc506506SNeel Natu vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1507dc506506SNeel Natu } 1508dc506506SNeel Natu 150980cb5d84SJohn Baldwin if (vm_nmi_pending(vcpu->vcpu)) { 1510366f6083SPeter Grehan /* 151148b2d828SNeel Natu * If there are no conditions blocking NMI injection then 151248b2d828SNeel Natu * inject it directly here otherwise enable "NMI window 151348b2d828SNeel Natu * exiting" to inject it as soon as we can. 1514eeefa4e4SNeel Natu * 151548b2d828SNeel Natu * We also check for STI_BLOCKING because some implementations 151648b2d828SNeel Natu * don't allow NMI injection in this case. If we are running 151748b2d828SNeel Natu * on a processor that doesn't have this restriction it will 151848b2d828SNeel Natu * immediately exit and the NMI will be injected in the 151948b2d828SNeel Natu * "NMI window exiting" handler. 
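 *
 * The decision below in tabular form (a summary, not new logic):
 *
 *	interruptibility state    ENTRY_INTR_INFO    action
 *	unblocked                 invalid            inject NMI here
 *	unblocked                 valid              NMI-window exiting
 *	blocked                   --                 NMI-window exiting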
1520366f6083SPeter Grehan */ 152148b2d828SNeel Natu need_nmi_exiting = 1; 152248b2d828SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 152348b2d828SNeel Natu if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 15243de83862SNeel Natu info = vmcs_read(VMCS_ENTRY_INTR_INFO); 152548b2d828SNeel Natu if ((info & VMCS_INTR_VALID) == 0) { 152680cb5d84SJohn Baldwin vmx_inject_nmi(vcpu); 152748b2d828SNeel Natu need_nmi_exiting = 0; 152848b2d828SNeel Natu } else { 152957e0119eSJohn Baldwin VMX_CTR1(vcpu, "Cannot inject NMI " 153057e0119eSJohn Baldwin "due to VM-entry intr info %#x", info); 153148b2d828SNeel Natu } 153248b2d828SNeel Natu } else { 153357e0119eSJohn Baldwin VMX_CTR1(vcpu, "Cannot inject NMI due to " 153457e0119eSJohn Baldwin "Guest Interruptibility-state %#x", gi); 153548b2d828SNeel Natu } 1536eeefa4e4SNeel Natu 153748b2d828SNeel Natu if (need_nmi_exiting) 1538869c8d19SJohn Baldwin vmx_set_nmi_window_exiting(vcpu); 153948b2d828SNeel Natu } 1540366f6083SPeter Grehan 154180cb5d84SJohn Baldwin extint_pending = vm_extint_pending(vcpu->vcpu); 15420775fbb4STycho Nightingale 15430775fbb4STycho Nightingale if (!extint_pending && virtual_interrupt_delivery) { 154488c4b8d1SNeel Natu vmx_inject_pir(vlapic); 154588c4b8d1SNeel Natu return; 154688c4b8d1SNeel Natu } 154788c4b8d1SNeel Natu 154848b2d828SNeel Natu /* 154936736912SNeel Natu * If interrupt-window exiting is already in effect then don't bother 155036736912SNeel Natu * checking for pending interrupts. This is just an optimization and 155136736912SNeel Natu * not needed for correctness. 155248b2d828SNeel Natu */ 15531aa51504SJohn Baldwin if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 155457e0119eSJohn Baldwin VMX_CTR0(vcpu, "Skip interrupt injection due to " 155557e0119eSJohn Baldwin "pending int_window_exiting"); 155648b2d828SNeel Natu return; 155736736912SNeel Natu } 155848b2d828SNeel Natu 15590775fbb4STycho Nightingale if (!extint_pending) { 1560366f6083SPeter Grehan /* Ask the local apic for a vector to inject */ 15614d1e82a8SNeel Natu if (!vlapic_pending_intr(vlapic, &vector)) 1562366f6083SPeter Grehan return; 1563a026dc3fSTycho Nightingale 1564a026dc3fSTycho Nightingale /* 1565a026dc3fSTycho Nightingale * From the Intel SDM, Volume 3, Section "Maskable 1566a026dc3fSTycho Nightingale * Hardware Interrupts": 1567a026dc3fSTycho Nightingale * - maskable interrupt vectors [16,255] can be delivered 1568a026dc3fSTycho Nightingale * through the local APIC. 1569a026dc3fSTycho Nightingale */ 1570a026dc3fSTycho Nightingale KASSERT(vector >= 16 && vector <= 255, 1571a026dc3fSTycho Nightingale ("invalid vector %d from local APIC", vector)); 15720775fbb4STycho Nightingale } else { 15730775fbb4STycho Nightingale /* Ask the legacy pic for a vector to inject */ 157480cb5d84SJohn Baldwin vatpic_pending_intr(vcpu->vmx->vm, &vector); 1575366f6083SPeter Grehan 1576a026dc3fSTycho Nightingale /* 1577a026dc3fSTycho Nightingale * From the Intel SDM, Volume 3, Section "Maskable 1578a026dc3fSTycho Nightingale * Hardware Interrupts": 1579a026dc3fSTycho Nightingale * - maskable interrupt vectors [0,255] can be delivered 1580a026dc3fSTycho Nightingale * through the INTR pin. 
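 *
 * Side by side with the local APIC case above, as the two KASSERTs
 * encode it:
 *
 *	source            legal vectors
 *	local APIC        16-255
 *	8259 (INTR pin)   0-255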
1581a026dc3fSTycho Nightingale */ 1582a026dc3fSTycho Nightingale KASSERT(vector >= 0 && vector <= 255, 1583a026dc3fSTycho Nightingale ("invalid vector %d from INTR", vector)); 1584a026dc3fSTycho Nightingale } 1585366f6083SPeter Grehan 1586366f6083SPeter Grehan /* Check RFLAGS.IF and the interruptibility state of the guest */ 15873de83862SNeel Natu rflags = vmcs_read(VMCS_GUEST_RFLAGS); 158836736912SNeel Natu if ((rflags & PSL_I) == 0) { 158957e0119eSJohn Baldwin VMX_CTR2(vcpu, "Cannot inject vector %d due to " 159057e0119eSJohn Baldwin "rflags %#lx", vector, rflags); 1591366f6083SPeter Grehan goto cantinject; 159236736912SNeel Natu } 1593366f6083SPeter Grehan 159448b2d828SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 159536736912SNeel Natu if (gi & HWINTR_BLOCKING) { 159657e0119eSJohn Baldwin VMX_CTR2(vcpu, "Cannot inject vector %d due to " 159757e0119eSJohn Baldwin "Guest Interruptibility-state %#x", vector, gi); 1598366f6083SPeter Grehan goto cantinject; 159936736912SNeel Natu } 160036736912SNeel Natu 160136736912SNeel Natu info = vmcs_read(VMCS_ENTRY_INTR_INFO); 160236736912SNeel Natu if (info & VMCS_INTR_VALID) { 160336736912SNeel Natu /* 160436736912SNeel Natu * This is expected and could happen for multiple reasons: 160536736912SNeel Natu * - A vectoring VM-entry was aborted due to astpending 160636736912SNeel Natu * - A VM-exit happened during event injection. 1607dc506506SNeel Natu * - An exception was injected above. 160836736912SNeel Natu * - An NMI was injected above or after "NMI window exiting" 160936736912SNeel Natu */ 161057e0119eSJohn Baldwin VMX_CTR2(vcpu, "Cannot inject vector %d due to " 161157e0119eSJohn Baldwin "VM-entry intr info %#x", vector, info); 161236736912SNeel Natu goto cantinject; 161336736912SNeel Natu } 1614366f6083SPeter Grehan 1615366f6083SPeter Grehan /* Inject the interrupt */ 1616160471d2SNeel Natu info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1617366f6083SPeter Grehan info |= vector; 16183de83862SNeel Natu vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1619366f6083SPeter Grehan 16200775fbb4STycho Nightingale if (!extint_pending) { 1621366f6083SPeter Grehan /* Update the Local APIC ISR */ 1622de5ea6b6SNeel Natu vlapic_intr_accepted(vlapic, vector); 16230775fbb4STycho Nightingale } else { 162480cb5d84SJohn Baldwin vm_extint_clear(vcpu->vcpu); 162580cb5d84SJohn Baldwin vatpic_intr_accepted(vcpu->vmx->vm, vector); 16260775fbb4STycho Nightingale 16270775fbb4STycho Nightingale /* 16280775fbb4STycho Nightingale * After we accepted the current ExtINT the PIC may 16290775fbb4STycho Nightingale * have posted another one. If that is the case, set 16300775fbb4STycho Nightingale * the Interrupt Window Exiting execution control so 16310775fbb4STycho Nightingale * we can inject that one too. 16320494cb1bSNeel Natu * 16330494cb1bSNeel Natu * Also, interrupt window exiting allows us to inject any 16340494cb1bSNeel Natu * pending APIC vector that was preempted by the ExtINT 16350494cb1bSNeel Natu * as soon as possible. This applies both for the software 16360494cb1bSNeel Natu * emulated vlapic and the hardware assisted virtual APIC. 
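 *
 * A sketch of the resulting flow (using only functions from this
 * file):
 *
 *	vatpic_intr_accepted(...);        // PIC may post another ExtINT
 *	vmx_set_int_window_exiting(vcpu); // exit at next open window
 *	// ... VM-entry; guest runs until RFLAGS.IF opens a window ...
 *	vmx_inject_interrupts(...);       // picks up the pending vector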
16370775fbb4STycho Nightingale */ 1638869c8d19SJohn Baldwin vmx_set_int_window_exiting(vcpu); 16390775fbb4STycho Nightingale } 1640366f6083SPeter Grehan 164157e0119eSJohn Baldwin VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector); 1642366f6083SPeter Grehan 1643366f6083SPeter Grehan return; 1644366f6083SPeter Grehan 1645366f6083SPeter Grehan cantinject: 1646366f6083SPeter Grehan /* 1647366f6083SPeter Grehan * Set the Interrupt Window Exiting execution control so we can inject 1648366f6083SPeter Grehan * the interrupt as soon as blocking condition goes away. 1649366f6083SPeter Grehan */ 1650869c8d19SJohn Baldwin vmx_set_int_window_exiting(vcpu); 1651366f6083SPeter Grehan } 1652366f6083SPeter Grehan 1653e5a1d950SNeel Natu /* 1654e5a1d950SNeel Natu * If the Virtual NMIs execution control is '1' then the logical processor 1655e5a1d950SNeel Natu * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1656e5a1d950SNeel Natu * the VMCS. An IRET instruction in VMX non-root operation will remove any 1657e5a1d950SNeel Natu * virtual-NMI blocking. 1658e5a1d950SNeel Natu * 1659e5a1d950SNeel Natu * This unblocking occurs even if the IRET causes a fault. In this case the 1660e5a1d950SNeel Natu * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1661e5a1d950SNeel Natu */ 1662e5a1d950SNeel Natu static void 1663869c8d19SJohn Baldwin vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu) 1664e5a1d950SNeel Natu { 1665e5a1d950SNeel Natu uint32_t gi; 1666e5a1d950SNeel Natu 166757e0119eSJohn Baldwin VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); 1668e5a1d950SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1669e5a1d950SNeel Natu gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1670e5a1d950SNeel Natu vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1671e5a1d950SNeel Natu } 1672e5a1d950SNeel Natu 1673e5a1d950SNeel Natu static void 1674869c8d19SJohn Baldwin vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu) 1675e5a1d950SNeel Natu { 1676e5a1d950SNeel Natu uint32_t gi; 1677e5a1d950SNeel Natu 167857e0119eSJohn Baldwin VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); 1679e5a1d950SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1680e5a1d950SNeel Natu gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1681e5a1d950SNeel Natu vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1682e5a1d950SNeel Natu } 1683e5a1d950SNeel Natu 1684091d4532SNeel Natu static void 1685869c8d19SJohn Baldwin vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu) 1686091d4532SNeel Natu { 16875c272efaSRobert Wing uint32_t gi __diagused; 1688091d4532SNeel Natu 1689091d4532SNeel Natu gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1690091d4532SNeel Natu KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1691091d4532SNeel Natu ("NMI blocking is not in effect %#x", gi)); 1692091d4532SNeel Natu } 1693091d4532SNeel Natu 1694366f6083SPeter Grehan static int 16951aa51504SJohn Baldwin vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu, 16961aa51504SJohn Baldwin struct vm_exit *vmexit) 1697abb023fbSJohn Baldwin { 1698abb023fbSJohn Baldwin struct vmxctx *vmxctx; 1699abb023fbSJohn Baldwin uint64_t xcrval; 1700abb023fbSJohn Baldwin const struct xsave_limits *limits; 1701abb023fbSJohn Baldwin 17021aa51504SJohn Baldwin vmxctx = &vcpu->ctx; 1703abb023fbSJohn Baldwin limits = vmm_get_xsave_limits(); 1704abb023fbSJohn Baldwin 1705a0efd3fbSJohn Baldwin /* 1706a0efd3fbSJohn Baldwin * Note that the processor raises a GP# fault on its own if 1707a0efd3fbSJohn Baldwin * xsetbv is executed for CPL != 0, so we do not have to 1708a0efd3fbSJohn Baldwin * 
emulate that fault here. 1709a0efd3fbSJohn Baldwin */ 1710a0efd3fbSJohn Baldwin 1711a0efd3fbSJohn Baldwin /* Only xcr0 is supported. */ 1712a0efd3fbSJohn Baldwin if (vmxctx->guest_rcx != 0) { 1713d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 1714a0efd3fbSJohn Baldwin return (HANDLED); 1715a0efd3fbSJohn Baldwin } 1716a0efd3fbSJohn Baldwin 1717a0efd3fbSJohn Baldwin /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1718a0efd3fbSJohn Baldwin if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1719d3956e46SJohn Baldwin vm_inject_ud(vcpu->vcpu); 1720a0efd3fbSJohn Baldwin return (HANDLED); 1721a0efd3fbSJohn Baldwin } 1722abb023fbSJohn Baldwin 1723abb023fbSJohn Baldwin xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1724a0efd3fbSJohn Baldwin if ((xcrval & ~limits->xcr0_allowed) != 0) { 1725d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 1726a0efd3fbSJohn Baldwin return (HANDLED); 1727a0efd3fbSJohn Baldwin } 1728abb023fbSJohn Baldwin 1729a0efd3fbSJohn Baldwin if (!(xcrval & XFEATURE_ENABLED_X87)) { 1730d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 1731a0efd3fbSJohn Baldwin return (HANDLED); 1732a0efd3fbSJohn Baldwin } 1733abb023fbSJohn Baldwin 173444a68c4eSJohn Baldwin /* AVX (YMM_Hi128) requires SSE. */ 173544a68c4eSJohn Baldwin if (xcrval & XFEATURE_ENABLED_AVX && 173644a68c4eSJohn Baldwin (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1737d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 173844a68c4eSJohn Baldwin return (HANDLED); 173944a68c4eSJohn Baldwin } 174044a68c4eSJohn Baldwin 174144a68c4eSJohn Baldwin /* 174244a68c4eSJohn Baldwin * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 174344a68c4eSJohn Baldwin * ZMM_Hi256, and Hi16_ZMM. 174444a68c4eSJohn Baldwin */ 174544a68c4eSJohn Baldwin if (xcrval & XFEATURE_AVX512 && 174644a68c4eSJohn Baldwin (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 174744a68c4eSJohn Baldwin (XFEATURE_AVX512 | XFEATURE_AVX)) { 1748d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 174944a68c4eSJohn Baldwin return (HANDLED); 175044a68c4eSJohn Baldwin } 175144a68c4eSJohn Baldwin 175244a68c4eSJohn Baldwin /* 175344a68c4eSJohn Baldwin * Intel MPX requires both bound register state flags to be 175444a68c4eSJohn Baldwin * set. 175544a68c4eSJohn Baldwin */ 175644a68c4eSJohn Baldwin if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 175744a68c4eSJohn Baldwin ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1758d3956e46SJohn Baldwin vm_inject_gp(vcpu->vcpu); 1759a0efd3fbSJohn Baldwin return (HANDLED); 1760a0efd3fbSJohn Baldwin } 1761abb023fbSJohn Baldwin 1762abb023fbSJohn Baldwin /* 1763abb023fbSJohn Baldwin * This runs "inside" vmrun() with the guest's FPU state, so 1764abb023fbSJohn Baldwin * modifying xcr0 directly modifies the guest's xcr0, not the 1765abb023fbSJohn Baldwin * host's. 
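 *
 * For reference, the value loaded below was assembled earlier in this
 * function exactly the way the XSETBV instruction itself consumes
 * %edx:%eax:
 *
 *	xcrval = (guest_rdx << 32) | (guest_rax & 0xffffffff);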
1766abb023fbSJohn Baldwin */ 1767abb023fbSJohn Baldwin load_xcr(0, xcrval); 1768abb023fbSJohn Baldwin return (HANDLED); 1769abb023fbSJohn Baldwin } 1770abb023fbSJohn Baldwin 1771594db002STycho Nightingale static uint64_t 17721aa51504SJohn Baldwin vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident) 1773366f6083SPeter Grehan { 1774366f6083SPeter Grehan const struct vmxctx *vmxctx; 1775366f6083SPeter Grehan 17761aa51504SJohn Baldwin vmxctx = &vcpu->ctx; 1777594db002STycho Nightingale 1778594db002STycho Nightingale switch (ident) { 1779594db002STycho Nightingale case 0: 1780594db002STycho Nightingale return (vmxctx->guest_rax); 1781594db002STycho Nightingale case 1: 1782594db002STycho Nightingale return (vmxctx->guest_rcx); 1783594db002STycho Nightingale case 2: 1784594db002STycho Nightingale return (vmxctx->guest_rdx); 1785594db002STycho Nightingale case 3: 1786594db002STycho Nightingale return (vmxctx->guest_rbx); 1787594db002STycho Nightingale case 4: 1788594db002STycho Nightingale return (vmcs_read(VMCS_GUEST_RSP)); 1789594db002STycho Nightingale case 5: 1790594db002STycho Nightingale return (vmxctx->guest_rbp); 1791594db002STycho Nightingale case 6: 1792594db002STycho Nightingale return (vmxctx->guest_rsi); 1793594db002STycho Nightingale case 7: 1794594db002STycho Nightingale return (vmxctx->guest_rdi); 1795594db002STycho Nightingale case 8: 1796594db002STycho Nightingale return (vmxctx->guest_r8); 1797594db002STycho Nightingale case 9: 1798594db002STycho Nightingale return (vmxctx->guest_r9); 1799594db002STycho Nightingale case 10: 1800594db002STycho Nightingale return (vmxctx->guest_r10); 1801594db002STycho Nightingale case 11: 1802594db002STycho Nightingale return (vmxctx->guest_r11); 1803594db002STycho Nightingale case 12: 1804594db002STycho Nightingale return (vmxctx->guest_r12); 1805594db002STycho Nightingale case 13: 1806594db002STycho Nightingale return (vmxctx->guest_r13); 1807594db002STycho Nightingale case 14: 1808594db002STycho Nightingale return (vmxctx->guest_r14); 1809594db002STycho Nightingale case 15: 1810594db002STycho Nightingale return (vmxctx->guest_r15); 1811594db002STycho Nightingale default: 1812594db002STycho Nightingale panic("invalid vmx register %d", ident); 1813594db002STycho Nightingale } 1814594db002STycho Nightingale } 1815594db002STycho Nightingale 1816594db002STycho Nightingale static void 18171aa51504SJohn Baldwin vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval) 1818594db002STycho Nightingale { 1819594db002STycho Nightingale struct vmxctx *vmxctx; 1820594db002STycho Nightingale 18211aa51504SJohn Baldwin vmxctx = &vcpu->ctx; 1822594db002STycho Nightingale 1823594db002STycho Nightingale switch (ident) { 1824594db002STycho Nightingale case 0: 1825594db002STycho Nightingale vmxctx->guest_rax = regval; 1826594db002STycho Nightingale break; 1827594db002STycho Nightingale case 1: 1828594db002STycho Nightingale vmxctx->guest_rcx = regval; 1829594db002STycho Nightingale break; 1830594db002STycho Nightingale case 2: 1831594db002STycho Nightingale vmxctx->guest_rdx = regval; 1832594db002STycho Nightingale break; 1833594db002STycho Nightingale case 3: 1834594db002STycho Nightingale vmxctx->guest_rbx = regval; 1835594db002STycho Nightingale break; 1836594db002STycho Nightingale case 4: 1837594db002STycho Nightingale vmcs_write(VMCS_GUEST_RSP, regval); 1838594db002STycho Nightingale break; 1839594db002STycho Nightingale case 5: 1840594db002STycho Nightingale vmxctx->guest_rbp = regval; 1841594db002STycho Nightingale break; 1842594db002STycho 
Nightingale case 6: 1843594db002STycho Nightingale vmxctx->guest_rsi = regval; 1844594db002STycho Nightingale break; 1845594db002STycho Nightingale case 7: 1846594db002STycho Nightingale vmxctx->guest_rdi = regval; 1847594db002STycho Nightingale break; 1848594db002STycho Nightingale case 8: 1849594db002STycho Nightingale vmxctx->guest_r8 = regval; 1850594db002STycho Nightingale break; 1851594db002STycho Nightingale case 9: 1852594db002STycho Nightingale vmxctx->guest_r9 = regval; 1853594db002STycho Nightingale break; 1854594db002STycho Nightingale case 10: 1855594db002STycho Nightingale vmxctx->guest_r10 = regval; 1856594db002STycho Nightingale break; 1857594db002STycho Nightingale case 11: 1858594db002STycho Nightingale vmxctx->guest_r11 = regval; 1859594db002STycho Nightingale break; 1860594db002STycho Nightingale case 12: 1861594db002STycho Nightingale vmxctx->guest_r12 = regval; 1862594db002STycho Nightingale break; 1863594db002STycho Nightingale case 13: 1864594db002STycho Nightingale vmxctx->guest_r13 = regval; 1865594db002STycho Nightingale break; 1866594db002STycho Nightingale case 14: 1867594db002STycho Nightingale vmxctx->guest_r14 = regval; 1868594db002STycho Nightingale break; 1869594db002STycho Nightingale case 15: 1870594db002STycho Nightingale vmxctx->guest_r15 = regval; 1871594db002STycho Nightingale break; 1872594db002STycho Nightingale default: 1873594db002STycho Nightingale panic("invalid vmx register %d", ident); 1874594db002STycho Nightingale } 1875594db002STycho Nightingale } 1876594db002STycho Nightingale 1877594db002STycho Nightingale static int 18781aa51504SJohn Baldwin vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual) 1879594db002STycho Nightingale { 1880594db002STycho Nightingale uint64_t crval, regval; 1881594db002STycho Nightingale 1882594db002STycho Nightingale /* We only handle mov to %cr0 at this time */ 188339c21c2dSNeel Natu if ((exitqual & 0xf0) != 0x00) 188439c21c2dSNeel Natu return (UNHANDLED); 188539c21c2dSNeel Natu 18861aa51504SJohn Baldwin regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); 1887366f6083SPeter Grehan 1888594db002STycho Nightingale vmcs_write(VMCS_CR0_SHADOW, regval); 1889366f6083SPeter Grehan 1890594db002STycho Nightingale crval = regval | cr0_ones_mask; 1891594db002STycho Nightingale crval &= ~cr0_zeros_mask; 1892594db002STycho Nightingale vmcs_write(VMCS_GUEST_CR0, crval); 1893366f6083SPeter Grehan 1894594db002STycho Nightingale if (regval & CR0_PG) { 189580a902efSPeter Grehan uint64_t efer, entry_ctls; 189680a902efSPeter Grehan 189780a902efSPeter Grehan /* 189880a902efSPeter Grehan * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 189980a902efSPeter Grehan * the "IA-32e mode guest" bit in VM-entry control must be 190080a902efSPeter Grehan * equal. 
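 *
 * Condensed (a restatement of the SDM consistency requirement, not
 * new logic):
 *
 *	CR0.PG == 1 && EFER.LME == 1  =>
 *	    EFER.LMA == 1 && "IA-32e mode guest" entry control == 1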
190180a902efSPeter Grehan */ 19023de83862SNeel Natu efer = vmcs_read(VMCS_GUEST_IA32_EFER); 190380a902efSPeter Grehan if (efer & EFER_LME) { 190480a902efSPeter Grehan efer |= EFER_LMA; 19053de83862SNeel Natu vmcs_write(VMCS_GUEST_IA32_EFER, efer); 19063de83862SNeel Natu entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 190780a902efSPeter Grehan entry_ctls |= VM_ENTRY_GUEST_LMA; 19083de83862SNeel Natu vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 190980a902efSPeter Grehan } 191080a902efSPeter Grehan } 191180a902efSPeter Grehan 1912366f6083SPeter Grehan return (HANDLED); 1913366f6083SPeter Grehan } 1914366f6083SPeter Grehan 1915594db002STycho Nightingale static int 19161aa51504SJohn Baldwin vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual) 1917594db002STycho Nightingale { 1918594db002STycho Nightingale uint64_t crval, regval; 1919594db002STycho Nightingale 1920594db002STycho Nightingale /* We only handle mov to %cr4 at this time */ 1921594db002STycho Nightingale if ((exitqual & 0xf0) != 0x00) 1922594db002STycho Nightingale return (UNHANDLED); 1923594db002STycho Nightingale 19241aa51504SJohn Baldwin regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); 1925594db002STycho Nightingale 1926594db002STycho Nightingale vmcs_write(VMCS_CR4_SHADOW, regval); 1927594db002STycho Nightingale 1928594db002STycho Nightingale crval = regval | cr4_ones_mask; 1929594db002STycho Nightingale crval &= ~cr4_zeros_mask; 1930594db002STycho Nightingale vmcs_write(VMCS_GUEST_CR4, crval); 1931594db002STycho Nightingale 1932594db002STycho Nightingale return (HANDLED); 1933594db002STycho Nightingale } 1934594db002STycho Nightingale 1935594db002STycho Nightingale static int 19361aa51504SJohn Baldwin vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu, 19371aa51504SJohn Baldwin uint64_t exitqual) 1938594db002STycho Nightingale { 1939051f2bd1SNeel Natu struct vlapic *vlapic; 1940051f2bd1SNeel Natu uint64_t cr8; 1941051f2bd1SNeel Natu int regnum; 1942594db002STycho Nightingale 1943594db002STycho Nightingale /* We only handle mov %cr8 to/from a register at this time. 
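 * A sketch of how the exit qualification is decoded below
 * (paraphrasing the VT-x control-register-access format):
 *
 *	bits  3:0  control register number (8 here)
 *	bits  5:4  access type: 0 = mov to CR, 1 = mov from CR
 *	bits 11:8  GPR operand, passed to vmx_{get,set}_guest_reg()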
*/ 1944594db002STycho Nightingale if ((exitqual & 0xe0) != 0x00) { 1945594db002STycho Nightingale return (UNHANDLED); 1946594db002STycho Nightingale } 1947594db002STycho Nightingale 1948d3956e46SJohn Baldwin vlapic = vm_lapic(vcpu->vcpu); 1949051f2bd1SNeel Natu regnum = (exitqual >> 8) & 0xf; 1950594db002STycho Nightingale if (exitqual & 0x10) { 1951051f2bd1SNeel Natu cr8 = vlapic_get_cr8(vlapic); 19521aa51504SJohn Baldwin vmx_set_guest_reg(vcpu, regnum, cr8); 1953594db002STycho Nightingale } else { 19541aa51504SJohn Baldwin cr8 = vmx_get_guest_reg(vcpu, regnum); 1955051f2bd1SNeel Natu vlapic_set_cr8(vlapic, cr8); 1956594db002STycho Nightingale } 1957594db002STycho Nightingale 1958594db002STycho Nightingale return (HANDLED); 1959594db002STycho Nightingale } 1960594db002STycho Nightingale 1961e4c8a13dSNeel Natu /* 1962e4c8a13dSNeel Natu * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1963e4c8a13dSNeel Natu */ 1964e4c8a13dSNeel Natu static int 1965e4c8a13dSNeel Natu vmx_cpl(void) 1966e4c8a13dSNeel Natu { 1967e4c8a13dSNeel Natu uint32_t ssar; 1968e4c8a13dSNeel Natu 1969e4c8a13dSNeel Natu ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1970e4c8a13dSNeel Natu return ((ssar >> 5) & 0x3); 1971e4c8a13dSNeel Natu } 1972e4c8a13dSNeel Natu 1973e813a873SNeel Natu static enum vm_cpu_mode 197400f3efe1SJohn Baldwin vmx_cpu_mode(void) 197500f3efe1SJohn Baldwin { 1976b301b9e2SNeel Natu uint32_t csar; 197700f3efe1SJohn Baldwin 1978b301b9e2SNeel Natu if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1979b301b9e2SNeel Natu csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1980b301b9e2SNeel Natu if (csar & 0x2000) 1981b301b9e2SNeel Natu return (CPU_MODE_64BIT); /* CS.L = 1 */ 198200f3efe1SJohn Baldwin else 198300f3efe1SJohn Baldwin return (CPU_MODE_COMPATIBILITY); 1984b301b9e2SNeel Natu } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1985b301b9e2SNeel Natu return (CPU_MODE_PROTECTED); 1986b301b9e2SNeel Natu } else { 1987b301b9e2SNeel Natu return (CPU_MODE_REAL); 1988b301b9e2SNeel Natu } 198900f3efe1SJohn Baldwin } 199000f3efe1SJohn Baldwin 1991e813a873SNeel Natu static enum vm_paging_mode 199200f3efe1SJohn Baldwin vmx_paging_mode(void) 199300f3efe1SJohn Baldwin { 1994f3eb12e4SKonstantin Belousov uint64_t cr4; 199500f3efe1SJohn Baldwin 199600f3efe1SJohn Baldwin if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 199700f3efe1SJohn Baldwin return (PAGING_MODE_FLAT); 1998f3eb12e4SKonstantin Belousov cr4 = vmcs_read(VMCS_GUEST_CR4); 1999f3eb12e4SKonstantin Belousov if (!(cr4 & CR4_PAE)) 200000f3efe1SJohn Baldwin return (PAGING_MODE_32); 2001f3eb12e4SKonstantin Belousov if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) { 2002f3eb12e4SKonstantin Belousov if (!(cr4 & CR4_LA57)) 200300f3efe1SJohn Baldwin return (PAGING_MODE_64); 2004f3eb12e4SKonstantin Belousov return (PAGING_MODE_64_LA57); 2005f3eb12e4SKonstantin Belousov } else 200600f3efe1SJohn Baldwin return (PAGING_MODE_PAE); 200700f3efe1SJohn Baldwin } 200800f3efe1SJohn Baldwin 2009d17b5104SNeel Natu static uint64_t 2010869c8d19SJohn Baldwin inout_str_index(struct vmx_vcpu *vcpu, int in) 2011d17b5104SNeel Natu { 2012d17b5104SNeel Natu uint64_t val; 20135c272efaSRobert Wing int error __diagused; 2014d17b5104SNeel Natu enum vm_reg_name reg; 2015d17b5104SNeel Natu 2016d17b5104SNeel Natu reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 2017869c8d19SJohn Baldwin error = vmx_getreg(vcpu, reg, &val); 2018d17b5104SNeel Natu KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 2019d17b5104SNeel Natu return (val); 2020d17b5104SNeel Natu } 2021d17b5104SNeel Natu 2022d17b5104SNeel Natu static uint64_t 2023869c8d19SJohn Baldwin inout_str_count(struct vmx_vcpu *vcpu, int rep) 2024d17b5104SNeel Natu { 2025d17b5104SNeel Natu uint64_t val; 20265c272efaSRobert Wing int error __diagused; 2027d17b5104SNeel Natu 2028d17b5104SNeel Natu if (rep) { 2029869c8d19SJohn Baldwin error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val); 2030d17b5104SNeel Natu KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 2031d17b5104SNeel Natu } else { 2032d17b5104SNeel Natu val = 1; 2033d17b5104SNeel Natu } 2034d17b5104SNeel Natu return (val); 2035d17b5104SNeel Natu } 2036d17b5104SNeel Natu 2037d17b5104SNeel Natu static int 2038d17b5104SNeel Natu inout_str_addrsize(uint32_t inst_info) 2039d17b5104SNeel Natu { 2040d17b5104SNeel Natu uint32_t size; 2041d17b5104SNeel Natu 2042d17b5104SNeel Natu size = (inst_info >> 7) & 0x7; 2043d17b5104SNeel Natu switch (size) { 2044d17b5104SNeel Natu case 0: 2045d17b5104SNeel Natu return (2); /* 16 bit */ 2046d17b5104SNeel Natu case 1: 2047d17b5104SNeel Natu return (4); /* 32 bit */ 2048d17b5104SNeel Natu case 2: 2049d17b5104SNeel Natu return (8); /* 64 bit */ 2050d17b5104SNeel Natu default: 2051d17b5104SNeel Natu panic("%s: invalid size encoding %d", __func__, size); 2052d17b5104SNeel Natu } 2053d17b5104SNeel Natu } 2054d17b5104SNeel Natu 2055d17b5104SNeel Natu static void 2056869c8d19SJohn Baldwin inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in, 2057869c8d19SJohn Baldwin struct vm_inout_str *vis) 2058d17b5104SNeel Natu { 20595c272efaSRobert Wing int error __diagused, s; 2060d17b5104SNeel Natu 2061d17b5104SNeel Natu if (in) { 2062d17b5104SNeel Natu vis->seg_name = VM_REG_GUEST_ES; 2063d17b5104SNeel Natu } else { 2064d17b5104SNeel Natu s = (inst_info >> 15) & 0x7; 2065d17b5104SNeel Natu vis->seg_name = vm_segment_name(s); 2066d17b5104SNeel Natu } 2067d17b5104SNeel Natu 2068869c8d19SJohn Baldwin error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 2069d17b5104SNeel Natu KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 2070d17b5104SNeel Natu } 2071d17b5104SNeel Natu 2072e4c8a13dSNeel Natu static void 2073e813a873SNeel Natu vmx_paging_info(struct vm_guest_paging *paging) 2074e813a873SNeel Natu { 2075e813a873SNeel Natu paging->cr3 = vmcs_guest_cr3(); 2076e813a873SNeel Natu paging->cpl = vmx_cpl(); 2077e813a873SNeel Natu paging->cpu_mode = vmx_cpu_mode(); 2078e813a873SNeel Natu paging->paging_mode = vmx_paging_mode(); 2079e813a873SNeel Natu } 2080e813a873SNeel Natu 2081e813a873SNeel Natu static void 2082e4c8a13dSNeel Natu vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 2083e4c8a13dSNeel Natu { 2084f7a9f178SNeel Natu struct vm_guest_paging *paging; 2085f7a9f178SNeel Natu uint32_t csar; 2086f7a9f178SNeel Natu 2087f7a9f178SNeel Natu paging = &vmexit->u.inst_emul.paging; 2088f7a9f178SNeel Natu 2089e4c8a13dSNeel Natu vmexit->exitcode = VM_EXITCODE_INST_EMUL; 20901c73ea3eSNeel Natu vmexit->inst_length = 0; 2091e4c8a13dSNeel Natu vmexit->u.inst_emul.gpa = gpa; 2092e4c8a13dSNeel Natu vmexit->u.inst_emul.gla = gla; 2093f7a9f178SNeel Natu vmx_paging_info(paging); 2094f7a9f178SNeel Natu switch (paging->cpu_mode) { 2095e4f605eeSTycho Nightingale case CPU_MODE_REAL: 2096e4f605eeSTycho Nightingale vmexit->u.inst_emul.cs_base 
= vmcs_read(VMCS_GUEST_CS_BASE);
2097e4f605eeSTycho Nightingale 		vmexit->u.inst_emul.cs_d = 0;
2098e4f605eeSTycho Nightingale 		break;
2099f7a9f178SNeel Natu 	case CPU_MODE_PROTECTED:
2100f7a9f178SNeel Natu 	case CPU_MODE_COMPATIBILITY:
2101e4f605eeSTycho Nightingale 		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
2102f7a9f178SNeel Natu 		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
2103f7a9f178SNeel Natu 		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
2104f7a9f178SNeel Natu 		break;
2105f7a9f178SNeel Natu 	default:
2106e4f605eeSTycho Nightingale 		vmexit->u.inst_emul.cs_base = 0;
2107f7a9f178SNeel Natu 		vmexit->u.inst_emul.cs_d = 0;
2108f7a9f178SNeel Natu 		break;
2109f7a9f178SNeel Natu 	}
2110c2a875f9SNeel Natu 	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
2111e4c8a13dSNeel Natu }
2112e4c8a13dSNeel Natu 
2113366f6083SPeter Grehan static int
2114318224bbSNeel Natu ept_fault_type(uint64_t ept_qual)
2115a2da7af6SNeel Natu {
2116318224bbSNeel Natu 	int fault_type;
2117a2da7af6SNeel Natu 
2118318224bbSNeel Natu 	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
2119318224bbSNeel Natu 		fault_type = VM_PROT_WRITE;
2120318224bbSNeel Natu 	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
2121318224bbSNeel Natu 		fault_type = VM_PROT_EXECUTE;
2122318224bbSNeel Natu 	else
2123318224bbSNeel Natu 		fault_type = VM_PROT_READ;
2124318224bbSNeel Natu 
2125318224bbSNeel Natu 	return (fault_type);
2126318224bbSNeel Natu }
2127318224bbSNeel Natu 
2128490d56c5SEd Maste static bool
2129318224bbSNeel Natu ept_emulation_fault(uint64_t ept_qual)
2130318224bbSNeel Natu {
2131318224bbSNeel Natu 	int read, write;
2132318224bbSNeel Natu 
2133318224bbSNeel Natu 	/* EPT fault on an instruction fetch doesn't make sense here */
2134a2da7af6SNeel Natu 	if (ept_qual & EPT_VIOLATION_INST_FETCH)
2135490d56c5SEd Maste 		return (false);
2136a2da7af6SNeel Natu 
2137318224bbSNeel Natu 	/* EPT fault must be a read fault or a write fault */
2138a2da7af6SNeel Natu 	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
2139a2da7af6SNeel Natu 	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
21403b2b0011SPeter Grehan 	if ((read | write) == 0)
2141490d56c5SEd Maste 		return (false);
2142a2da7af6SNeel Natu 
2143a2da7af6SNeel Natu 	/*
21443b2b0011SPeter Grehan 	 * The EPT violation must have been caused by accessing a
21453b2b0011SPeter Grehan 	 * guest-physical address that is a translation of a guest-linear
21463b2b0011SPeter Grehan 	 * address.
2147a2da7af6SNeel Natu 	 */
2148a2da7af6SNeel Natu 	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
2149a2da7af6SNeel Natu 	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
2150490d56c5SEd Maste 		return (false);
2151a2da7af6SNeel Natu 	}
2152a2da7af6SNeel Natu 
2153490d56c5SEd Maste 	return (true);
2154a2da7af6SNeel Natu }
2155a2da7af6SNeel Natu 
2156159dd56fSNeel Natu static __inline int
21571aa51504SJohn Baldwin apic_access_virtualization(struct vmx_vcpu *vcpu)
2158159dd56fSNeel Natu {
2159159dd56fSNeel Natu 	uint32_t proc_ctls2;
2160159dd56fSNeel Natu 
21611aa51504SJohn Baldwin 	proc_ctls2 = vcpu->cap.proc_ctls2;
2162159dd56fSNeel Natu 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
2163159dd56fSNeel Natu }
2164159dd56fSNeel Natu 
2165159dd56fSNeel Natu static __inline int
21661aa51504SJohn Baldwin x2apic_virtualization(struct vmx_vcpu *vcpu)
2167159dd56fSNeel Natu {
2168159dd56fSNeel Natu 	uint32_t proc_ctls2;
2169159dd56fSNeel Natu 
21701aa51504SJohn Baldwin 	proc_ctls2 = vcpu->cap.proc_ctls2;
2171159dd56fSNeel Natu 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ?
1 : 0); 2172159dd56fSNeel Natu } 2173159dd56fSNeel Natu 2174a2da7af6SNeel Natu static int 21751aa51504SJohn Baldwin vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic, 2176159dd56fSNeel Natu uint64_t qual) 217788c4b8d1SNeel Natu { 217888c4b8d1SNeel Natu int error, handled, offset; 2179159dd56fSNeel Natu uint32_t *apic_regs, vector; 218088c4b8d1SNeel Natu bool retu; 218188c4b8d1SNeel Natu 2182a0efd3fbSJohn Baldwin handled = HANDLED; 218388c4b8d1SNeel Natu offset = APIC_WRITE_OFFSET(qual); 2184159dd56fSNeel Natu 21851aa51504SJohn Baldwin if (!apic_access_virtualization(vcpu)) { 2186159dd56fSNeel Natu /* 2187159dd56fSNeel Natu * In general there should not be any APIC write VM-exits 2188159dd56fSNeel Natu * unless APIC-access virtualization is enabled. 2189159dd56fSNeel Natu * 2190159dd56fSNeel Natu * However self-IPI virtualization can legitimately trigger 2191159dd56fSNeel Natu * an APIC-write VM-exit so treat it specially. 2192159dd56fSNeel Natu */ 21931aa51504SJohn Baldwin if (x2apic_virtualization(vcpu) && 2194159dd56fSNeel Natu offset == APIC_OFFSET_SELF_IPI) { 2195159dd56fSNeel Natu apic_regs = (uint32_t *)(vlapic->apic_page); 2196159dd56fSNeel Natu vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2197159dd56fSNeel Natu vlapic_self_ipi_handler(vlapic, vector); 2198159dd56fSNeel Natu return (HANDLED); 2199159dd56fSNeel Natu } else 2200159dd56fSNeel Natu return (UNHANDLED); 2201159dd56fSNeel Natu } 2202159dd56fSNeel Natu 220388c4b8d1SNeel Natu switch (offset) { 220488c4b8d1SNeel Natu case APIC_OFFSET_ID: 220588c4b8d1SNeel Natu vlapic_id_write_handler(vlapic); 220688c4b8d1SNeel Natu break; 220788c4b8d1SNeel Natu case APIC_OFFSET_LDR: 220888c4b8d1SNeel Natu vlapic_ldr_write_handler(vlapic); 220988c4b8d1SNeel Natu break; 221088c4b8d1SNeel Natu case APIC_OFFSET_DFR: 221188c4b8d1SNeel Natu vlapic_dfr_write_handler(vlapic); 221288c4b8d1SNeel Natu break; 221388c4b8d1SNeel Natu case APIC_OFFSET_SVR: 221488c4b8d1SNeel Natu vlapic_svr_write_handler(vlapic); 221588c4b8d1SNeel Natu break; 221688c4b8d1SNeel Natu case APIC_OFFSET_ESR: 221788c4b8d1SNeel Natu vlapic_esr_write_handler(vlapic); 221888c4b8d1SNeel Natu break; 221988c4b8d1SNeel Natu case APIC_OFFSET_ICR_LOW: 222088c4b8d1SNeel Natu retu = false; 222188c4b8d1SNeel Natu error = vlapic_icrlo_write_handler(vlapic, &retu); 222288c4b8d1SNeel Natu if (error != 0 || retu) 2223a0efd3fbSJohn Baldwin handled = UNHANDLED; 222488c4b8d1SNeel Natu break; 222588c4b8d1SNeel Natu case APIC_OFFSET_CMCI_LVT: 222688c4b8d1SNeel Natu case APIC_OFFSET_TIMER_LVT ... 
APIC_OFFSET_ERROR_LVT: 222788c4b8d1SNeel Natu vlapic_lvt_write_handler(vlapic, offset); 222888c4b8d1SNeel Natu break; 222988c4b8d1SNeel Natu case APIC_OFFSET_TIMER_ICR: 223088c4b8d1SNeel Natu vlapic_icrtmr_write_handler(vlapic); 223188c4b8d1SNeel Natu break; 223288c4b8d1SNeel Natu case APIC_OFFSET_TIMER_DCR: 223388c4b8d1SNeel Natu vlapic_dcr_write_handler(vlapic); 223488c4b8d1SNeel Natu break; 223588c4b8d1SNeel Natu default: 2236a0efd3fbSJohn Baldwin handled = UNHANDLED; 223788c4b8d1SNeel Natu break; 223888c4b8d1SNeel Natu } 223988c4b8d1SNeel Natu return (handled); 224088c4b8d1SNeel Natu } 224188c4b8d1SNeel Natu 224288c4b8d1SNeel Natu static bool 22431aa51504SJohn Baldwin apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa) 224488c4b8d1SNeel Natu { 224588c4b8d1SNeel Natu 22461aa51504SJohn Baldwin if (apic_access_virtualization(vcpu) && 224788c4b8d1SNeel Natu (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 224888c4b8d1SNeel Natu return (true); 224988c4b8d1SNeel Natu else 225088c4b8d1SNeel Natu return (false); 225188c4b8d1SNeel Natu } 225288c4b8d1SNeel Natu 225388c4b8d1SNeel Natu static int 22541aa51504SJohn Baldwin vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) 225588c4b8d1SNeel Natu { 225688c4b8d1SNeel Natu uint64_t qual; 225788c4b8d1SNeel Natu int access_type, offset, allowed; 225888c4b8d1SNeel Natu 22591aa51504SJohn Baldwin if (!apic_access_virtualization(vcpu)) 226088c4b8d1SNeel Natu return (UNHANDLED); 226188c4b8d1SNeel Natu 226288c4b8d1SNeel Natu qual = vmexit->u.vmx.exit_qualification; 226388c4b8d1SNeel Natu access_type = APIC_ACCESS_TYPE(qual); 226488c4b8d1SNeel Natu offset = APIC_ACCESS_OFFSET(qual); 226588c4b8d1SNeel Natu 226688c4b8d1SNeel Natu allowed = 0; 226788c4b8d1SNeel Natu if (access_type == 0) { 226888c4b8d1SNeel Natu /* 226988c4b8d1SNeel Natu * Read data access to the following registers is expected. 227088c4b8d1SNeel Natu */ 227188c4b8d1SNeel Natu switch (offset) { 227288c4b8d1SNeel Natu case APIC_OFFSET_APR: 227388c4b8d1SNeel Natu case APIC_OFFSET_PPR: 227488c4b8d1SNeel Natu case APIC_OFFSET_RRR: 227588c4b8d1SNeel Natu case APIC_OFFSET_CMCI_LVT: 227688c4b8d1SNeel Natu case APIC_OFFSET_TIMER_CCR: 227788c4b8d1SNeel Natu allowed = 1; 227888c4b8d1SNeel Natu break; 227988c4b8d1SNeel Natu default: 228088c4b8d1SNeel Natu break; 228188c4b8d1SNeel Natu } 228288c4b8d1SNeel Natu } else if (access_type == 1) { 228388c4b8d1SNeel Natu /* 228488c4b8d1SNeel Natu * Write data access to the following registers is expected. 228588c4b8d1SNeel Natu */ 228688c4b8d1SNeel Natu switch (offset) { 228788c4b8d1SNeel Natu case APIC_OFFSET_VER: 228888c4b8d1SNeel Natu case APIC_OFFSET_APR: 228988c4b8d1SNeel Natu case APIC_OFFSET_PPR: 229088c4b8d1SNeel Natu case APIC_OFFSET_RRR: 229188c4b8d1SNeel Natu case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 229288c4b8d1SNeel Natu case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 229388c4b8d1SNeel Natu case APIC_OFFSET_IRR0 ... 

static enum task_switch_reason
vmx_task_switch_reason(uint64_t qual)
{
	int reason;

	reason = (qual >> 30) & 0x3;
	switch (reason) {
	case 0:
		return (TSR_CALL);
	case 1:
		return (TSR_IRET);
	case 2:
		return (TSR_JMP);
	case 3:
		return (TSR_IDT_GATE);
	default:
		panic("%s: invalid reason %d", __func__, reason);
	}
}

static int
emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
	else
		error = vmx_wrmsr(vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu)
{
	struct vmxctx *vmxctx;
	uint64_t result;
	uint32_t eax, edx;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
	else
		error = vmx_rdmsr(vcpu, num, &result, retu);

	if (error == 0) {
		eax = result;
		vmxctx = &vcpu->ctx;
		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));

		edx = result >> 32;
		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
	}

	return (error);
}
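
/*
 * Sketch (kept under #if 0, not compiled): RDMSR returns the 64-bit MSR
 * value split across EDX:EAX, which is why emulate_rdmsr() stores the
 * low half in %rax and the high half in %rdx.  The helper below shows
 * the same split and reassembly on a hypothetical value.
 */
#if 0
static void
msr_split_example(void)
{
	uint64_t result = 0x1122334455667788ULL;	/* hypothetical */
	uint32_t eax, edx;
	uint64_t reassembled;

	eax = result;			/* low 32 bits:  0x55667788 */
	edx = result >> 32;		/* high 32 bits: 0x11223344 */
	reassembled = (uint64_t)edx << 32 | eax;

	KASSERT(reassembled == result, ("MSR split/reassembly mismatch"));
}
#endif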

static int
vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
	int error, errcode, errcode_valid, handled, in;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	struct vm_inout_str *vis;
	struct vm_task_switch *ts;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
	uint32_t intr_type, intr_vec, reason;
	uint64_t exitintinfo, qual, gpa;
#ifdef KDTRACE_HOOKS
	int vcpuid;
#endif
	bool retu;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);

	handled = UNHANDLED;
	vmxctx = &vcpu->ctx;
#ifdef KDTRACE_HOOKS
	vcpuid = vcpu->vcpuid;
#endif

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
	SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit);

	/*
	 * VM-entry failures during or after loading guest state.
	 *
	 * These VM-exits are uncommon but must be handled specially
	 * as most VM-exit fields are not populated as usual.
	 */
	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
		VMX_CTR0(vcpu, "Handling MCE during VM-entry");
		__asm __volatile("int $18");
		return (1);
	}

	/*
	 * VM exits that can be triggered during event delivery need to
	 * be handled specially by re-injecting the event if the IDT
	 * vectoring information field's valid bit is set.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	idtvec_info = vmcs_idt_vectoring_info();
	if (idtvec_info & VMCS_IDT_VEC_VALID) {
		idtvec_info &= ~(1 << 12); /* clear undefined bit */
		exitintinfo = idtvec_info;
		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
			idtvec_err = vmcs_idt_vectoring_err();
			exitintinfo |= (uint64_t)idtvec_err << 32;
		}
		error = vm_exit_intinfo(vcpu->vcpu, exitintinfo);
		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
		    __func__, error));

		/*
		 * If 'virtual NMIs' are being used and the VM-exit
		 * happened while injecting an NMI during the previous
		 * VM-entry, then clear "blocking by NMI" in the
		 * Guest Interruptibility-State so the NMI can be
		 * reinjected on the subsequent VM-entry.
		 *
		 * However, if the NMI was being delivered through a task
		 * gate, then the new task must start execution with NMIs
		 * blocked so don't clear NMI blocking in this case.
		 */
		intr_type = idtvec_info & VMCS_INTR_T_MASK;
		if (intr_type == VMCS_INTR_T_NMI) {
			if (reason != EXIT_REASON_TASK_SWITCH)
				vmx_clear_nmi_blocking(vcpu);
			else
				vmx_assert_nmi_blocking(vcpu);
		}

		/*
		 * Update VM-entry instruction length if the event being
		 * delivered was a software interrupt or software exception.
		 */
		if (intr_type == VMCS_INTR_T_SWINTR ||
		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
	}

	switch (reason) {
	case EXIT_REASON_TASK_SWITCH:
		ts = &vmexit->u.task_switch;
		ts->tsssel = qual & 0xffff;
		ts->reason = vmx_task_switch_reason(qual);
		ts->ext = 0;
		ts->errcode_valid = 0;
		vmx_paging_info(&ts->paging);
		/*
		 * If the task switch was due to a CALL, JMP, IRET, software
		 * interrupt (INT n) or software exception (INT3, INTO),
		 * then the saved %rip references the instruction that caused
		 * the task switch. The instruction length field in the VMCS
		 * is valid in this case.
		 *
		 * In all other cases (e.g., NMI, hardware exception) the
		 * saved %rip is one that would have been saved in the old TSS
		 * had the task switch completed normally so the instruction
		 * length field is not needed in this case and is explicitly
		 * set to 0.
		 */
		if (ts->reason == TSR_IDT_GATE) {
			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
			    ("invalid idtvec_info %#x for IDT task switch",
			    idtvec_info));
			intr_type = idtvec_info & VMCS_INTR_T_MASK;
			if (intr_type != VMCS_INTR_T_SWINTR &&
			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
				/* Task switch triggered by external event */
				ts->ext = 1;
				vmexit->inst_length = 0;
				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
					ts->errcode_valid = 1;
					ts->errcode = vmcs_idt_vectoring_err();
				}
			}
		}
		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
		SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpuid, vmexit, ts);
		VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, "
		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
		    ts->ext ? "external" : "internal",
		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
		break;
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1);
		SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual);
		switch (qual & 0xf) {
		case 0:
			handled = vmx_emulate_cr0_access(vcpu, qual);
			break;
		case 4:
			handled = vmx_emulate_cr4_access(vcpu, qual);
			break;
		case 8:
			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
			break;
		}
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
		retu = false;
		ecx = vmxctx->guest_rcx;
		VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
		SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx);
		error = emulate_rdmsr(vcpu, ecx, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else if (!retu) {
			handled = HANDLED;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
		retu = false;
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx",
		    ecx, (uint64_t)edx << 32 | eax);
		SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx,
		    (uint64_t)edx << 32 | eax);
		error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax,
		    &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else if (!retu) {
			handled = HANDLED;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
		SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		if (virtual_interrupt_delivery)
			vmexit->u.hlt.intr_status =
			    vmcs_read(VMCS_GUEST_INTR_STATUS);
		else
			vmexit->u.hlt.intr_status = 0;
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1);
		SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		vmexit->inst_length = 0;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
		SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1);
		SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
		vmx_clear_int_window_exiting(vcpu);
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		SDT_PROBE4(vmm, vmx, exit, interrupt,
		    vmx, vcpuid, vmexit, intr_info);

		/*
		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
		 * This appears to be a bug in VMware Fusion?
		 */
		if (!(intr_info & VMCS_INTR_VALID))
			return (1);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
		    ("VM exit interruption info invalid: %#x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
		/* Exit to allow the pending virtual NMI to be injected */
		if (vm_nmi_pending(vcpu->vcpu))
			vmx_inject_nmi(vcpu);
		vmx_clear_nmi_window_exiting(vcpu);
		vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		if (vmexit->u.inout.string) {
			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
			vis = &vmexit->u.inout_str;
			vmx_paging_info(&vis->paging);
			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
			vis->index = inout_str_index(vcpu, in);
			vis->count = inout_str_count(vcpu, vis->inout.rep);
			vis->addrsize = inout_str_addrsize(inst_info);
			inout_str_seginfo(vcpu, inst_info, in, vis);
		}
		SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
		SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
		handled = vmx_handle_cpuid(vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %#x", intr_info));

		intr_vec = intr_info & 0xff;
		intr_type = intr_info & VMCS_INTR_T_MASK;

		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to a
		 * fault encountered during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming
		 * the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 * See "Information for VM Exits Due to Vectored Events".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_vec != IDT_DF) &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vcpu);

		/*
		 * The NMI has already been handled in vmx_exit_handle_nmi().
		 */
		if (intr_type == VMCS_INTR_T_NMI)
			return (1);

		/*
		 * Call the machine check handler by hand. Also don't reflect
		 * the machine check back into the guest.
		 */
		if (intr_vec == IDT_MC) {
			VMX_CTR0(vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			return (1);
		}

		/*
		 * If the hypervisor has requested user exits for
		 * debug exceptions, bounce them out to userland.
		 */
		if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP &&
		    (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) {
			vmexit->exitcode = VM_EXITCODE_BPT;
			vmexit->u.bpt.inst_length = vmexit->inst_length;
			vmexit->inst_length = 0;
			break;
		}

		if (intr_vec == IDT_PF) {
			error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
			KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
			    __func__, error));
		}

		/*
		 * Software exceptions exhibit trap-like behavior. This in
		 * turn requires populating the VM-entry instruction length
		 * so that the %rip in the trap frame is past the INT3/INTO
		 * instruction.
		 */
		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);

		/* Reflect all other exceptions back into the guest */
		errcode_valid = errcode = 0;
		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
			errcode_valid = 1;
			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
		}
		VMX_CTR2(vcpu, "Reflecting exception %d/%#x into "
		    "the guest", intr_vec, errcode);
		SDT_PROBE5(vmm, vmx, exit, exception,
		    vmx, vcpuid, vmexit, intr_vec, errcode);
		error = vm_inject_exception(vcpu->vcpu, intr_vec,
		    errcode_valid, errcode, 0);
		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
		    __func__, error));
		return (1);

	case EXIT_REASON_EPT_FAULT:
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vcpu->vcpu, gpa) ||
		    apic_access_fault(vcpu, gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->inst_length = 0;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
			vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
			SDT_PROBE5(vmm, vmx, exit, nestedfault,
			    vmx, vcpuid, vmexit, gpa, qual);
		} else if (ept_emulation_fault(qual)) {
			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
			vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
			SDT_PROBE4(vmm, vmx, exit, mmiofault,
			    vmx, vcpuid, vmexit, gpa);
		}
		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to an
		 * EPT fault during the execution of IRET then we must restore
		 * the state of "virtual-NMI blocking" before resuming.
		 *
		 * See description of "NMI unblocking due to IRET" in
		 * "Exit Qualification for EPT Violations".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (qual & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vcpu);
		break;
	case EXIT_REASON_VIRTUALIZED_EOI:
		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
		SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpuid, vmexit);
		vmexit->inst_length = 0;	/* trap-like */
		break;
	case EXIT_REASON_APIC_ACCESS:
		SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpuid, vmexit);
		handled = vmx_handle_apic_access(vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vcpu->vcpu);
		SDT_PROBE4(vmm, vmx, exit, apicwrite,
		    vmx, vcpuid, vmexit, vlapic);
		handled = vmx_handle_apic_write(vcpu, vlapic, qual);
		break;
	case EXIT_REASON_XSETBV:
		SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpuid, vmexit);
		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_MONITOR:
		SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case EXIT_REASON_MWAIT:
		SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	case EXIT_REASON_TPR:
		vlapic = vm_lapic(vcpu->vcpu);
		vlapic_sync_tpr(vlapic);
		vmexit->inst_length = 0;
		handled = HANDLED;
		break;
	case EXIT_REASON_VMCALL:
	case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH:
	case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST:
	case EXIT_REASON_VMREAD:
	case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMWRITE:
	case EXIT_REASON_VMXOFF:
	case EXIT_REASON_VMXON:
		SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpuid, vmexit);
		vmexit->exitcode = VM_EXITCODE_VMINSN;
		break;
	case EXIT_REASON_INVD:
	case EXIT_REASON_WBINVD:
		/* ignore exit */
		handled = HANDLED;
		break;
	default:
		SDT_PROBE4(vmm, vmx, exit, unknown,
		    vmx, vcpuid, vmexit, reason);
		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
			vmexit->u.vmx.inst_type = 0;
			vmexit->u.vmx.inst_error = 0;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}

	SDT_PROBE4(vmm, vmx, exit, return,
	    vmx, vcpuid, vmexit, handled);
	return (handled);
}
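
/*
 * Sketch (kept under #if 0, not compiled): the post-switch fixup above
 * is why a kernel-handled exit never re-executes the trapping
 * instruction.  For a hypothetical 2-byte instruction at 0x1000 the
 * bookkeeping works out as below; the helper name and values are
 * illustrative only, and vmcs_write() assumes the vcpu's VMCS is
 * current on this cpu.
 */
#if 0
static void
handled_exit_rip_example(struct vm_exit *vmexit)
{
	vmexit->rip = 0x1000;		/* hypothetical trapping %rip */
	vmexit->inst_length = 2;	/* e.g. a 2-byte instruction */

	/* Mirror of the 'handled' branch in vmx_exit_process(). */
	vmexit->rip += vmexit->inst_length;	/* resume at 0x1002 */
	vmexit->inst_length = 0;	/* userland must not skip it again */
	vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
}
#endif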

static __inline void
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
	}
}

/*
 * If the NMI-exiting VM execution control is set to '1' then an NMI in
 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
 * sufficient to simply vector to the NMI handler via a software interrupt.
 * However, this must be done before maskable interrupts are enabled
 * otherwise the "iret" issued by an interrupt handler will incorrectly
 * clear NMI blocking.
 */
static __inline void
vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
	uint32_t intr_info;

	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));

	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
		return;

	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
	    ("VM exit interruption info invalid: %#x", intr_info));

	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
		    "to NMI has invalid vector: %#x", intr_info));
		VMX_CTR0(vcpu, "Vectoring to NMI handler");
		__asm __volatile("int $2");
	}
}
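
/*
 * Sketch (kept under #if 0, not compiled): the VM-exit interruption
 * information field packs the vector in bits 7:0 and the event type in
 * bits 10:8, which is what the VMCS_INTR_T_MASK test above extracts.
 * A hardware NMI encodes as shown; the helper name is hypothetical.
 */
#if 0
static void
intr_info_decode_example(void)
{
	uint32_t intr_info;

	intr_info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | IDT_NMI;

	KASSERT((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI,
	    ("type field should decode as NMI"));
	KASSERT((intr_info & 0xff) == IDT_NMI,
	    ("vector field should decode as vector 2"));
}
#endif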

static __inline void
vmx_dr_enter_guest(struct vmxctx *vmxctx)
{
	register_t rflags;

	/* Save host control debug registers. */
	vmxctx->host_dr7 = rdr7();
	vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/*
	 * Disable single stepping the kernel to avoid corrupting the
	 * guest DR6. A debugger might still be able to corrupt the
	 * guest DR6 by setting a breakpoint after this point and then
	 * single stepping.
	 */
	rflags = read_rflags();
	vmxctx->host_tf = rflags & PSL_T;
	write_rflags(rflags & ~PSL_T);

	/* Save host debug registers. */
	vmxctx->host_dr0 = rdr0();
	vmxctx->host_dr1 = rdr1();
	vmxctx->host_dr2 = rdr2();
	vmxctx->host_dr3 = rdr3();
	vmxctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(vmxctx->guest_dr0);
	load_dr1(vmxctx->guest_dr1);
	load_dr2(vmxctx->guest_dr2);
	load_dr3(vmxctx->guest_dr3);
	load_dr6(vmxctx->guest_dr6);
}
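
/*
 * Sketch (kept under #if 0, not compiled): the save/restore ordering
 * above matters.  A hypothetical world switch that re-armed host DR7
 * before saving the guest DRx values, or restored PSL_T first, could
 * take debug traps against the wrong register set.  The pairing used
 * by vmx_dr_enter_guest()/vmx_dr_leave_guest() reduces to:
 */
#if 0
static void
dr_switch_order_example(struct vmxctx *vmxctx)
{
	/* Entry: quiesce host debug state first, then load guest DRs. */
	vmxctx->host_dr7 = rdr7();
	load_dr7(0);			/* no host watchpoint can fire */
	load_dr0(vmxctx->guest_dr0);

	/* Exit: save guest DRs, then re-arm host state last. */
	vmxctx->guest_dr0 = rdr0();
	load_dr0(vmxctx->host_dr0);
	load_dr7(vmxctx->host_dr7);	/* host watchpoints re-enabled */
}
#endif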

static __inline void
vmx_dr_leave_guest(struct vmxctx *vmxctx)
{

	/* Save guest debug registers. */
	vmxctx->guest_dr0 = rdr0();
	vmxctx->guest_dr1 = rdr1();
	vmxctx->guest_dr2 = rdr2();
	vmxctx->guest_dr3 = rdr3();
	vmxctx->guest_dr6 = rdr6();

	/*
	 * Restore host debug registers. Restore DR7, DEBUGCTL, and
	 * PSL_T last.
	 */
	load_dr0(vmxctx->host_dr0);
	load_dr1(vmxctx->host_dr1);
	load_dr2(vmxctx->host_dr2);
	load_dr3(vmxctx->host_dr3);
	load_dr6(vmxctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
	load_dr7(vmxctx->host_dr7);
	write_rflags(read_rflags() | vmxctx->host_tf);
}

static __inline void
vmx_pmap_activate(struct vmx *vmx, pmap_t pmap)
{
	long eptgen;
	int cpu;

	cpu = curcpu;

	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
	smr_enter(pmap->pm_eptsmr);
	eptgen = atomic_load_long(&pmap->pm_eptgen);
	if (eptgen != vmx->eptgen[cpu]) {
		vmx->eptgen[cpu] = eptgen;
		invept(INVEPT_TYPE_SINGLE_CONTEXT,
		    (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 });
	}
}

static __inline void
vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
{
	smr_exit(pmap->pm_eptsmr);
	CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
}
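
/*
 * Sketch (kept under #if 0, not compiled): the eptgen comparison above
 * implements a lazy, per-cpu EPT TLB shootdown.  The invalidation side
 * (pmap_invalidate_ept(), simplified here to just the generation bump)
 * only increments the counter; the INVEPT itself is deferred until the
 * next time a vcpu activates the pmap on a given cpu.  The helper name
 * is hypothetical.
 */
#if 0
static void
eptgen_example(struct vmx *vmx, pmap_t pmap, int cpu)
{
	long eptgen;

	/* Invalidation side: publish a new generation. */
	atomic_add_long(&pmap->pm_eptgen, 1);

	/* Entry side, per cpu (mirrors vmx_pmap_activate() above): */
	eptgen = atomic_load_long(&pmap->pm_eptgen);
	if (eptgen != vmx->eptgen[cpu]) {
		vmx->eptgen[cpu] = eptgen;	/* now up to date */
		invept(INVEPT_TYPE_SINGLE_CONTEXT,
		    (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 });
	}
}
#endif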

static int
vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vmx_vcpu *vcpu;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint32_t exit_reason;
	struct region_descriptor gdtr, idtr;
	uint16_t ldt_sel;

	vcpu = vcpui;
	vmx = vcpu->vmx;
	vmcs = vcpu->vmcs;
	vmxctx = &vcpu->ctx;
	vlapic = vm_lapic(vcpu->vcpu);
	vmexit = vm_exitinfo(vcpu->vcpu);
	launched = 0;

	KASSERT(vmxctx->pmap == pmap,
	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));

	vmx_msr_guest_enter(vcpu);

	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may set up the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_init().
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, rip);
	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
	do {
		KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
		    "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));

		handled = UNHANDLED;
		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after 'vmx_inject_interrupts()' will
		 * not be "lost" because it will be held pending in the host
		 * APIC because interrupts are disabled. The pending interrupt
		 * will be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by
		 * pmap_invalidate_ept().
		 */
		disable_intr();
		vmx_inject_interrupts(vcpu, vlapic, rip);

		/*
		 * Check for vcpu suspension after injecting events because
		 * vmx_inject_interrupts() can suspend the vcpu due to a
		 * triple fault.
		 */
		if (vcpu_suspended(evinfo)) {
			enable_intr();
			vm_exit_suspended(vcpu->vcpu, rip);
			break;
		}

		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
			enable_intr();
			vm_exit_rendezvous(vcpu->vcpu, rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_intr();
			vm_exit_reqidle(vcpu->vcpu, rip);
			break;
		}

		if (vcpu_should_yield(vcpu->vcpu)) {
			enable_intr();
			vm_exit_astpending(vcpu->vcpu, rip);
			vmx_astpending_trace(vcpu, rip);
			handled = HANDLED;
			break;
		}

		if (vcpu_debugged(vcpu->vcpu)) {
			enable_intr();
			vm_exit_debug(vcpu->vcpu, rip);
			break;
		}

		/*
		 * If TPR Shadowing is enabled, the TPR Threshold
		 * must be updated right before entering the guest.
		 */
		if (tpr_shadowing && !virtual_interrupt_delivery) {
			if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) {
				vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
			}
		}

		/*
		 * VM exits restore the base address but not the
		 * limits of GDTR and IDTR. The VMCS only stores the
		 * base address, so VM exits set the limits to 0xffff.
		 * Save and restore the full GDTR and IDTR to restore
		 * the limits.
		 *
		 * The VMCS does not save the LDTR at all, and VM
		 * exits clear LDTR as if a NULL selector were loaded.
		 * The userspace hypervisor probably doesn't use a
		 * LDT, but save and restore it to be safe.
		 */
		sgdt(&gdtr);
		sidt(&idtr);
		ldt_sel = sldt();

		/*
		 * The TSC_AUX MSR must be saved/restored while interrupts
		 * are disabled so that it is not possible for the guest
		 * TSC_AUX MSR value to be overwritten by the resume
		 * portion of the IPI_SUSPEND codepath. This is why the
		 * transition of this MSR is handled separately from those
		 * handled by vmx_msr_guest_{enter,exit}(), which are ok to
		 * be transitioned with preemption disabled but interrupts
		 * enabled.
		 *
		 * These vmx_msr_guest_{enter,exit}_tsc_aux() calls can be
		 * anywhere in this loop so long as they happen with
		 * interrupts disabled. This location is chosen for
		 * simplicity.
		 */
		vmx_msr_guest_enter_tsc_aux(vmx, vcpu);

		vmx_dr_enter_guest(vmxctx);

		/*
		 * Mark the EPT as active on this host CPU and invalidate
		 * EPTP-tagged TLB entries if required.
		 */
		vmx_pmap_activate(vmx, pmap);

		vmx_run_trace(vcpu);
		rc = vmx_enter_guest(vmxctx, vmx, launched);

		vmx_pmap_deactivate(vmx, pmap);
		vmx_dr_leave_guest(vmxctx);
		vmx_msr_guest_exit_tsc_aux(vmx, vcpu);

		bare_lgdt(&gdtr);
		lidt(&idtr);
		lldt(ldt_sel);

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		/* Update 'nextrip' */
		vcpu->state.nextrip = rip;

		if (rc == VMX_GUEST_VMEXIT) {
			vmx_exit_handle_nmi(vcpu, vmexit);
			enable_intr();
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			enable_intr();
			vmx_exit_inst_error(vmxctx, rc, vmexit);
		}
		launched = 1;
		vmx_exit_trace(vcpu, rip, exit_reason, handled);
		rip = vmexit->rip;
	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS.
	 * If a VM exit is not handled then the exitcode must not be BOGUS.
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	VMCLEAR(vmcs);
	vmx_msr_guest_exit(vcpu);

	return (0);
}
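
/*
 * Sketch (kept under #if 0, not compiled): the panic above enforces
 * that 'handled' and the exitcode always move together.  Written as a
 * predicate the run loop must leave true (a hypothetical helper, with
 * HANDLED == 1 and UNHANDLED == 0):
 */
#if 0
static bool
vmx_run_exit_invariant(int handled, int exitcode)
{
	/* Kernel-handled exits look BOGUS to userland, and vice versa. */
	return (handled == (exitcode == VM_EXITCODE_BOGUS));
}
#endif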

static void
vmx_vcpu_cleanup(void *vcpui)
{
	struct vmx_vcpu *vcpu = vcpui;

	vpid_free(vcpu->state.vpid);
	free(vcpu->pir_desc, M_VMX);
	free(vcpu->apic_page, M_VMX);
	free(vcpu->vmcs, M_VMX);
	free(vcpu, M_VMX);
}

static void
vmx_cleanup(void *vmi)
{
	struct vmx *vmx = vmi;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	free(vmx->msr_bitmap, M_VMX);
	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	case VM_REG_GUEST_CR2:
		return (&vmxctx->guest_cr2);
	case VM_REG_GUEST_DR0:
		return (&vmxctx->guest_dr0);
	case VM_REG_GUEST_DR1:
		return (&vmxctx->guest_dr1);
	case VM_REG_GUEST_DR2:
		return (&vmxctx->guest_dr2);
	case VM_REG_GUEST_DR3:
		return (&vmxctx->guest_dr3);
	case VM_REG_GUEST_DR6:
		return (&vmxctx->guest_dr6);
	default:
		break;
	}
	return (NULL);
}
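
/*
 * Sketch (kept under #if 0, not compiled): vmxctx_regptr() only knows
 * about registers that the world-switch code saves by hand; everything
 * else (segment registers, control registers other than %cr2, %rip,
 * ...) lives in the VMCS.  The accessors defined below therefore treat
 * a miss as "fall back to the VMCS", e.g. (hypothetical helper):
 */
#if 0
static uint64_t
guest_reg_read_example(struct vmx_vcpu *vcpu, int reg, int running)
{
	uint64_t val;

	if (vmxctx_getreg(&vcpu->ctx, reg, &val) != 0)
		(void)vmcs_getreg(vcpu->vmcs, running, reg, &val);
	return (val);
}
#endif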

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval)
{
	uint64_t gi;
	int error;

	error = vmcs_getreg(vcpu->vmcs, running,
	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
	return (error);
}

static int
vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val)
{
	struct vmcs *vmcs;
	uint64_t gi;
	int error, ident;

	/*
	 * Forcing the vcpu into an interrupt shadow is not supported.
	 */
	if (val) {
		error = EINVAL;
		goto done;
	}

	vmcs = vcpu->vmcs;
	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
	error = vmcs_getreg(vmcs, running, ident, &gi);
	if (error == 0) {
		gi &= ~HWINTR_BLOCKING;
		error = vmcs_setreg(vmcs, running, ident, gi);
	}
done:
	VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val,
	    error ? "failed" : "succeeded");
	return (error);
}
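
/*
 * Sketch (kept under #if 0, not compiled): the interrupt shadow is
 * reported from the guest interruptibility-state VMCS field.  Assuming
 * the architectural bit layout (bit 0 "blocking by STI", bit 1
 * "blocking by MOV SS") covered by HWINTR_BLOCKING, the boolean
 * returned by vmx_get_intr_shadow() decodes as below; the sample field
 * value is hypothetical.
 */
#if 0
static void
intr_shadow_decode_example(void)
{
	uint64_t gi = 0x1;	/* hypothetical: guest just executed STI */

	KASSERT((gi & HWINTR_BLOCKING) != 0,
	    ("STI blocking must register as an interrupt shadow"));
}
#endif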
"failed" : "succeeded"); 3380d1819632SNeel Natu return (error); 3381d1819632SNeel Natu } 3382d1819632SNeel Natu 3383d1819632SNeel Natu static int 3384aaaa0656SPeter Grehan vmx_shadow_reg(int reg) 3385aaaa0656SPeter Grehan { 3386aaaa0656SPeter Grehan int shreg; 3387aaaa0656SPeter Grehan 3388aaaa0656SPeter Grehan shreg = -1; 3389aaaa0656SPeter Grehan 3390aaaa0656SPeter Grehan switch (reg) { 3391aaaa0656SPeter Grehan case VM_REG_GUEST_CR0: 3392aaaa0656SPeter Grehan shreg = VMCS_CR0_SHADOW; 3393aaaa0656SPeter Grehan break; 3394aaaa0656SPeter Grehan case VM_REG_GUEST_CR4: 3395aaaa0656SPeter Grehan shreg = VMCS_CR4_SHADOW; 3396aaaa0656SPeter Grehan break; 3397aaaa0656SPeter Grehan default: 3398aaaa0656SPeter Grehan break; 3399aaaa0656SPeter Grehan } 3400aaaa0656SPeter Grehan 3401aaaa0656SPeter Grehan return (shreg); 3402aaaa0656SPeter Grehan } 3403aaaa0656SPeter Grehan 3404aaaa0656SPeter Grehan static int 3405869c8d19SJohn Baldwin vmx_getreg(void *vcpui, int reg, uint64_t *retval) 3406366f6083SPeter Grehan { 3407d3c11f40SPeter Grehan int running, hostcpu; 34081aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 3409869c8d19SJohn Baldwin struct vmx *vmx = vcpu->vmx; 3410366f6083SPeter Grehan 341180cb5d84SJohn Baldwin running = vcpu_is_running(vcpu->vcpu, &hostcpu); 3412d3c11f40SPeter Grehan if (running && hostcpu != curcpu) 34131aa51504SJohn Baldwin panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), 34141aa51504SJohn Baldwin vcpu->vcpuid); 3415d3c11f40SPeter Grehan 3416f493ea65SMark Johnston switch (reg) { 3417f493ea65SMark Johnston case VM_REG_GUEST_INTR_SHADOW: 34181aa51504SJohn Baldwin return (vmx_get_intr_shadow(vcpu, running, retval)); 3419f493ea65SMark Johnston case VM_REG_GUEST_KGS_BASE: 3420f493ea65SMark Johnston *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; 3421f493ea65SMark Johnston return (0); 3422f493ea65SMark Johnston case VM_REG_GUEST_TPR: 3423f493ea65SMark Johnston *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); 3424f493ea65SMark Johnston return (0); 3425f493ea65SMark Johnston } 3426d1819632SNeel Natu 34271aa51504SJohn Baldwin if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) 3428366f6083SPeter Grehan return (0); 3429366f6083SPeter Grehan 34301aa51504SJohn Baldwin return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); 3431366f6083SPeter Grehan } 3432366f6083SPeter Grehan 3433366f6083SPeter Grehan static int 3434869c8d19SJohn Baldwin vmx_setreg(void *vcpui, int reg, uint64_t val) 3435366f6083SPeter Grehan { 3436aaaa0656SPeter Grehan int error, hostcpu, running, shadow; 3437366f6083SPeter Grehan uint64_t ctls; 34383527963bSNeel Natu pmap_t pmap; 34391aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 3440869c8d19SJohn Baldwin struct vmx *vmx = vcpu->vmx; 3441366f6083SPeter Grehan 344280cb5d84SJohn Baldwin running = vcpu_is_running(vcpu->vcpu, &hostcpu); 3443d3c11f40SPeter Grehan if (running && hostcpu != curcpu) 34441aa51504SJohn Baldwin panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), 34451aa51504SJohn Baldwin vcpu->vcpuid); 3446d3c11f40SPeter Grehan 3447d1819632SNeel Natu if (reg == VM_REG_GUEST_INTR_SHADOW) 3448869c8d19SJohn Baldwin return (vmx_modify_intr_shadow(vcpu, running, val)); 3449d1819632SNeel Natu 34501aa51504SJohn Baldwin if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) 3451366f6083SPeter Grehan return (0); 3452366f6083SPeter Grehan 345309860d44SEd Maste /* Do not permit user write access to VMCS fields by offset. 
*/ 345409860d44SEd Maste if (reg < 0) 345509860d44SEd Maste return (EINVAL); 345609860d44SEd Maste 34571aa51504SJohn Baldwin error = vmcs_setreg(vcpu->vmcs, running, reg, val); 3458366f6083SPeter Grehan 3459366f6083SPeter Grehan if (error == 0) { 3460366f6083SPeter Grehan /* 3461366f6083SPeter Grehan * If the "load EFER" VM-entry control is 1 then the 3462366f6083SPeter Grehan * value of EFER.LMA must be identical to "IA-32e mode guest" 3463366f6083SPeter Grehan * bit in the VM-entry control. 3464366f6083SPeter Grehan */ 3465366f6083SPeter Grehan if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 3466366f6083SPeter Grehan (reg == VM_REG_GUEST_EFER)) { 34671aa51504SJohn Baldwin vmcs_getreg(vcpu->vmcs, running, 3468366f6083SPeter Grehan VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 3469366f6083SPeter Grehan if (val & EFER_LMA) 3470366f6083SPeter Grehan ctls |= VM_ENTRY_GUEST_LMA; 3471366f6083SPeter Grehan else 3472366f6083SPeter Grehan ctls &= ~VM_ENTRY_GUEST_LMA; 34731aa51504SJohn Baldwin vmcs_setreg(vcpu->vmcs, running, 3474366f6083SPeter Grehan VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 3475366f6083SPeter Grehan } 3476aaaa0656SPeter Grehan 3477aaaa0656SPeter Grehan shadow = vmx_shadow_reg(reg); 3478aaaa0656SPeter Grehan if (shadow > 0) { 3479aaaa0656SPeter Grehan /* 3480aaaa0656SPeter Grehan * Store the unmodified value in the shadow 3481aaaa0656SPeter Grehan */ 34821aa51504SJohn Baldwin error = vmcs_setreg(vcpu->vmcs, running, 3483aaaa0656SPeter Grehan VMCS_IDENT(shadow), val); 3484aaaa0656SPeter Grehan } 34853527963bSNeel Natu 34863527963bSNeel Natu if (reg == VM_REG_GUEST_CR3) { 34873527963bSNeel Natu /* 34883527963bSNeel Natu * Invalidate the guest vcpu's TLB mappings to emulate 34893527963bSNeel Natu * the behavior of updating %cr3. 34903527963bSNeel Natu * 34913527963bSNeel Natu * XXX the processor retains global mappings when %cr3 34923527963bSNeel Natu * is updated but vmx_invvpid() does not. 
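 *
 * The over-invalidation is benign: any global translation flushed
 * here is simply re-created by a page-table walk on the guest's
 * next access, so the divergence costs a TLB refill rather than
 * correctness.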
34933527963bSNeel Natu */ 34941aa51504SJohn Baldwin pmap = vcpu->ctx.pmap; 34953527963bSNeel Natu vmx_invvpid(vmx, vcpu, pmap, running); 34963527963bSNeel Natu } 3497366f6083SPeter Grehan } 3498366f6083SPeter Grehan 3499366f6083SPeter Grehan return (error); 3500366f6083SPeter Grehan } 3501366f6083SPeter Grehan 3502366f6083SPeter Grehan static int 3503869c8d19SJohn Baldwin vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc) 3504366f6083SPeter Grehan { 3505ba6f5e23SNeel Natu int hostcpu, running; 35061aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 3507869c8d19SJohn Baldwin struct vmx *vmx = vcpu->vmx; 3508366f6083SPeter Grehan 350980cb5d84SJohn Baldwin running = vcpu_is_running(vcpu->vcpu, &hostcpu); 3510ba6f5e23SNeel Natu if (running && hostcpu != curcpu) 35111aa51504SJohn Baldwin panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), 35121aa51504SJohn Baldwin vcpu->vcpuid); 3513ba6f5e23SNeel Natu 35141aa51504SJohn Baldwin return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); 3515366f6083SPeter Grehan } 3516366f6083SPeter Grehan 3517366f6083SPeter Grehan static int 3518869c8d19SJohn Baldwin vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc) 3519366f6083SPeter Grehan { 3520ba6f5e23SNeel Natu int hostcpu, running; 35211aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 3522869c8d19SJohn Baldwin struct vmx *vmx = vcpu->vmx; 3523366f6083SPeter Grehan 352480cb5d84SJohn Baldwin running = vcpu_is_running(vcpu->vcpu, &hostcpu); 3525ba6f5e23SNeel Natu if (running && hostcpu != curcpu) 35261aa51504SJohn Baldwin panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), 35271aa51504SJohn Baldwin vcpu->vcpuid); 3528ba6f5e23SNeel Natu 35291aa51504SJohn Baldwin return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); 3530366f6083SPeter Grehan } 3531366f6083SPeter Grehan 3532366f6083SPeter Grehan static int 3533869c8d19SJohn Baldwin vmx_getcap(void *vcpui, int type, int *retval) 3534366f6083SPeter Grehan { 35351aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 3536366f6083SPeter Grehan int vcap; 3537366f6083SPeter Grehan int ret; 3538366f6083SPeter Grehan 3539366f6083SPeter Grehan ret = ENOENT; 3540366f6083SPeter Grehan 35411aa51504SJohn Baldwin vcap = vcpu->cap.set; 3542366f6083SPeter Grehan 3543366f6083SPeter Grehan switch (type) { 3544366f6083SPeter Grehan case VM_CAP_HALT_EXIT: 3545366f6083SPeter Grehan if (cap_halt_exit) 3546366f6083SPeter Grehan ret = 0; 3547366f6083SPeter Grehan break; 3548366f6083SPeter Grehan case VM_CAP_PAUSE_EXIT: 3549366f6083SPeter Grehan if (cap_pause_exit) 3550366f6083SPeter Grehan ret = 0; 3551366f6083SPeter Grehan break; 3552366f6083SPeter Grehan case VM_CAP_MTRAP_EXIT: 3553366f6083SPeter Grehan if (cap_monitor_trap) 3554366f6083SPeter Grehan ret = 0; 3555366f6083SPeter Grehan break; 3556f5f5f1e7SPeter Grehan case VM_CAP_RDPID: 3557f5f5f1e7SPeter Grehan if (cap_rdpid) 3558f5f5f1e7SPeter Grehan ret = 0; 3559f5f5f1e7SPeter Grehan break; 3560f5f5f1e7SPeter Grehan case VM_CAP_RDTSCP: 3561f5f5f1e7SPeter Grehan if (cap_rdtscp) 3562f5f5f1e7SPeter Grehan ret = 0; 3563f5f5f1e7SPeter Grehan break; 3564366f6083SPeter Grehan case VM_CAP_UNRESTRICTED_GUEST: 3565366f6083SPeter Grehan if (cap_unrestricted_guest) 3566366f6083SPeter Grehan ret = 0; 3567366f6083SPeter Grehan break; 356849cc03daSNeel Natu case VM_CAP_ENABLE_INVPCID: 356949cc03daSNeel Natu if (cap_invpcid) 357049cc03daSNeel Natu ret = 0; 357149cc03daSNeel Natu break; 3572cbd03a9dSJohn Baldwin case VM_CAP_BPT_EXIT: 35730bda8d3eSCorvin Köhne case VM_CAP_IPI_EXIT: 3574cbd03a9dSJohn Baldwin ret = 0; 
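		/*
		 * Both capabilities are always available: breakpoint exits
		 * only require the architectural exception bitmap, and IPI
		 * exits are implemented in the software vlapic, so no
		 * hardware feature probe is needed for either.
		 */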
3575cbd03a9dSJohn Baldwin break; 3576366f6083SPeter Grehan default: 3577366f6083SPeter Grehan break; 3578366f6083SPeter Grehan } 3579366f6083SPeter Grehan 3580366f6083SPeter Grehan if (ret == 0) 3581366f6083SPeter Grehan *retval = (vcap & (1 << type)) ? 1 : 0; 3582366f6083SPeter Grehan 3583366f6083SPeter Grehan return (ret); 3584366f6083SPeter Grehan } 3585366f6083SPeter Grehan 3586366f6083SPeter Grehan static int 3587869c8d19SJohn Baldwin vmx_setcap(void *vcpui, int type, int val) 3588366f6083SPeter Grehan { 35891aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 35901aa51504SJohn Baldwin struct vmcs *vmcs = vcpu->vmcs; 35910bda8d3eSCorvin Köhne struct vlapic *vlapic; 3592366f6083SPeter Grehan uint32_t baseval; 3593366f6083SPeter Grehan uint32_t *pptr; 3594366f6083SPeter Grehan int error; 3595366f6083SPeter Grehan int flag; 3596366f6083SPeter Grehan int reg; 3597366f6083SPeter Grehan int retval; 3598366f6083SPeter Grehan 3599366f6083SPeter Grehan retval = ENOENT; 3600366f6083SPeter Grehan pptr = NULL; 3601366f6083SPeter Grehan 3602366f6083SPeter Grehan switch (type) { 3603366f6083SPeter Grehan case VM_CAP_HALT_EXIT: 3604366f6083SPeter Grehan if (cap_halt_exit) { 3605366f6083SPeter Grehan retval = 0; 36061aa51504SJohn Baldwin pptr = &vcpu->cap.proc_ctls; 3607366f6083SPeter Grehan baseval = *pptr; 3608366f6083SPeter Grehan flag = PROCBASED_HLT_EXITING; 3609366f6083SPeter Grehan reg = VMCS_PRI_PROC_BASED_CTLS; 3610366f6083SPeter Grehan } 3611366f6083SPeter Grehan break; 3612366f6083SPeter Grehan case VM_CAP_MTRAP_EXIT: 3613366f6083SPeter Grehan if (cap_monitor_trap) { 3614366f6083SPeter Grehan retval = 0; 36151aa51504SJohn Baldwin pptr = &vcpu->cap.proc_ctls; 3616366f6083SPeter Grehan baseval = *pptr; 3617366f6083SPeter Grehan flag = PROCBASED_MTF; 3618366f6083SPeter Grehan reg = VMCS_PRI_PROC_BASED_CTLS; 3619366f6083SPeter Grehan } 3620366f6083SPeter Grehan break; 3621366f6083SPeter Grehan case VM_CAP_PAUSE_EXIT: 3622366f6083SPeter Grehan if (cap_pause_exit) { 3623366f6083SPeter Grehan retval = 0; 36241aa51504SJohn Baldwin pptr = &vcpu->cap.proc_ctls; 3625366f6083SPeter Grehan baseval = *pptr; 3626366f6083SPeter Grehan flag = PROCBASED_PAUSE_EXITING; 3627366f6083SPeter Grehan reg = VMCS_PRI_PROC_BASED_CTLS; 3628366f6083SPeter Grehan } 3629366f6083SPeter Grehan break; 3630f5f5f1e7SPeter Grehan case VM_CAP_RDPID: 3631f5f5f1e7SPeter Grehan case VM_CAP_RDTSCP: 3632f5f5f1e7SPeter Grehan if (cap_rdpid || cap_rdtscp) 3633f5f5f1e7SPeter Grehan /* 3634f5f5f1e7SPeter Grehan * Choose not to support enabling/disabling 3635f5f5f1e7SPeter Grehan * RDPID/RDTSCP via libvmmapi since, as per the 363615add60dSPeter Grehan * discussion in vmx_modinit(), RDPID/RDTSCP are 3637f5f5f1e7SPeter Grehan * either always enabled or always disabled. 
3638f5f5f1e7SPeter Grehan */ 3639f5f5f1e7SPeter Grehan error = EOPNOTSUPP; 3640f5f5f1e7SPeter Grehan break; 3641366f6083SPeter Grehan case VM_CAP_UNRESTRICTED_GUEST: 3642366f6083SPeter Grehan if (cap_unrestricted_guest) { 3643366f6083SPeter Grehan retval = 0; 36441aa51504SJohn Baldwin pptr = &vcpu->cap.proc_ctls2; 364549cc03daSNeel Natu baseval = *pptr; 3646366f6083SPeter Grehan flag = PROCBASED2_UNRESTRICTED_GUEST; 3647366f6083SPeter Grehan reg = VMCS_SEC_PROC_BASED_CTLS; 3648366f6083SPeter Grehan } 3649366f6083SPeter Grehan break; 365049cc03daSNeel Natu case VM_CAP_ENABLE_INVPCID: 365149cc03daSNeel Natu if (cap_invpcid) { 365249cc03daSNeel Natu retval = 0; 36531aa51504SJohn Baldwin pptr = &vcpu->cap.proc_ctls2; 365449cc03daSNeel Natu baseval = *pptr; 365549cc03daSNeel Natu flag = PROCBASED2_ENABLE_INVPCID; 365649cc03daSNeel Natu reg = VMCS_SEC_PROC_BASED_CTLS; 365749cc03daSNeel Natu } 365849cc03daSNeel Natu break; 3659cbd03a9dSJohn Baldwin case VM_CAP_BPT_EXIT: 3660cbd03a9dSJohn Baldwin retval = 0; 3661cbd03a9dSJohn Baldwin 3662cbd03a9dSJohn Baldwin /* Don't change the bitmap if we are tracing all exceptions. */ 36631aa51504SJohn Baldwin if (vcpu->cap.exc_bitmap != 0xffffffff) { 36641aa51504SJohn Baldwin pptr = &vcpu->cap.exc_bitmap; 3665cbd03a9dSJohn Baldwin baseval = *pptr; 3666cbd03a9dSJohn Baldwin flag = (1 << IDT_BP); 3667cbd03a9dSJohn Baldwin reg = VMCS_EXCEPTION_BITMAP; 3668cbd03a9dSJohn Baldwin } 3669cbd03a9dSJohn Baldwin break; 36700bda8d3eSCorvin Köhne case VM_CAP_IPI_EXIT: 36710bda8d3eSCorvin Köhne retval = 0; 36720bda8d3eSCorvin Köhne 3673d3956e46SJohn Baldwin vlapic = vm_lapic(vcpu->vcpu); 36740bda8d3eSCorvin Köhne vlapic->ipi_exit = val; 36750bda8d3eSCorvin Köhne break; 3676fefac543SBojan Novković case VM_CAP_MASK_HWINTR: 3677fefac543SBojan Novković retval = 0; 3678fefac543SBojan Novković break; 3679366f6083SPeter Grehan default: 3680366f6083SPeter Grehan break; 3681366f6083SPeter Grehan } 3682366f6083SPeter Grehan 3683cbd03a9dSJohn Baldwin if (retval) 3684cbd03a9dSJohn Baldwin return (retval); 3685cbd03a9dSJohn Baldwin 3686cbd03a9dSJohn Baldwin if (pptr != NULL) { 3687366f6083SPeter Grehan if (val) { 3688366f6083SPeter Grehan baseval |= flag; 3689366f6083SPeter Grehan } else { 3690366f6083SPeter Grehan baseval &= ~flag; 3691366f6083SPeter Grehan } 3692366f6083SPeter Grehan VMPTRLD(vmcs); 3693366f6083SPeter Grehan error = vmwrite(reg, baseval); 3694366f6083SPeter Grehan VMCLEAR(vmcs); 3695366f6083SPeter Grehan 3696cbd03a9dSJohn Baldwin if (error) 3697cbd03a9dSJohn Baldwin return (error); 3698cbd03a9dSJohn Baldwin 3699366f6083SPeter Grehan /* 3700366f6083SPeter Grehan * Update optional stored flags, and record 3701366f6083SPeter Grehan * setting 3702366f6083SPeter Grehan */ 3703366f6083SPeter Grehan *pptr = baseval; 3704366f6083SPeter Grehan } 3705366f6083SPeter Grehan 3706366f6083SPeter Grehan if (val) { 37071aa51504SJohn Baldwin vcpu->cap.set |= (1 << type); 3708366f6083SPeter Grehan } else { 37091aa51504SJohn Baldwin vcpu->cap.set &= ~(1 << type); 3710366f6083SPeter Grehan } 3711366f6083SPeter Grehan 3712cbd03a9dSJohn Baldwin return (0); 3713366f6083SPeter Grehan } 3714366f6083SPeter Grehan 371515add60dSPeter Grehan static struct vmspace * 371615add60dSPeter Grehan vmx_vmspace_alloc(vm_offset_t min, vm_offset_t max) 371715add60dSPeter Grehan { 371815add60dSPeter Grehan return (ept_vmspace_alloc(min, max)); 371915add60dSPeter Grehan } 372015add60dSPeter Grehan 372115add60dSPeter Grehan static void 372215add60dSPeter Grehan vmx_vmspace_free(struct vmspace *vmspace) 
372315add60dSPeter Grehan { 372415add60dSPeter Grehan ept_vmspace_free(vmspace); 372515add60dSPeter Grehan } 372615add60dSPeter Grehan 372788c4b8d1SNeel Natu struct vlapic_vtx { 372888c4b8d1SNeel Natu struct vlapic vlapic; 3729176666c2SNeel Natu struct pir_desc *pir_desc; 37301aa51504SJohn Baldwin struct vmx_vcpu *vcpu; 37312c352febSJohn Baldwin u_int pending_prio; 373288c4b8d1SNeel Natu }; 373388c4b8d1SNeel Natu 37342c352febSJohn Baldwin #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 37352c352febSJohn Baldwin 3736d030f941SJohn Baldwin #define VMX_CTR_PIR(vlapic, pir_desc, notify, vector, level, msg) \ 373788c4b8d1SNeel Natu do { \ 3738d030f941SJohn Baldwin VLAPIC_CTR2(vlapic, msg " assert %s-triggered vector %d", \ 373988c4b8d1SNeel Natu level ? "level" : "edge", vector); \ 3740d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3741d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3742d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3743d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3744d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, msg " notify: %s", notify ? "yes" : "no"); \ 374588c4b8d1SNeel Natu } while (0) 374688c4b8d1SNeel Natu 374788c4b8d1SNeel Natu /* 374888c4b8d1SNeel Natu * vlapic->ops handlers that utilize the APICv hardware assist described in 374988c4b8d1SNeel Natu * Chapter 29 of the Intel SDM. 375088c4b8d1SNeel Natu */ 375188c4b8d1SNeel Natu static int 375288c4b8d1SNeel Natu vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 375388c4b8d1SNeel Natu { 375488c4b8d1SNeel Natu struct vlapic_vtx *vlapic_vtx; 375588c4b8d1SNeel Natu struct pir_desc *pir_desc; 375688c4b8d1SNeel Natu uint64_t mask; 37572c352febSJohn Baldwin int idx, notify = 0; 375888c4b8d1SNeel Natu 375988c4b8d1SNeel Natu vlapic_vtx = (struct vlapic_vtx *)vlapic; 3760176666c2SNeel Natu pir_desc = vlapic_vtx->pir_desc; 376188c4b8d1SNeel Natu 376288c4b8d1SNeel Natu /* 376388c4b8d1SNeel Natu * Keep track of interrupt requests in the PIR descriptor. This is 376488c4b8d1SNeel Natu * because the virtual APIC page pointed to by the VMCS cannot be 376588c4b8d1SNeel Natu * modified if the vcpu is running. 376688c4b8d1SNeel Natu */ 376788c4b8d1SNeel Natu idx = vector / 64; 376888c4b8d1SNeel Natu mask = 1UL << (vector % 64); 376988c4b8d1SNeel Natu atomic_set_long(&pir_desc->pir[idx], mask); 37702c352febSJohn Baldwin 37712c352febSJohn Baldwin /* 37722c352febSJohn Baldwin * A notification is required whenever the 'pending' bit makes a 37732c352febSJohn Baldwin * transition from 0->1. 37742c352febSJohn Baldwin * 37752c352febSJohn Baldwin * Even if the 'pending' bit is already asserted, notification about 37762c352febSJohn Baldwin * the incoming interrupt may still be necessary. For example, if a 37772c352febSJohn Baldwin * vCPU is HLTed with a high PPR, a low priority interrupt would cause 37782c352febSJohn Baldwin * the 0->1 'pending' transition with a notification, but the vCPU 37792c352febSJohn Baldwin * would ignore the interrupt for the time being. The same vCPU would 37802c352febSJohn Baldwin * need to then be notified if a high-priority interrupt arrived which 37812c352febSJohn Baldwin * satisfied the PPR. 37822c352febSJohn Baldwin * 37832c352febSJohn Baldwin * The priorities of interrupts injected while 'pending' is asserted 37842c352febSJohn Baldwin * are tracked in a custom bitfield 'pending_prio'. 
Should the 37852c352febSJohn Baldwin * to-be-injected interrupt exceed the priorities already present, the 37862c352febSJohn Baldwin * notification is sent. The priorities recorded in 'pending_prio' are 37872c352febSJohn Baldwin * cleared whenever the 'pending' bit makes another 0->1 transition. 37882c352febSJohn Baldwin */ 37892c352febSJohn Baldwin if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 37902c352febSJohn Baldwin notify = 1; 37912c352febSJohn Baldwin vlapic_vtx->pending_prio = 0; 37922c352febSJohn Baldwin } else { 37932c352febSJohn Baldwin const u_int old_prio = vlapic_vtx->pending_prio; 37942c352febSJohn Baldwin const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 37952c352febSJohn Baldwin 37962c352febSJohn Baldwin if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 37972c352febSJohn Baldwin atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 37982c352febSJohn Baldwin notify = 1; 37992c352febSJohn Baldwin } 38002c352febSJohn Baldwin } 380188c4b8d1SNeel Natu 3802d030f941SJohn Baldwin VMX_CTR_PIR(vlapic, pir_desc, notify, vector, level, 3803d030f941SJohn Baldwin "vmx_set_intr_ready"); 380488c4b8d1SNeel Natu return (notify); 380588c4b8d1SNeel Natu } 380688c4b8d1SNeel Natu 380788c4b8d1SNeel Natu static int 380888c4b8d1SNeel Natu vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 380988c4b8d1SNeel Natu { 381088c4b8d1SNeel Natu struct vlapic_vtx *vlapic_vtx; 381188c4b8d1SNeel Natu struct pir_desc *pir_desc; 381288c4b8d1SNeel Natu struct LAPIC *lapic; 381388c4b8d1SNeel Natu uint64_t pending, pirval; 38140912408aSVitaliy Gusev uint8_t ppr, vpr, rvi; 38150912408aSVitaliy Gusev struct vm_exit *vmexit; 381688c4b8d1SNeel Natu int i; 381788c4b8d1SNeel Natu 381888c4b8d1SNeel Natu /* 381988c4b8d1SNeel Natu * This function is only expected to be called from the 'HLT' exit 382088c4b8d1SNeel Natu * handler which does not care about the vector that is pending. 382188c4b8d1SNeel Natu */ 382288c4b8d1SNeel Natu KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 382388c4b8d1SNeel Natu 382488c4b8d1SNeel Natu vlapic_vtx = (struct vlapic_vtx *)vlapic; 3825176666c2SNeel Natu pir_desc = vlapic_vtx->pir_desc; 38260912408aSVitaliy Gusev lapic = vlapic->apic_page; 382788c4b8d1SNeel Natu 38289e33a616STycho Nightingale /* 38299e33a616STycho Nightingale * While a virtual interrupt may have already been 38309e33a616STycho Nightingale * processed, the actual delivery may be pending on the 38319e33a616STycho Nightingale * interruptibility of the guest. Recognize a pending 38329e33a616STycho Nightingale * interrupt by reevaluating virtual interrupts 38330912408aSVitaliy Gusev * following Section 30.2.1 in the Intel SDM Volume 3. 
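 *
 * For example, an exit-time RVI of vector 0x81 (priority class
 * 0x80) against a PPR of 0x75 (class 0x70) is reported as pending,
 * since the guest would take the interrupt on VM-entry; with a PPR
 * of 0x95 the RVI test fails and the PIR is consulted instead.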
38349e33a616STycho Nightingale */ 383580cb5d84SJohn Baldwin vmexit = vm_exitinfo(vlapic->vcpu); 3836490768e2STycho Nightingale KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, 3837490768e2STycho Nightingale ("vmx_pending_intr: exitcode not 'HLT'")); 3838490768e2STycho Nightingale rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; 38399e33a616STycho Nightingale ppr = lapic->ppr & APIC_TPR_INT; 38400912408aSVitaliy Gusev if (rvi > ppr) 38419e33a616STycho Nightingale return (1); 38429e33a616STycho Nightingale 38430912408aSVitaliy Gusev pending = atomic_load_acq_long(&pir_desc->pending); 38440912408aSVitaliy Gusev if (!pending) 38459e33a616STycho Nightingale return (0); 384688c4b8d1SNeel Natu 384788c4b8d1SNeel Natu /* 384888c4b8d1SNeel Natu * If there is an interrupt pending then it will be recognized only 384988c4b8d1SNeel Natu * if its priority is greater than the processor priority. 385088c4b8d1SNeel Natu * 385188c4b8d1SNeel Natu * Special case: if the processor priority is zero then any pending 385288c4b8d1SNeel Natu * interrupt will be recognized. 385388c4b8d1SNeel Natu */ 385488c4b8d1SNeel Natu if (ppr == 0) 385588c4b8d1SNeel Natu return (1); 385688c4b8d1SNeel Natu 3857d030f941SJohn Baldwin VLAPIC_CTR1(vlapic, "HLT with non-zero PPR %d", lapic->ppr); 385888c4b8d1SNeel Natu 38592c352febSJohn Baldwin vpr = 0; 386088c4b8d1SNeel Natu for (i = 3; i >= 0; i--) { 386188c4b8d1SNeel Natu pirval = pir_desc->pir[i]; 386288c4b8d1SNeel Natu if (pirval != 0) { 38639e33a616STycho Nightingale vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; 38642c352febSJohn Baldwin break; 386588c4b8d1SNeel Natu } 386688c4b8d1SNeel Natu } 38672c352febSJohn Baldwin 38682c352febSJohn Baldwin /* 38692c352febSJohn Baldwin * If the highest-priority pending interrupt falls short of the 38702c352febSJohn Baldwin * processor priority of this vCPU, ensure that 'pending_prio' does not 38712c352febSJohn Baldwin * have any stale bits which would preclude a higher-priority interrupt 38722c352febSJohn Baldwin * from incurring a notification later. 
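 *
 * For instance, a leftover class-8 bit recorded under an older,
 * lower PPR would otherwise suppress the notification for the next
 * class-8 vector to arrive.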
38732c352febSJohn Baldwin */ 38742c352febSJohn Baldwin if (vpr <= ppr) { 38752c352febSJohn Baldwin const u_int prio_bit = VPR_PRIO_BIT(vpr); 38762c352febSJohn Baldwin const u_int old = vlapic_vtx->pending_prio; 38772c352febSJohn Baldwin 38782c352febSJohn Baldwin if (old > prio_bit && (old & prio_bit) == 0) { 38792c352febSJohn Baldwin vlapic_vtx->pending_prio = prio_bit; 38802c352febSJohn Baldwin } 388188c4b8d1SNeel Natu return (0); 388288c4b8d1SNeel Natu } 38832c352febSJohn Baldwin return (1); 38842c352febSJohn Baldwin } 388588c4b8d1SNeel Natu 388688c4b8d1SNeel Natu static void 388788c4b8d1SNeel Natu vmx_intr_accepted(struct vlapic *vlapic, int vector) 388888c4b8d1SNeel Natu { 388988c4b8d1SNeel Natu 389088c4b8d1SNeel Natu panic("vmx_intr_accepted: not expected to be called"); 389188c4b8d1SNeel Natu } 389288c4b8d1SNeel Natu 3893176666c2SNeel Natu static void 389430b94db8SNeel Natu vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 389530b94db8SNeel Natu { 389630b94db8SNeel Natu struct vlapic_vtx *vlapic_vtx; 389730b94db8SNeel Natu struct vmcs *vmcs; 389830b94db8SNeel Natu uint64_t mask, val; 389930b94db8SNeel Natu 390030b94db8SNeel Natu KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 390180cb5d84SJohn Baldwin KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), 390230b94db8SNeel Natu ("vmx_set_tmr: vcpu cannot be running")); 390330b94db8SNeel Natu 390430b94db8SNeel Natu vlapic_vtx = (struct vlapic_vtx *)vlapic; 39051aa51504SJohn Baldwin vmcs = vlapic_vtx->vcpu->vmcs; 390630b94db8SNeel Natu mask = 1UL << (vector % 64); 390730b94db8SNeel Natu 390830b94db8SNeel Natu VMPTRLD(vmcs); 390930b94db8SNeel Natu val = vmcs_read(VMCS_EOI_EXIT(vector)); 391030b94db8SNeel Natu if (level) 391130b94db8SNeel Natu val |= mask; 391230b94db8SNeel Natu else 391330b94db8SNeel Natu val &= ~mask; 391430b94db8SNeel Natu vmcs_write(VMCS_EOI_EXIT(vector), val); 391530b94db8SNeel Natu VMCLEAR(vmcs); 391630b94db8SNeel Natu } 391730b94db8SNeel Natu 391830b94db8SNeel Natu static void 39191bc51badSMichael Reifenberger vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 39201bc51badSMichael Reifenberger { 39211aa51504SJohn Baldwin struct vlapic_vtx *vlapic_vtx; 39220f00260cSJohn Baldwin struct vmx_vcpu *vcpu; 39231bc51badSMichael Reifenberger struct vmcs *vmcs; 39241bc51badSMichael Reifenberger uint32_t proc_ctls; 39251bc51badSMichael Reifenberger 39261aa51504SJohn Baldwin vlapic_vtx = (struct vlapic_vtx *)vlapic; 39271aa51504SJohn Baldwin vcpu = vlapic_vtx->vcpu; 39280f00260cSJohn Baldwin vmcs = vcpu->vmcs; 39291bc51badSMichael Reifenberger 39300f00260cSJohn Baldwin proc_ctls = vcpu->cap.proc_ctls; 39311bc51badSMichael Reifenberger proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 39321bc51badSMichael Reifenberger proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 39331bc51badSMichael Reifenberger proc_ctls |= PROCBASED_CR8_STORE_EXITING; 39340f00260cSJohn Baldwin vcpu->cap.proc_ctls = proc_ctls; 39351bc51badSMichael Reifenberger 39361bc51badSMichael Reifenberger VMPTRLD(vmcs); 39371bc51badSMichael Reifenberger vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 39381bc51badSMichael Reifenberger VMCLEAR(vmcs); 39391bc51badSMichael Reifenberger } 39401bc51badSMichael Reifenberger 39411bc51badSMichael Reifenberger static void 39421bc51badSMichael Reifenberger vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3943159dd56fSNeel Natu { 39441aa51504SJohn Baldwin struct vlapic_vtx *vlapic_vtx; 3945159dd56fSNeel Natu struct vmx *vmx; 39460f00260cSJohn Baldwin struct vmx_vcpu *vcpu; 3947159dd56fSNeel Natu struct vmcs *vmcs; 
3948159dd56fSNeel Natu uint32_t proc_ctls2; 39491aa51504SJohn Baldwin int error __diagused; 3950159dd56fSNeel Natu 39511aa51504SJohn Baldwin vlapic_vtx = (struct vlapic_vtx *)vlapic; 39521aa51504SJohn Baldwin vcpu = vlapic_vtx->vcpu; 3953869c8d19SJohn Baldwin vmx = vcpu->vmx; 39540f00260cSJohn Baldwin vmcs = vcpu->vmcs; 3955159dd56fSNeel Natu 39560f00260cSJohn Baldwin proc_ctls2 = vcpu->cap.proc_ctls2; 3957159dd56fSNeel Natu KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3958159dd56fSNeel Natu ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3959159dd56fSNeel Natu 3960159dd56fSNeel Natu proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3961159dd56fSNeel Natu proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 39620f00260cSJohn Baldwin vcpu->cap.proc_ctls2 = proc_ctls2; 3963159dd56fSNeel Natu 3964159dd56fSNeel Natu VMPTRLD(vmcs); 3965159dd56fSNeel Natu vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3966159dd56fSNeel Natu VMCLEAR(vmcs); 3967159dd56fSNeel Natu 3968159dd56fSNeel Natu if (vlapic->vcpuid == 0) { 3969159dd56fSNeel Natu /* 3970159dd56fSNeel Natu * The nested page table mappings are shared by all vcpus 3971159dd56fSNeel Natu * so unmap the APIC access page just once. 3972159dd56fSNeel Natu */ 3973159dd56fSNeel Natu error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3974159dd56fSNeel Natu KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3975159dd56fSNeel Natu __func__, error)); 3976159dd56fSNeel Natu 3977159dd56fSNeel Natu /* 3978159dd56fSNeel Natu * The MSR bitmap is shared by all vcpus so modify it only 3979159dd56fSNeel Natu * once in the context of vcpu 0. 3980159dd56fSNeel Natu */ 3981159dd56fSNeel Natu error = vmx_allow_x2apic_msrs(vmx); 3982159dd56fSNeel Natu KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3983159dd56fSNeel Natu __func__, error)); 3984159dd56fSNeel Natu } 3985159dd56fSNeel Natu } 3986159dd56fSNeel Natu 3987159dd56fSNeel Natu static void 3988176666c2SNeel Natu vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3989176666c2SNeel Natu { 3990176666c2SNeel Natu 3991176666c2SNeel Natu ipi_cpu(hostcpu, pirvec); 3992176666c2SNeel Natu } 3993176666c2SNeel Natu 399488c4b8d1SNeel Natu /* 399588c4b8d1SNeel Natu * Transfer the pending interrupts in the PIR descriptor to the IRR 399688c4b8d1SNeel Natu * in the virtual APIC page. 
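 *
 * The sequence is: atomically clear 'pending' (returning early if it
 * was not set), read-and-clear each of the four 64-bit PIR words, OR
 * them into the matching IRR words, and finally raise RVI in the
 * guest interrupt-status field so the processor re-evaluates virtual
 * interrupts on the next VM-entry.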
399788c4b8d1SNeel Natu */ 399888c4b8d1SNeel Natu static void 399988c4b8d1SNeel Natu vmx_inject_pir(struct vlapic *vlapic) 400088c4b8d1SNeel Natu { 400188c4b8d1SNeel Natu struct vlapic_vtx *vlapic_vtx; 400288c4b8d1SNeel Natu struct pir_desc *pir_desc; 400388c4b8d1SNeel Natu struct LAPIC *lapic; 400488c4b8d1SNeel Natu uint64_t val, pirval; 40050e30c5c0SWarner Losh int rvi, pirbase = -1; 400688c4b8d1SNeel Natu uint16_t intr_status_old, intr_status_new; 400788c4b8d1SNeel Natu 400888c4b8d1SNeel Natu vlapic_vtx = (struct vlapic_vtx *)vlapic; 4009176666c2SNeel Natu pir_desc = vlapic_vtx->pir_desc; 401088c4b8d1SNeel Natu if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 4011d030f941SJohn Baldwin VLAPIC_CTR0(vlapic, "vmx_inject_pir: " 401288c4b8d1SNeel Natu "no posted interrupt pending"); 401388c4b8d1SNeel Natu return; 401488c4b8d1SNeel Natu } 401588c4b8d1SNeel Natu 401688c4b8d1SNeel Natu pirval = 0; 4017201b1cccSPeter Grehan pirbase = -1; 401888c4b8d1SNeel Natu lapic = vlapic->apic_page; 401988c4b8d1SNeel Natu 402088c4b8d1SNeel Natu val = atomic_readandclear_long(&pir_desc->pir[0]); 402188c4b8d1SNeel Natu if (val != 0) { 402288c4b8d1SNeel Natu lapic->irr0 |= val; 402388c4b8d1SNeel Natu lapic->irr1 |= val >> 32; 402488c4b8d1SNeel Natu pirbase = 0; 402588c4b8d1SNeel Natu pirval = val; 402688c4b8d1SNeel Natu } 402788c4b8d1SNeel Natu 402888c4b8d1SNeel Natu val = atomic_readandclear_long(&pir_desc->pir[1]); 402988c4b8d1SNeel Natu if (val != 0) { 403088c4b8d1SNeel Natu lapic->irr2 |= val; 403188c4b8d1SNeel Natu lapic->irr3 |= val >> 32; 403288c4b8d1SNeel Natu pirbase = 64; 403388c4b8d1SNeel Natu pirval = val; 403488c4b8d1SNeel Natu } 403588c4b8d1SNeel Natu 403688c4b8d1SNeel Natu val = atomic_readandclear_long(&pir_desc->pir[2]); 403788c4b8d1SNeel Natu if (val != 0) { 403888c4b8d1SNeel Natu lapic->irr4 |= val; 403988c4b8d1SNeel Natu lapic->irr5 |= val >> 32; 404088c4b8d1SNeel Natu pirbase = 128; 404188c4b8d1SNeel Natu pirval = val; 404288c4b8d1SNeel Natu } 404388c4b8d1SNeel Natu 404488c4b8d1SNeel Natu val = atomic_readandclear_long(&pir_desc->pir[3]); 404588c4b8d1SNeel Natu if (val != 0) { 404688c4b8d1SNeel Natu lapic->irr6 |= val; 404788c4b8d1SNeel Natu lapic->irr7 |= val >> 32; 404888c4b8d1SNeel Natu pirbase = 192; 404988c4b8d1SNeel Natu pirval = val; 405088c4b8d1SNeel Natu } 4051201b1cccSPeter Grehan 405288c4b8d1SNeel Natu VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 405388c4b8d1SNeel Natu 405488c4b8d1SNeel Natu /* 405588c4b8d1SNeel Natu * Update RVI so the processor can evaluate pending virtual 405688c4b8d1SNeel Natu * interrupts on VM-entry. 4057201b1cccSPeter Grehan * 4058201b1cccSPeter Grehan * It is possible for pirval to be 0 here, even though the 4059201b1cccSPeter Grehan * pending bit has been set. The scenario is: 4060201b1cccSPeter Grehan * CPU-Y is sending a posted interrupt to CPU-X, which 4061201b1cccSPeter Grehan * is running a guest and processing posted interrupts in h/w. 4062201b1cccSPeter Grehan * CPU-X will eventually exit and the state seen in s/w is 4063201b1cccSPeter Grehan * the pending bit set, but no PIR bits set. 
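 * The interleaving looks like this: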
4064201b1cccSPeter Grehan * 4065201b1cccSPeter Grehan * CPU-X CPU-Y 4066201b1cccSPeter Grehan * (vm running) (host running) 4067201b1cccSPeter Grehan * rx posted interrupt 4068201b1cccSPeter Grehan * CLEAR pending bit 4069201b1cccSPeter Grehan * SET PIR bit 4070201b1cccSPeter Grehan * READ/CLEAR PIR bits 4071201b1cccSPeter Grehan * SET pending bit 4072201b1cccSPeter Grehan * (vm exit) 4073201b1cccSPeter Grehan * pending bit set, PIR 0 407488c4b8d1SNeel Natu */ 407588c4b8d1SNeel Natu if (pirval != 0) { 407688c4b8d1SNeel Natu rvi = pirbase + flsl(pirval) - 1; 407788c4b8d1SNeel Natu intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 407888c4b8d1SNeel Natu intr_status_new = (intr_status_old & 0xFF00) | rvi; 407988c4b8d1SNeel Natu if (intr_status_new > intr_status_old) { 408088c4b8d1SNeel Natu vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 4081d030f941SJohn Baldwin VLAPIC_CTR2(vlapic, "vmx_inject_pir: " 408288c4b8d1SNeel Natu "guest_intr_status changed from 0x%04x to 0x%04x", 408388c4b8d1SNeel Natu intr_status_old, intr_status_new); 408488c4b8d1SNeel Natu } 408588c4b8d1SNeel Natu } 408688c4b8d1SNeel Natu } 408788c4b8d1SNeel Natu 4088de5ea6b6SNeel Natu static struct vlapic * 4089869c8d19SJohn Baldwin vmx_vlapic_init(void *vcpui) 4090de5ea6b6SNeel Natu { 4091de5ea6b6SNeel Natu struct vmx *vmx; 40921aa51504SJohn Baldwin struct vmx_vcpu *vcpu; 4093de5ea6b6SNeel Natu struct vlapic *vlapic; 4094176666c2SNeel Natu struct vlapic_vtx *vlapic_vtx; 4095de5ea6b6SNeel Natu 40961aa51504SJohn Baldwin vcpu = vcpui; 4097869c8d19SJohn Baldwin vmx = vcpu->vmx; 4098de5ea6b6SNeel Natu 409988c4b8d1SNeel Natu vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 4100de5ea6b6SNeel Natu vlapic->vm = vmx->vm; 4101950af9ffSJohn Baldwin vlapic->vcpu = vcpu->vcpu; 41021aa51504SJohn Baldwin vlapic->vcpuid = vcpu->vcpuid; 41031aa51504SJohn Baldwin vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; 4104de5ea6b6SNeel Natu 4105176666c2SNeel Natu vlapic_vtx = (struct vlapic_vtx *)vlapic; 41061aa51504SJohn Baldwin vlapic_vtx->pir_desc = vcpu->pir_desc; 41071aa51504SJohn Baldwin vlapic_vtx->vcpu = vcpu; 4108176666c2SNeel Natu 41091bc51badSMichael Reifenberger if (tpr_shadowing) { 41101bc51badSMichael Reifenberger vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 41111bc51badSMichael Reifenberger } 41121bc51badSMichael Reifenberger 411388c4b8d1SNeel Natu if (virtual_interrupt_delivery) { 411488c4b8d1SNeel Natu vlapic->ops.set_intr_ready = vmx_set_intr_ready; 411588c4b8d1SNeel Natu vlapic->ops.pending_intr = vmx_pending_intr; 411688c4b8d1SNeel Natu vlapic->ops.intr_accepted = vmx_intr_accepted; 411730b94db8SNeel Natu vlapic->ops.set_tmr = vmx_set_tmr; 41181bc51badSMichael Reifenberger vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 411988c4b8d1SNeel Natu } 412088c4b8d1SNeel Natu 4121176666c2SNeel Natu if (posted_interrupts) 4122176666c2SNeel Natu vlapic->ops.post_intr = vmx_post_intr; 4123176666c2SNeel Natu 4124de5ea6b6SNeel Natu vlapic_init(vlapic); 4125de5ea6b6SNeel Natu 4126de5ea6b6SNeel Natu return (vlapic); 4127de5ea6b6SNeel Natu } 4128de5ea6b6SNeel Natu 4129de5ea6b6SNeel Natu static void 4130869c8d19SJohn Baldwin vmx_vlapic_cleanup(struct vlapic *vlapic) 4131de5ea6b6SNeel Natu { 4132de5ea6b6SNeel Natu 4133de5ea6b6SNeel Natu vlapic_cleanup(vlapic); 4134de5ea6b6SNeel Natu free(vlapic, M_VLAPIC); 4135de5ea6b6SNeel Natu } 4136de5ea6b6SNeel Natu 4137483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 4138483d953aSJohn Baldwin static int 4139869c8d19SJohn Baldwin vmx_vcpu_snapshot(void 
*vcpui, struct vm_snapshot_meta *meta) 4140483d953aSJohn Baldwin { 4141483d953aSJohn Baldwin struct vmcs *vmcs; 4142483d953aSJohn Baldwin struct vmx *vmx; 414339ec056eSJohn Baldwin struct vmx_vcpu *vcpu; 414439ec056eSJohn Baldwin struct vmxctx *vmxctx; 4145483d953aSJohn Baldwin int err, run, hostcpu; 4146483d953aSJohn Baldwin 4147483d953aSJohn Baldwin err = 0; 4148869c8d19SJohn Baldwin vcpu = vcpui; 4149869c8d19SJohn Baldwin vmx = vcpu->vmx; 415039ec056eSJohn Baldwin vmcs = vcpu->vmcs; 4151483d953aSJohn Baldwin 415280cb5d84SJohn Baldwin run = vcpu_is_running(vcpu->vcpu, &hostcpu); 4153483d953aSJohn Baldwin if (run && hostcpu != curcpu) { 415439ec056eSJohn Baldwin printf("%s: %s%d is running", __func__, vm_name(vmx->vm), 41551aa51504SJohn Baldwin vcpu->vcpuid); 4156483d953aSJohn Baldwin return (EINVAL); 4157483d953aSJohn Baldwin } 4158483d953aSJohn Baldwin 4159483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR0, meta); 4160483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR3, meta); 4161483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR4, meta); 4162483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DR7, meta); 4163483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RSP, meta); 4164483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RIP, meta); 4165483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RFLAGS, meta); 4166483d953aSJohn Baldwin 4167483d953aSJohn Baldwin /* Guest segments */ 4168483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_ES, meta); 4169483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_ES, meta); 4170483d953aSJohn Baldwin 4171483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CS, meta); 4172483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_CS, meta); 4173483d953aSJohn Baldwin 4174483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_SS, meta); 4175483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_SS, meta); 4176483d953aSJohn Baldwin 4177483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DS, meta); 4178483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_DS, meta); 4179483d953aSJohn Baldwin 4180483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_FS, meta); 4181483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_FS, meta); 4182483d953aSJohn Baldwin 4183483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_GS, meta); 4184483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GS, meta); 4185483d953aSJohn Baldwin 4186483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_TR, meta); 4187483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_TR, meta); 4188483d953aSJohn Baldwin 4189483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_LDTR, meta); 4190483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_LDTR, meta); 4191483d953aSJohn Baldwin 4192483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_EFER, meta); 4193483d953aSJohn Baldwin 4194483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_IDTR, meta); 4195483d953aSJohn Baldwin err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GDTR, meta); 4196483d953aSJohn Baldwin 4197483d953aSJohn Baldwin /* Guest page tables */ 4198483d953aSJohn Baldwin err += 
vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE0, meta); 4199483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE1, meta); 4200483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE2, meta); 4201483d953aSJohn Baldwin err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE3, meta); 4202483d953aSJohn Baldwin 4203483d953aSJohn Baldwin /* Other guest state */ 4204483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_CS, meta); 4205483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_ESP, meta); 4206483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_EIP, meta); 4207483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_INTERRUPTIBILITY, meta); 4208483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_ACTIVITY, meta); 4209483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_ENTRY_CTLS, meta); 4210483d953aSJohn Baldwin err += vmcs_snapshot_any(vmcs, run, VMCS_EXIT_CTLS, meta); 421139ec056eSJohn Baldwin if (err != 0) 421239ec056eSJohn Baldwin goto done; 4213483d953aSJohn Baldwin 421439ec056eSJohn Baldwin SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, 421539ec056eSJohn Baldwin sizeof(vcpu->guest_msrs), meta, err, done); 421639ec056eSJohn Baldwin 4217c543e09fSVitaliy Gusev SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, 4218c543e09fSVitaliy Gusev sizeof(*vcpu->pir_desc), meta, err, done); 4219c543e09fSVitaliy Gusev 4220683ea4d2SVitaliy Gusev SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, 4221683ea4d2SVitaliy Gusev sizeof(vcpu->mtrr), meta, err, done); 4222683ea4d2SVitaliy Gusev 422339ec056eSJohn Baldwin vmxctx = &vcpu->ctx; 422439ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdi, meta, err, done); 422539ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rsi, meta, err, done); 422639ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdx, meta, err, done); 422739ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rcx, meta, err, done); 422839ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r8, meta, err, done); 422939ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r9, meta, err, done); 423039ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rax, meta, err, done); 423139ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbx, meta, err, done); 423239ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbp, meta, err, done); 423339ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r10, meta, err, done); 423439ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r11, meta, err, done); 423539ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r12, meta, err, done); 423639ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r13, meta, err, done); 423739ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r14, meta, err, done); 423839ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r15, meta, err, done); 423939ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_cr2, meta, err, done); 424039ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr0, meta, err, done); 424139ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr1, meta, err, done); 424239ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr2, meta, err, done); 424339ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr3, meta, err, done); 424439ec056eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr6, meta, err, done); 424539ec056eSJohn Baldwin 424639ec056eSJohn Baldwin done: 4247483d953aSJohn Baldwin 
return (err); 4248483d953aSJohn Baldwin } 4249483d953aSJohn Baldwin 4250483d953aSJohn Baldwin static int 4251869c8d19SJohn Baldwin vmx_restore_tsc(void *vcpui, uint64_t offset) 4252483d953aSJohn Baldwin { 42531aa51504SJohn Baldwin struct vmx_vcpu *vcpu = vcpui; 4254869c8d19SJohn Baldwin struct vmcs *vmcs; 4255869c8d19SJohn Baldwin struct vmx *vmx; 4256483d953aSJohn Baldwin int error, running, hostcpu; 4257483d953aSJohn Baldwin 4258869c8d19SJohn Baldwin vmx = vcpu->vmx; 42591aa51504SJohn Baldwin vmcs = vcpu->vmcs; 4260483d953aSJohn Baldwin 426180cb5d84SJohn Baldwin running = vcpu_is_running(vcpu->vcpu, &hostcpu); 4262483d953aSJohn Baldwin if (running && hostcpu != curcpu) { 42631aa51504SJohn Baldwin printf("%s: %s%d is running", __func__, vm_name(vmx->vm), 42641aa51504SJohn Baldwin vcpu->vcpuid); 4265483d953aSJohn Baldwin return (EINVAL); 4266483d953aSJohn Baldwin } 4267483d953aSJohn Baldwin 4268483d953aSJohn Baldwin if (!running) 4269483d953aSJohn Baldwin VMPTRLD(vmcs); 4270483d953aSJohn Baldwin 427180cb5d84SJohn Baldwin error = vmx_set_tsc_offset(vcpu, offset); 4272483d953aSJohn Baldwin 4273483d953aSJohn Baldwin if (!running) 4274483d953aSJohn Baldwin VMCLEAR(vmcs); 4275483d953aSJohn Baldwin return (error); 4276483d953aSJohn Baldwin } 4277483d953aSJohn Baldwin #endif 4278483d953aSJohn Baldwin 427915add60dSPeter Grehan const struct vmm_ops vmm_ops_intel = { 428015add60dSPeter Grehan .modinit = vmx_modinit, 428115add60dSPeter Grehan .modcleanup = vmx_modcleanup, 4282*0b32ef71SJoshua Rogers .modsuspend = vmx_modsuspend, 428315add60dSPeter Grehan .modresume = vmx_modresume, 428413a7c4d4SMark Johnston .init = vmx_init, 428515add60dSPeter Grehan .run = vmx_run, 428613a7c4d4SMark Johnston .cleanup = vmx_cleanup, 42871aa51504SJohn Baldwin .vcpu_init = vmx_vcpu_init, 42881aa51504SJohn Baldwin .vcpu_cleanup = vmx_vcpu_cleanup, 428915add60dSPeter Grehan .getreg = vmx_getreg, 429015add60dSPeter Grehan .setreg = vmx_setreg, 429115add60dSPeter Grehan .getdesc = vmx_getdesc, 429215add60dSPeter Grehan .setdesc = vmx_setdesc, 429315add60dSPeter Grehan .getcap = vmx_getcap, 429415add60dSPeter Grehan .setcap = vmx_setcap, 429515add60dSPeter Grehan .vmspace_alloc = vmx_vmspace_alloc, 429615add60dSPeter Grehan .vmspace_free = vmx_vmspace_free, 429713a7c4d4SMark Johnston .vlapic_init = vmx_vlapic_init, 429813a7c4d4SMark Johnston .vlapic_cleanup = vmx_vlapic_cleanup, 4299483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 430039ec056eSJohn Baldwin .vcpu_snapshot = vmx_vcpu_snapshot, 430115add60dSPeter Grehan .restore_tsc = vmx_restore_tsc, 4302483d953aSJohn Baldwin #endif 4303366f6083SPeter Grehan }; 4304
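/*
 * A minimal sketch of how this ops table is consumed by the machine-
 * independent vmm code; the shape follows sys/amd64/vmm/vmm.c, but the
 * names below are approximate rather than a verbatim quote of that
 * file:
 *
 *	if (vmm_is_intel())
 *		ops = &vmm_ops_intel;
 *	else if (vmm_is_svm())
 *		ops = &vmm_ops_amd;
 *	else
 *		return (ENXIO);
 *
 * Every backend entry point is then reached through the table (for
 * example vmx_init() when a VM is created and vmx_run() on each
 * VM-entry), so nothing outside this file calls the vmx_* functions
 * by name.
 */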