/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int		exception_pending; /* (i) exception pending */
	int		exc_vector;	/* (x) exception collateral */
	int		exc_errcode_valid;
	uint32_t	exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
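
/*
 * 'ops' points at the hardware-specific backend (vmm_ops_intel or
 * vmm_ops_amd, selected in vmm_init() below).  The wrapper macros that
 * follow dispatch through it and degrade gracefully (0/NULL/ENXIO) when
 * no backend has been selected.
 */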
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}
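
/*
 * Registered as 'vmm_resume_p' in vmm_init() below; it lets the backend
 * restore hardware virtualization state when the host resumes.
 */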
static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
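	 * (fpu_start_emulating() sets CR0.TS, so stray host FPU use raises
	 * #NM instead of silently clobbering the guest's registers.)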
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
"accessed" : "dirty", 12279d8d8e3eSNeel Natu vme->u.paging.gpa); 1228318224bbSNeel Natu goto done; 1229318224bbSNeel Natu } 12309d8d8e3eSNeel Natu } 1231318224bbSNeel Natu 1232318224bbSNeel Natu map = &vm->vmspace->vm_map; 1233318224bbSNeel Natu rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL); 1234318224bbSNeel Natu 1235513c8d33SNeel Natu VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, " 1236513c8d33SNeel Natu "ftype = %d", rv, vme->u.paging.gpa, ftype); 1237318224bbSNeel Natu 1238318224bbSNeel Natu if (rv != KERN_SUCCESS) 1239318224bbSNeel Natu return (EFAULT); 1240318224bbSNeel Natu done: 1241318224bbSNeel Natu return (0); 1242318224bbSNeel Natu } 1243318224bbSNeel Natu 1244318224bbSNeel Natu static int 1245becd9849SNeel Natu vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu) 1246318224bbSNeel Natu { 1247318224bbSNeel Natu struct vie *vie; 1248318224bbSNeel Natu struct vcpu *vcpu; 1249318224bbSNeel Natu struct vm_exit *vme; 1250e813a873SNeel Natu uint64_t gla, gpa; 1251e813a873SNeel Natu struct vm_guest_paging *paging; 1252565bbb86SNeel Natu mem_region_read_t mread; 1253565bbb86SNeel Natu mem_region_write_t mwrite; 1254f7a9f178SNeel Natu enum vm_cpu_mode cpu_mode; 1255c2a875f9SNeel Natu int cs_d, error, length; 1256318224bbSNeel Natu 1257318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1258318224bbSNeel Natu vme = &vcpu->exitinfo; 1259318224bbSNeel Natu 1260318224bbSNeel Natu gla = vme->u.inst_emul.gla; 1261318224bbSNeel Natu gpa = vme->u.inst_emul.gpa; 1262f7a9f178SNeel Natu cs_d = vme->u.inst_emul.cs_d; 1263318224bbSNeel Natu vie = &vme->u.inst_emul.vie; 1264e813a873SNeel Natu paging = &vme->u.inst_emul.paging; 1265f7a9f178SNeel Natu cpu_mode = paging->cpu_mode; 1266318224bbSNeel Natu 12679d8d8e3eSNeel Natu VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa); 12689d8d8e3eSNeel Natu 1269318224bbSNeel Natu /* Fetch, decode and emulate the faulting instruction */ 1270c2a875f9SNeel Natu if (vie->num_valid == 0) { 1271c2a875f9SNeel Natu /* 1272c2a875f9SNeel Natu * If the instruction length is not known then assume a 1273c2a875f9SNeel Natu * maximum size instruction. 1274c2a875f9SNeel Natu */ 1275c2a875f9SNeel Natu length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE; 1276e813a873SNeel Natu error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip, 1277c2a875f9SNeel Natu length, vie); 1278c2a875f9SNeel Natu } else { 1279c2a875f9SNeel Natu /* 1280c2a875f9SNeel Natu * The instruction bytes have already been copied into 'vie' 1281c2a875f9SNeel Natu */ 1282c2a875f9SNeel Natu error = 0; 1283c2a875f9SNeel Natu } 1284fd949af6SNeel Natu if (error == 1) 1285fd949af6SNeel Natu return (0); /* Resume guest to handle page fault */ 1286fd949af6SNeel Natu else if (error == -1) 1287318224bbSNeel Natu return (EFAULT); 1288fd949af6SNeel Natu else if (error != 0) 1289fd949af6SNeel Natu panic("%s: vmm_fetch_instruction error %d", __func__, error); 1290318224bbSNeel Natu 1291f7a9f178SNeel Natu if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) 1292318224bbSNeel Natu return (EFAULT); 1293318224bbSNeel Natu 1294a0b78f09SPeter Grehan /* 1295*d087a399SNeel Natu * If the instruction length was not specified then update it now 1296*d087a399SNeel Natu * along with 'nextrip'. 
1297a0b78f09SPeter Grehan */ 1298*d087a399SNeel Natu if (vme->inst_length == 0) { 1299a0b78f09SPeter Grehan vme->inst_length = vie->num_processed; 1300*d087a399SNeel Natu vcpu->nextrip += vie->num_processed; 1301*d087a399SNeel Natu } 1302a0b78f09SPeter Grehan 130308e3ff32SNeel Natu /* return to userland unless this is an in-kernel emulated device */ 1304565bbb86SNeel Natu if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) { 1305565bbb86SNeel Natu mread = lapic_mmio_read; 1306565bbb86SNeel Natu mwrite = lapic_mmio_write; 1307565bbb86SNeel Natu } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) { 1308565bbb86SNeel Natu mread = vioapic_mmio_read; 1309565bbb86SNeel Natu mwrite = vioapic_mmio_write; 131008e3ff32SNeel Natu } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) { 131108e3ff32SNeel Natu mread = vhpet_mmio_read; 131208e3ff32SNeel Natu mwrite = vhpet_mmio_write; 1313565bbb86SNeel Natu } else { 1314becd9849SNeel Natu *retu = true; 1315318224bbSNeel Natu return (0); 1316318224bbSNeel Natu } 1317318224bbSNeel Natu 1318d665d229SNeel Natu error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging, 1319d665d229SNeel Natu mread, mwrite, retu); 1320318224bbSNeel Natu 1321318224bbSNeel Natu return (error); 1322318224bbSNeel Natu } 1323318224bbSNeel Natu 1324b15a09c0SNeel Natu static int 1325b15a09c0SNeel Natu vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu) 1326b15a09c0SNeel Natu { 1327b15a09c0SNeel Natu int i, done; 1328b15a09c0SNeel Natu struct vcpu *vcpu; 1329b15a09c0SNeel Natu 1330b15a09c0SNeel Natu done = 0; 1331b15a09c0SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1332b15a09c0SNeel Natu 1333b15a09c0SNeel Natu CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); 1334b15a09c0SNeel Natu 1335b15a09c0SNeel Natu /* 1336b15a09c0SNeel Natu * Wait until all 'active_cpus' have suspended themselves. 1337b15a09c0SNeel Natu * 1338b15a09c0SNeel Natu * Since a VM may be suspended at any time including when one or 1339b15a09c0SNeel Natu * more vcpus are doing a rendezvous we need to call the rendezvous 1340b15a09c0SNeel Natu * handler while we are waiting to prevent a deadlock. 1341b15a09c0SNeel Natu */ 1342b15a09c0SNeel Natu vcpu_lock(vcpu); 1343b15a09c0SNeel Natu while (1) { 1344b15a09c0SNeel Natu if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { 1345b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "All vcpus suspended"); 1346b15a09c0SNeel Natu break; 1347b15a09c0SNeel Natu } 1348b15a09c0SNeel Natu 1349b15a09c0SNeel Natu if (vm->rendezvous_func == NULL) { 1350b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "Sleeping during suspend"); 1351b15a09c0SNeel Natu vcpu_require_state_locked(vcpu, VCPU_SLEEPING); 1352b15a09c0SNeel Natu msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); 1353b15a09c0SNeel Natu vcpu_require_state_locked(vcpu, VCPU_FROZEN); 1354b15a09c0SNeel Natu } else { 1355b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend"); 1356b15a09c0SNeel Natu vcpu_unlock(vcpu); 1357b15a09c0SNeel Natu vm_handle_rendezvous(vm, vcpuid); 1358b15a09c0SNeel Natu vcpu_lock(vcpu); 1359b15a09c0SNeel Natu } 1360b15a09c0SNeel Natu } 1361b15a09c0SNeel Natu vcpu_unlock(vcpu); 1362b15a09c0SNeel Natu 1363b15a09c0SNeel Natu /* 1364b15a09c0SNeel Natu * Wakeup the other sleeping vcpus and return to userspace. 
1365b15a09c0SNeel Natu */ 1366b15a09c0SNeel Natu for (i = 0; i < VM_MAXCPU; i++) { 1367b15a09c0SNeel Natu if (CPU_ISSET(i, &vm->suspended_cpus)) { 1368b15a09c0SNeel Natu vcpu_notify_event(vm, i, false); 1369b15a09c0SNeel Natu } 1370b15a09c0SNeel Natu } 1371b15a09c0SNeel Natu 1372b15a09c0SNeel Natu *retu = true; 1373b15a09c0SNeel Natu return (0); 1374b15a09c0SNeel Natu } 1375b15a09c0SNeel Natu 1376b15a09c0SNeel Natu int 1377f0fdcfe2SNeel Natu vm_suspend(struct vm *vm, enum vm_suspend_how how) 1378b15a09c0SNeel Natu { 1379f0fdcfe2SNeel Natu int i; 1380b15a09c0SNeel Natu 1381f0fdcfe2SNeel Natu if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) 1382f0fdcfe2SNeel Natu return (EINVAL); 1383f0fdcfe2SNeel Natu 1384f0fdcfe2SNeel Natu if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { 1385f0fdcfe2SNeel Natu VM_CTR2(vm, "virtual machine already suspended %d/%d", 1386f0fdcfe2SNeel Natu vm->suspend, how); 1387b15a09c0SNeel Natu return (EALREADY); 1388b15a09c0SNeel Natu } 1389f0fdcfe2SNeel Natu 1390f0fdcfe2SNeel Natu VM_CTR1(vm, "virtual machine successfully suspended %d", how); 1391f0fdcfe2SNeel Natu 1392f0fdcfe2SNeel Natu /* 1393f0fdcfe2SNeel Natu * Notify all active vcpus that they are now suspended. 1394f0fdcfe2SNeel Natu */ 1395f0fdcfe2SNeel Natu for (i = 0; i < VM_MAXCPU; i++) { 1396f0fdcfe2SNeel Natu if (CPU_ISSET(i, &vm->active_cpus)) 1397f0fdcfe2SNeel Natu vcpu_notify_event(vm, i, false); 1398f0fdcfe2SNeel Natu } 1399f0fdcfe2SNeel Natu 1400f0fdcfe2SNeel Natu return (0); 1401f0fdcfe2SNeel Natu } 1402f0fdcfe2SNeel Natu 1403f0fdcfe2SNeel Natu void 1404f0fdcfe2SNeel Natu vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip) 1405f0fdcfe2SNeel Natu { 1406f0fdcfe2SNeel Natu struct vm_exit *vmexit; 1407f0fdcfe2SNeel Natu 1408f0fdcfe2SNeel Natu KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, 1409f0fdcfe2SNeel Natu ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); 1410f0fdcfe2SNeel Natu 1411f0fdcfe2SNeel Natu vmexit = vm_exitinfo(vm, vcpuid); 1412f0fdcfe2SNeel Natu vmexit->rip = rip; 1413f0fdcfe2SNeel Natu vmexit->inst_length = 0; 1414f0fdcfe2SNeel Natu vmexit->exitcode = VM_EXITCODE_SUSPENDED; 1415f0fdcfe2SNeel Natu vmexit->u.suspended.how = vm->suspend; 1416b15a09c0SNeel Natu } 1417b15a09c0SNeel Natu 141840487465SNeel Natu void 141940487465SNeel Natu vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip) 142040487465SNeel Natu { 142140487465SNeel Natu struct vm_exit *vmexit; 142240487465SNeel Natu 142340487465SNeel Natu KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress")); 142440487465SNeel Natu 142540487465SNeel Natu vmexit = vm_exitinfo(vm, vcpuid); 142640487465SNeel Natu vmexit->rip = rip; 142740487465SNeel Natu vmexit->inst_length = 0; 142840487465SNeel Natu vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 142940487465SNeel Natu vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1); 143040487465SNeel Natu } 143140487465SNeel Natu 143240487465SNeel Natu void 143340487465SNeel Natu vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip) 143440487465SNeel Natu { 143540487465SNeel Natu struct vm_exit *vmexit; 143640487465SNeel Natu 143740487465SNeel Natu vmexit = vm_exitinfo(vm, vcpuid); 143840487465SNeel Natu vmexit->rip = rip; 143940487465SNeel Natu vmexit->inst_length = 0; 144040487465SNeel Natu vmexit->exitcode = VM_EXITCODE_BOGUS; 144140487465SNeel Natu vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1); 144240487465SNeel Natu } 144340487465SNeel Natu 1444318224bbSNeel Natu int 1445318224bbSNeel Natu vm_run(struct vm *vm, struct vm_run *vmrun) 
1446318224bbSNeel Natu { 1447318224bbSNeel Natu int error, vcpuid; 1448318224bbSNeel Natu struct vcpu *vcpu; 1449318224bbSNeel Natu struct pcb *pcb; 1450*d087a399SNeel Natu uint64_t tscval; 1451318224bbSNeel Natu struct vm_exit *vme; 1452becd9849SNeel Natu bool retu, intr_disabled; 1453318224bbSNeel Natu pmap_t pmap; 1454b15a09c0SNeel Natu void *rptr, *sptr; 1455318224bbSNeel Natu 1456318224bbSNeel Natu vcpuid = vmrun->cpuid; 1457318224bbSNeel Natu 1458318224bbSNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1459318224bbSNeel Natu return (EINVAL); 1460318224bbSNeel Natu 146195ebc360SNeel Natu if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 146295ebc360SNeel Natu return (EINVAL); 146395ebc360SNeel Natu 146495ebc360SNeel Natu if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) 146595ebc360SNeel Natu return (EINVAL); 146695ebc360SNeel Natu 1467b15a09c0SNeel Natu rptr = &vm->rendezvous_func; 1468b15a09c0SNeel Natu sptr = &vm->suspend; 1469318224bbSNeel Natu pmap = vmspace_pmap(vm->vmspace); 1470318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1471318224bbSNeel Natu vme = &vcpu->exitinfo; 1472318224bbSNeel Natu restart: 1473318224bbSNeel Natu critical_enter(); 1474318224bbSNeel Natu 1475318224bbSNeel Natu KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), 1476318224bbSNeel Natu ("vm_run: absurd pm_active")); 1477318224bbSNeel Natu 1478318224bbSNeel Natu tscval = rdtsc(); 1479318224bbSNeel Natu 1480318224bbSNeel Natu pcb = PCPU_GET(curpcb); 1481318224bbSNeel Natu set_pcb_flags(pcb, PCB_FULL_IRET); 1482318224bbSNeel Natu 1483318224bbSNeel Natu restore_guest_fpustate(vcpu); 1484318224bbSNeel Natu 1485318224bbSNeel Natu vcpu_require_state(vm, vcpuid, VCPU_RUNNING); 1486*d087a399SNeel Natu error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, rptr, sptr); 1487318224bbSNeel Natu vcpu_require_state(vm, vcpuid, VCPU_FROZEN); 1488318224bbSNeel Natu 1489318224bbSNeel Natu save_guest_fpustate(vcpu); 1490318224bbSNeel Natu 1491318224bbSNeel Natu vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); 1492318224bbSNeel Natu 1493318224bbSNeel Natu critical_exit(); 1494318224bbSNeel Natu 1495318224bbSNeel Natu if (error == 0) { 1496becd9849SNeel Natu retu = false; 1497*d087a399SNeel Natu vcpu->nextrip = vme->rip + vme->inst_length; 1498318224bbSNeel Natu switch (vme->exitcode) { 1499b15a09c0SNeel Natu case VM_EXITCODE_SUSPENDED: 1500b15a09c0SNeel Natu error = vm_handle_suspend(vm, vcpuid, &retu); 1501b15a09c0SNeel Natu break; 150230b94db8SNeel Natu case VM_EXITCODE_IOAPIC_EOI: 150330b94db8SNeel Natu vioapic_process_eoi(vm, vcpuid, 150430b94db8SNeel Natu vme->u.ioapic_eoi.vector); 150530b94db8SNeel Natu break; 15065b8a8cd1SNeel Natu case VM_EXITCODE_RENDEZVOUS: 15075b8a8cd1SNeel Natu vm_handle_rendezvous(vm, vcpuid); 15085b8a8cd1SNeel Natu error = 0; 15095b8a8cd1SNeel Natu break; 1510318224bbSNeel Natu case VM_EXITCODE_HLT: 1511becd9849SNeel Natu intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); 15121c052192SNeel Natu error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu); 1513318224bbSNeel Natu break; 1514318224bbSNeel Natu case VM_EXITCODE_PAGING: 1515318224bbSNeel Natu error = vm_handle_paging(vm, vcpuid, &retu); 1516318224bbSNeel Natu break; 1517318224bbSNeel Natu case VM_EXITCODE_INST_EMUL: 1518318224bbSNeel Natu error = vm_handle_inst_emul(vm, vcpuid, &retu); 1519318224bbSNeel Natu break; 1520d17b5104SNeel Natu case VM_EXITCODE_INOUT: 1521d17b5104SNeel Natu case VM_EXITCODE_INOUT_STR: 1522d17b5104SNeel Natu error = vm_handle_inout(vm, vcpuid, vme, &retu); 1523d17b5104SNeel Natu break; 152465145c7fSNeel Natu case 
VM_EXITCODE_MONITOR: 152565145c7fSNeel Natu case VM_EXITCODE_MWAIT: 152665145c7fSNeel Natu vm_inject_ud(vm, vcpuid); 152765145c7fSNeel Natu break; 1528318224bbSNeel Natu default: 1529becd9849SNeel Natu retu = true; /* handled in userland */ 1530318224bbSNeel Natu break; 1531318224bbSNeel Natu } 1532318224bbSNeel Natu } 1533318224bbSNeel Natu 1534*d087a399SNeel Natu if (error == 0 && retu == false) 1535f76fc5d4SNeel Natu goto restart; 1536f76fc5d4SNeel Natu 1537318224bbSNeel Natu /* copy the exit information */ 1538318224bbSNeel Natu bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit)); 1539366f6083SPeter Grehan return (error); 1540366f6083SPeter Grehan } 1541366f6083SPeter Grehan 1542366f6083SPeter Grehan int 1543c9c75df4SNeel Natu vm_restart_instruction(void *arg, int vcpuid) 1544c9c75df4SNeel Natu { 1545*d087a399SNeel Natu struct vm *vm; 1546c9c75df4SNeel Natu struct vcpu *vcpu; 1547*d087a399SNeel Natu enum vcpu_state state; 1548*d087a399SNeel Natu uint64_t rip; 1549*d087a399SNeel Natu int error; 1550c9c75df4SNeel Natu 1551*d087a399SNeel Natu vm = arg; 1552c9c75df4SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1553c9c75df4SNeel Natu return (EINVAL); 1554c9c75df4SNeel Natu 1555c9c75df4SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1556*d087a399SNeel Natu state = vcpu_get_state(vm, vcpuid, NULL); 1557*d087a399SNeel Natu if (state == VCPU_RUNNING) { 1558*d087a399SNeel Natu /* 1559*d087a399SNeel Natu * When a vcpu is "running" the next instruction is determined 1560*d087a399SNeel Natu * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. 1561*d087a399SNeel Natu * Thus setting 'inst_length' to zero will cause the current 1562*d087a399SNeel Natu * instruction to be restarted. 1563*d087a399SNeel Natu */ 1564c9c75df4SNeel Natu vcpu->exitinfo.inst_length = 0; 1565*d087a399SNeel Natu VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by " 1566*d087a399SNeel Natu "setting inst_length to zero", vcpu->exitinfo.rip); 1567*d087a399SNeel Natu } else if (state == VCPU_FROZEN) { 1568*d087a399SNeel Natu /* 1569*d087a399SNeel Natu * When a vcpu is "frozen" it is outside the critical section 1570*d087a399SNeel Natu * around VMRUN() and 'nextrip' points to the next instruction. 1571*d087a399SNeel Natu * Thus instruction restart is achieved by setting 'nextrip' 1572*d087a399SNeel Natu * to the vcpu's %rip. 
1573*d087a399SNeel Natu */ 1574*d087a399SNeel Natu error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip); 1575*d087a399SNeel Natu KASSERT(!error, ("%s: error %d getting rip", __func__, error)); 1576*d087a399SNeel Natu VCPU_CTR2(vm, vcpuid, "restarting instruction by updating " 1577*d087a399SNeel Natu "nextrip from %#lx to %#lx", vcpu->nextrip, rip); 1578*d087a399SNeel Natu vcpu->nextrip = rip; 1579*d087a399SNeel Natu } else { 1580*d087a399SNeel Natu panic("%s: invalid state %d", __func__, state); 1581*d087a399SNeel Natu } 1582c9c75df4SNeel Natu return (0); 1583c9c75df4SNeel Natu } 1584c9c75df4SNeel Natu 1585c9c75df4SNeel Natu int 1586091d4532SNeel Natu vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) 1587091d4532SNeel Natu { 1588091d4532SNeel Natu struct vcpu *vcpu; 1589091d4532SNeel Natu int type, vector; 1590091d4532SNeel Natu 1591091d4532SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1592091d4532SNeel Natu return (EINVAL); 1593091d4532SNeel Natu 1594091d4532SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1595091d4532SNeel Natu 1596091d4532SNeel Natu if (info & VM_INTINFO_VALID) { 1597091d4532SNeel Natu type = info & VM_INTINFO_TYPE; 1598091d4532SNeel Natu vector = info & 0xff; 1599091d4532SNeel Natu if (type == VM_INTINFO_NMI && vector != IDT_NMI) 1600091d4532SNeel Natu return (EINVAL); 1601091d4532SNeel Natu if (type == VM_INTINFO_HWEXCEPTION && vector >= 32) 1602091d4532SNeel Natu return (EINVAL); 1603091d4532SNeel Natu if (info & VM_INTINFO_RSVD) 1604091d4532SNeel Natu return (EINVAL); 1605091d4532SNeel Natu } else { 1606091d4532SNeel Natu info = 0; 1607091d4532SNeel Natu } 1608091d4532SNeel Natu VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info); 1609091d4532SNeel Natu vcpu->exitintinfo = info; 1610091d4532SNeel Natu return (0); 1611091d4532SNeel Natu } 1612091d4532SNeel Natu 1613091d4532SNeel Natu enum exc_class { 1614091d4532SNeel Natu EXC_BENIGN, 1615091d4532SNeel Natu EXC_CONTRIBUTORY, 1616091d4532SNeel Natu EXC_PAGEFAULT 1617091d4532SNeel Natu }; 1618091d4532SNeel Natu 1619091d4532SNeel Natu #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ 1620091d4532SNeel Natu 1621091d4532SNeel Natu static enum exc_class 1622091d4532SNeel Natu exception_class(uint64_t info) 1623091d4532SNeel Natu { 1624091d4532SNeel Natu int type, vector; 1625091d4532SNeel Natu 1626091d4532SNeel Natu KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info)); 1627091d4532SNeel Natu type = info & VM_INTINFO_TYPE; 1628091d4532SNeel Natu vector = info & 0xff; 1629091d4532SNeel Natu 1630091d4532SNeel Natu /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ 1631091d4532SNeel Natu switch (type) { 1632091d4532SNeel Natu case VM_INTINFO_HWINTR: 1633091d4532SNeel Natu case VM_INTINFO_SWINTR: 1634091d4532SNeel Natu case VM_INTINFO_NMI: 1635091d4532SNeel Natu return (EXC_BENIGN); 1636091d4532SNeel Natu default: 1637091d4532SNeel Natu /* 1638091d4532SNeel Natu * Hardware exception. 1639091d4532SNeel Natu * 1640091d4532SNeel Natu * SVM and VT-x use identical type values to represent NMI, 1641091d4532SNeel Natu * hardware interrupt and software interrupt. 1642091d4532SNeel Natu * 1643091d4532SNeel Natu * SVM uses type '3' for all exceptions. VT-x uses type '3' 1644091d4532SNeel Natu * for exceptions except #BP and #OF. #BP and #OF use a type 1645091d4532SNeel Natu * value of '5' or '6'. Therefore we don't check for explicit 1646091d4532SNeel Natu * values of 'type' to classify 'intinfo' into a hardware 1647091d4532SNeel Natu * exception. 
1648091d4532SNeel Natu */ 1649091d4532SNeel Natu break; 1650091d4532SNeel Natu } 1651091d4532SNeel Natu 1652091d4532SNeel Natu switch (vector) { 1653091d4532SNeel Natu case IDT_PF: 1654091d4532SNeel Natu case IDT_VE: 1655091d4532SNeel Natu return (EXC_PAGEFAULT); 1656091d4532SNeel Natu case IDT_DE: 1657091d4532SNeel Natu case IDT_TS: 1658091d4532SNeel Natu case IDT_NP: 1659091d4532SNeel Natu case IDT_SS: 1660091d4532SNeel Natu case IDT_GP: 1661091d4532SNeel Natu return (EXC_CONTRIBUTORY); 1662091d4532SNeel Natu default: 1663091d4532SNeel Natu return (EXC_BENIGN); 1664091d4532SNeel Natu } 1665091d4532SNeel Natu } 1666091d4532SNeel Natu 1667091d4532SNeel Natu static int 1668091d4532SNeel Natu nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2, 1669091d4532SNeel Natu uint64_t *retinfo) 1670091d4532SNeel Natu { 1671091d4532SNeel Natu enum exc_class exc1, exc2; 1672091d4532SNeel Natu int type1, vector1; 1673091d4532SNeel Natu 1674091d4532SNeel Natu KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); 1675091d4532SNeel Natu KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); 1676091d4532SNeel Natu 1677091d4532SNeel Natu /* 1678091d4532SNeel Natu * If an exception occurs while attempting to call the double-fault 1679091d4532SNeel Natu * handler the processor enters shutdown mode (aka triple fault). 1680091d4532SNeel Natu */ 1681091d4532SNeel Natu type1 = info1 & VM_INTINFO_TYPE; 1682091d4532SNeel Natu vector1 = info1 & 0xff; 1683091d4532SNeel Natu if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { 1684091d4532SNeel Natu VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)", 1685091d4532SNeel Natu info1, info2); 1686091d4532SNeel Natu vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT); 1687091d4532SNeel Natu *retinfo = 0; 1688091d4532SNeel Natu return (0); 1689091d4532SNeel Natu } 1690091d4532SNeel Natu 1691091d4532SNeel Natu /* 1692091d4532SNeel Natu * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 1693091d4532SNeel Natu */ 1694091d4532SNeel Natu exc1 = exception_class(info1); 1695091d4532SNeel Natu exc2 = exception_class(info2); 1696091d4532SNeel Natu if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 1697091d4532SNeel Natu (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 1698091d4532SNeel Natu /* Convert nested fault into a double fault. 
*/ 1699091d4532SNeel Natu *retinfo = IDT_DF; 1700091d4532SNeel Natu *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1701091d4532SNeel Natu *retinfo |= VM_INTINFO_DEL_ERRCODE; 1702091d4532SNeel Natu } else { 1703091d4532SNeel Natu /* Handle exceptions serially */ 1704091d4532SNeel Natu *retinfo = info2; 1705091d4532SNeel Natu } 1706091d4532SNeel Natu return (1); 1707091d4532SNeel Natu } 1708091d4532SNeel Natu 1709091d4532SNeel Natu static uint64_t 1710091d4532SNeel Natu vcpu_exception_intinfo(struct vcpu *vcpu) 1711091d4532SNeel Natu { 1712091d4532SNeel Natu uint64_t info = 0; 1713091d4532SNeel Natu 1714091d4532SNeel Natu if (vcpu->exception_pending) { 1715c9c75df4SNeel Natu info = vcpu->exc_vector & 0xff; 1716091d4532SNeel Natu info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1717c9c75df4SNeel Natu if (vcpu->exc_errcode_valid) { 1718091d4532SNeel Natu info |= VM_INTINFO_DEL_ERRCODE; 1719c9c75df4SNeel Natu info |= (uint64_t)vcpu->exc_errcode << 32; 1720091d4532SNeel Natu } 1721091d4532SNeel Natu } 1722091d4532SNeel Natu return (info); 1723091d4532SNeel Natu } 1724091d4532SNeel Natu 1725091d4532SNeel Natu int 1726091d4532SNeel Natu vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) 1727091d4532SNeel Natu { 1728091d4532SNeel Natu struct vcpu *vcpu; 1729091d4532SNeel Natu uint64_t info1, info2; 1730091d4532SNeel Natu int valid; 1731091d4532SNeel Natu 1732091d4532SNeel Natu KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid)); 1733091d4532SNeel Natu 1734091d4532SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1735091d4532SNeel Natu 1736091d4532SNeel Natu info1 = vcpu->exitintinfo; 1737091d4532SNeel Natu vcpu->exitintinfo = 0; 1738091d4532SNeel Natu 1739091d4532SNeel Natu info2 = 0; 1740091d4532SNeel Natu if (vcpu->exception_pending) { 1741091d4532SNeel Natu info2 = vcpu_exception_intinfo(vcpu); 1742091d4532SNeel Natu vcpu->exception_pending = 0; 1743091d4532SNeel Natu VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx", 1744c9c75df4SNeel Natu vcpu->exc_vector, info2); 1745091d4532SNeel Natu } 1746091d4532SNeel Natu 1747091d4532SNeel Natu if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { 1748091d4532SNeel Natu valid = nested_fault(vm, vcpuid, info1, info2, retinfo); 1749091d4532SNeel Natu } else if (info1 & VM_INTINFO_VALID) { 1750091d4532SNeel Natu *retinfo = info1; 1751091d4532SNeel Natu valid = 1; 1752091d4532SNeel Natu } else if (info2 & VM_INTINFO_VALID) { 1753091d4532SNeel Natu *retinfo = info2; 1754091d4532SNeel Natu valid = 1; 1755091d4532SNeel Natu } else { 1756091d4532SNeel Natu valid = 0; 1757091d4532SNeel Natu } 1758091d4532SNeel Natu 1759091d4532SNeel Natu if (valid) { 1760091d4532SNeel Natu VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), " 1761091d4532SNeel Natu "retinfo(%#lx)", __func__, info1, info2, *retinfo); 1762091d4532SNeel Natu } 1763091d4532SNeel Natu 1764091d4532SNeel Natu return (valid); 1765091d4532SNeel Natu } 1766091d4532SNeel Natu 1767091d4532SNeel Natu int 1768091d4532SNeel Natu vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 1769091d4532SNeel Natu { 1770091d4532SNeel Natu struct vcpu *vcpu; 1771091d4532SNeel Natu 1772091d4532SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1773091d4532SNeel Natu return (EINVAL); 1774091d4532SNeel Natu 1775091d4532SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1776091d4532SNeel Natu *info1 = vcpu->exitintinfo; 1777091d4532SNeel Natu *info2 = vcpu_exception_intinfo(vcpu); 1778091d4532SNeel Natu return (0); 1779091d4532SNeel Natu } 1780091d4532SNeel Natu 
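/*
 * Example (illustrative sketch; the vcpu id and vector values below are
 * arbitrary): queueing an exception through the injection interface that
 * follows.  A #GP carries an error code, so errcode_valid is 1; a #UD
 * carries none.  Passing restart_instruction != 0 gives the exception
 * fault semantics, i.e. the guest %rip is left pointing at the
 * instruction that triggered it.
 *
 *	error = vm_inject_exception(vm, 0, IDT_GP, 1, 0, 1);
 *	error = vm_inject_exception(vm, 0, IDT_UD, 0, 0, 1);
 *
 * Only one exception may be pending per vcpu (a second call returns
 * EBUSY), and IDT_DF is rejected with EINVAL because double faults are
 * only synthesized by nested_fault() above.
 */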
1781091d4532SNeel Natu int 1782c9c75df4SNeel Natu vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid, 1783c9c75df4SNeel Natu uint32_t errcode, int restart_instruction) 1784366f6083SPeter Grehan { 1785dc506506SNeel Natu struct vcpu *vcpu; 17862ce12423SNeel Natu int error; 1787dc506506SNeel Natu 1788366f6083SPeter Grehan if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1789366f6083SPeter Grehan return (EINVAL); 1790366f6083SPeter Grehan 1791c9c75df4SNeel Natu if (vector < 0 || vector >= 32) 1792366f6083SPeter Grehan return (EINVAL); 1793366f6083SPeter Grehan 1794091d4532SNeel Natu /* 1795091d4532SNeel Natu * A double fault exception should never be injected directly into 1796091d4532SNeel Natu * the guest. It is a derived exception that results from specific 1797091d4532SNeel Natu * combinations of nested faults. 1798091d4532SNeel Natu */ 1799c9c75df4SNeel Natu if (vector == IDT_DF) 1800091d4532SNeel Natu return (EINVAL); 1801091d4532SNeel Natu 1802dc506506SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1803366f6083SPeter Grehan 1804dc506506SNeel Natu if (vcpu->exception_pending) { 1805dc506506SNeel Natu VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to " 1806c9c75df4SNeel Natu "pending exception %d", vector, vcpu->exc_vector); 1807dc506506SNeel Natu return (EBUSY); 1808dc506506SNeel Natu } 1809dc506506SNeel Natu 18102ce12423SNeel Natu /* 18112ce12423SNeel Natu * From section 26.6.1 "Interruptibility State" in Intel SDM: 18122ce12423SNeel Natu * 18132ce12423SNeel Natu * Event blocking by "STI" or "MOV SS" is cleared after guest executes 18142ce12423SNeel Natu * one instruction or incurs an exception. 18152ce12423SNeel Natu */ 18162ce12423SNeel Natu error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0); 18172ce12423SNeel Natu KASSERT(error == 0, ("%s: error %d clearing interrupt shadow", 18182ce12423SNeel Natu __func__, error)); 18192ce12423SNeel Natu 1820c9c75df4SNeel Natu if (restart_instruction) 1821c9c75df4SNeel Natu vm_restart_instruction(vm, vcpuid); 1822c9c75df4SNeel Natu 1823dc506506SNeel Natu vcpu->exception_pending = 1; 1824c9c75df4SNeel Natu vcpu->exc_vector = vector; 1825c9c75df4SNeel Natu vcpu->exc_errcode = errcode; 1826c9c75df4SNeel Natu vcpu->exc_errcode_valid = errcode_valid; 1827c9c75df4SNeel Natu VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector); 1828dc506506SNeel Natu return (0); 1829dc506506SNeel Natu } 1830dc506506SNeel Natu 1831d37f2adbSNeel Natu void 1832d37f2adbSNeel Natu vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid, 1833d37f2adbSNeel Natu int errcode) 1834dc506506SNeel Natu { 1835d37f2adbSNeel Natu struct vm *vm; 1836c9c75df4SNeel Natu int error, restart_instruction; 1837dc506506SNeel Natu 1838d37f2adbSNeel Natu vm = vmarg; 1839c9c75df4SNeel Natu restart_instruction = 1; 1840d37f2adbSNeel Natu 1841c9c75df4SNeel Natu error = vm_inject_exception(vm, vcpuid, vector, errcode_valid, 1842c9c75df4SNeel Natu errcode, restart_instruction); 1843dc506506SNeel Natu KASSERT(error == 0, ("vm_inject_exception error %d", error)); 1844dc506506SNeel Natu } 1845dc506506SNeel Natu 1846dc506506SNeel Natu void 1847d37f2adbSNeel Natu vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2) 1848fd949af6SNeel Natu { 1849d37f2adbSNeel Natu struct vm *vm; 185037a723a5SNeel Natu int error; 185137a723a5SNeel Natu 1852d37f2adbSNeel Natu vm = vmarg; 185337a723a5SNeel Natu VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx", 185437a723a5SNeel Natu error_code, cr2); 185537a723a5SNeel Natu 185637a723a5SNeel Natu 
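	/*
	 * Make the faulting linear address visible to the guest in %cr2
	 * before the #PF itself is made pending below.
	 */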
error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2); 185737a723a5SNeel Natu KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); 1858fd949af6SNeel Natu 1859d37f2adbSNeel Natu vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code); 1860366f6083SPeter Grehan } 1861366f6083SPeter Grehan 186261592433SNeel Natu static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 1863366f6083SPeter Grehan 1864f352ff0cSNeel Natu int 1865f352ff0cSNeel Natu vm_inject_nmi(struct vm *vm, int vcpuid) 1866f352ff0cSNeel Natu { 1867f352ff0cSNeel Natu struct vcpu *vcpu; 1868f352ff0cSNeel Natu 1869f352ff0cSNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1870366f6083SPeter Grehan return (EINVAL); 1871366f6083SPeter Grehan 1872f352ff0cSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1873f352ff0cSNeel Natu 1874f352ff0cSNeel Natu vcpu->nmi_pending = 1; 1875de5ea6b6SNeel Natu vcpu_notify_event(vm, vcpuid, false); 1876f352ff0cSNeel Natu return (0); 1877f352ff0cSNeel Natu } 1878f352ff0cSNeel Natu 1879f352ff0cSNeel Natu int 1880f352ff0cSNeel Natu vm_nmi_pending(struct vm *vm, int vcpuid) 1881f352ff0cSNeel Natu { 1882f352ff0cSNeel Natu struct vcpu *vcpu; 1883f352ff0cSNeel Natu 1884f352ff0cSNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1885f352ff0cSNeel Natu panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); 1886f352ff0cSNeel Natu 1887f352ff0cSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1888f352ff0cSNeel Natu 1889f352ff0cSNeel Natu return (vcpu->nmi_pending); 1890f352ff0cSNeel Natu } 1891f352ff0cSNeel Natu 1892f352ff0cSNeel Natu void 1893f352ff0cSNeel Natu vm_nmi_clear(struct vm *vm, int vcpuid) 1894f352ff0cSNeel Natu { 1895f352ff0cSNeel Natu struct vcpu *vcpu; 1896f352ff0cSNeel Natu 1897f352ff0cSNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1898f352ff0cSNeel Natu panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); 1899f352ff0cSNeel Natu 1900f352ff0cSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1901f352ff0cSNeel Natu 1902f352ff0cSNeel Natu if (vcpu->nmi_pending == 0) 1903f352ff0cSNeel Natu panic("vm_nmi_clear: inconsistent nmi_pending state"); 1904f352ff0cSNeel Natu 1905f352ff0cSNeel Natu vcpu->nmi_pending = 0; 1906f352ff0cSNeel Natu vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); 1907366f6083SPeter Grehan } 1908366f6083SPeter Grehan 19090775fbb4STycho Nightingale static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 19100775fbb4STycho Nightingale 19110775fbb4STycho Nightingale int 19120775fbb4STycho Nightingale vm_inject_extint(struct vm *vm, int vcpuid) 19130775fbb4STycho Nightingale { 19140775fbb4STycho Nightingale struct vcpu *vcpu; 19150775fbb4STycho Nightingale 19160775fbb4STycho Nightingale if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 19170775fbb4STycho Nightingale return (EINVAL); 19180775fbb4STycho Nightingale 19190775fbb4STycho Nightingale vcpu = &vm->vcpu[vcpuid]; 19200775fbb4STycho Nightingale 19210775fbb4STycho Nightingale vcpu->extint_pending = 1; 19220775fbb4STycho Nightingale vcpu_notify_event(vm, vcpuid, false); 19230775fbb4STycho Nightingale return (0); 19240775fbb4STycho Nightingale } 19250775fbb4STycho Nightingale 19260775fbb4STycho Nightingale int 19270775fbb4STycho Nightingale vm_extint_pending(struct vm *vm, int vcpuid) 19280775fbb4STycho Nightingale { 19290775fbb4STycho Nightingale struct vcpu *vcpu; 19300775fbb4STycho Nightingale 19310775fbb4STycho Nightingale if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 19320775fbb4STycho Nightingale panic("vm_extint_pending: invalid vcpuid %d", vcpuid); 19330775fbb4STycho Nightingale 19340775fbb4STycho Nightingale vcpu = &vm->vcpu[vcpuid]; 
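	/*
	 * Non-zero while an ExtINT raised by vm_inject_extint() is still
	 * awaiting delivery; vm_extint_clear() resets the flag and counts
	 * the delivery in VCPU_EXTINT_COUNT.
	 */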
19350775fbb4STycho Nightingale 19360775fbb4STycho Nightingale return (vcpu->extint_pending); 19370775fbb4STycho Nightingale } 19380775fbb4STycho Nightingale 19390775fbb4STycho Nightingale void 19400775fbb4STycho Nightingale vm_extint_clear(struct vm *vm, int vcpuid) 19410775fbb4STycho Nightingale { 19420775fbb4STycho Nightingale struct vcpu *vcpu; 19430775fbb4STycho Nightingale 19440775fbb4STycho Nightingale if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 19450775fbb4STycho Nightingale panic("vm_extint_pending: invalid vcpuid %d", vcpuid); 19460775fbb4STycho Nightingale 19470775fbb4STycho Nightingale vcpu = &vm->vcpu[vcpuid]; 19480775fbb4STycho Nightingale 19490775fbb4STycho Nightingale if (vcpu->extint_pending == 0) 19500775fbb4STycho Nightingale panic("vm_extint_clear: inconsistent extint_pending state"); 19510775fbb4STycho Nightingale 19520775fbb4STycho Nightingale vcpu->extint_pending = 0; 19530775fbb4STycho Nightingale vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); 19540775fbb4STycho Nightingale } 19550775fbb4STycho Nightingale 1956366f6083SPeter Grehan int 1957366f6083SPeter Grehan vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) 1958366f6083SPeter Grehan { 1959366f6083SPeter Grehan if (vcpu < 0 || vcpu >= VM_MAXCPU) 1960366f6083SPeter Grehan return (EINVAL); 1961366f6083SPeter Grehan 1962366f6083SPeter Grehan if (type < 0 || type >= VM_CAP_MAX) 1963366f6083SPeter Grehan return (EINVAL); 1964366f6083SPeter Grehan 1965366f6083SPeter Grehan return (VMGETCAP(vm->cookie, vcpu, type, retval)); 1966366f6083SPeter Grehan } 1967366f6083SPeter Grehan 1968366f6083SPeter Grehan int 1969366f6083SPeter Grehan vm_set_capability(struct vm *vm, int vcpu, int type, int val) 1970366f6083SPeter Grehan { 1971366f6083SPeter Grehan if (vcpu < 0 || vcpu >= VM_MAXCPU) 1972366f6083SPeter Grehan return (EINVAL); 1973366f6083SPeter Grehan 1974366f6083SPeter Grehan if (type < 0 || type >= VM_CAP_MAX) 1975366f6083SPeter Grehan return (EINVAL); 1976366f6083SPeter Grehan 1977366f6083SPeter Grehan return (VMSETCAP(vm->cookie, vcpu, type, val)); 1978366f6083SPeter Grehan } 1979366f6083SPeter Grehan 1980366f6083SPeter Grehan struct vlapic * 1981366f6083SPeter Grehan vm_lapic(struct vm *vm, int cpu) 1982366f6083SPeter Grehan { 1983366f6083SPeter Grehan return (vm->vcpu[cpu].vlapic); 1984366f6083SPeter Grehan } 1985366f6083SPeter Grehan 1986565bbb86SNeel Natu struct vioapic * 1987565bbb86SNeel Natu vm_ioapic(struct vm *vm) 1988565bbb86SNeel Natu { 1989565bbb86SNeel Natu 1990565bbb86SNeel Natu return (vm->vioapic); 1991565bbb86SNeel Natu } 1992565bbb86SNeel Natu 199308e3ff32SNeel Natu struct vhpet * 199408e3ff32SNeel Natu vm_hpet(struct vm *vm) 199508e3ff32SNeel Natu { 199608e3ff32SNeel Natu 199708e3ff32SNeel Natu return (vm->vhpet); 199808e3ff32SNeel Natu } 199908e3ff32SNeel Natu 2000366f6083SPeter Grehan boolean_t 2001366f6083SPeter Grehan vmm_is_pptdev(int bus, int slot, int func) 2002366f6083SPeter Grehan { 200307044a96SNeel Natu int found, i, n; 200407044a96SNeel Natu int b, s, f; 2005366f6083SPeter Grehan char *val, *cp, *cp2; 2006366f6083SPeter Grehan 2007366f6083SPeter Grehan /* 200807044a96SNeel Natu * XXX 200907044a96SNeel Natu * The length of an environment variable is limited to 128 bytes which 201007044a96SNeel Natu * puts an upper limit on the number of passthru devices that may be 201107044a96SNeel Natu * specified using a single environment variable. 
201207044a96SNeel Natu * 201307044a96SNeel Natu * Work around this by scanning multiple environment variable 201407044a96SNeel Natu * names instead of a single one - yuck! 2015366f6083SPeter Grehan */ 201607044a96SNeel Natu const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL }; 201707044a96SNeel Natu 201807044a96SNeel Natu /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */ 2019366f6083SPeter Grehan found = 0; 202007044a96SNeel Natu for (i = 0; names[i] != NULL && !found; i++) { 20212be111bfSDavide Italiano cp = val = kern_getenv(names[i]); 2022366f6083SPeter Grehan while (cp != NULL && *cp != '\0') { 2023366f6083SPeter Grehan if ((cp2 = strchr(cp, ' ')) != NULL) 2024366f6083SPeter Grehan *cp2 = '\0'; 2025366f6083SPeter Grehan 2026366f6083SPeter Grehan n = sscanf(cp, "%d/%d/%d", &b, &s, &f); 2027366f6083SPeter Grehan if (n == 3 && bus == b && slot == s && func == f) { 2028366f6083SPeter Grehan found = 1; 2029366f6083SPeter Grehan break; 2030366f6083SPeter Grehan } 2031366f6083SPeter Grehan 2032366f6083SPeter Grehan if (cp2 != NULL) 2033366f6083SPeter Grehan *cp2++ = ' '; 2034366f6083SPeter Grehan 2035366f6083SPeter Grehan cp = cp2; 2036366f6083SPeter Grehan } 2037366f6083SPeter Grehan freeenv(val); 203807044a96SNeel Natu } 2039366f6083SPeter Grehan return (found); 2040366f6083SPeter Grehan } 2041366f6083SPeter Grehan 2042366f6083SPeter Grehan void * 2043366f6083SPeter Grehan vm_iommu_domain(struct vm *vm) 2044366f6083SPeter Grehan { 2045366f6083SPeter Grehan 2046366f6083SPeter Grehan return (vm->iommu); 2047366f6083SPeter Grehan } 2048366f6083SPeter Grehan 204975dd3366SNeel Natu int 2050f80330a8SNeel Natu vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, 2051f80330a8SNeel Natu bool from_idle) 2052366f6083SPeter Grehan { 205375dd3366SNeel Natu int error; 2054366f6083SPeter Grehan struct vcpu *vcpu; 2055366f6083SPeter Grehan 2056366f6083SPeter Grehan if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2057366f6083SPeter Grehan panic("vm_set_run_state: invalid vcpuid %d", vcpuid); 2058366f6083SPeter Grehan 2059366f6083SPeter Grehan vcpu = &vm->vcpu[vcpuid]; 2060366f6083SPeter Grehan 206175dd3366SNeel Natu vcpu_lock(vcpu); 2062f80330a8SNeel Natu error = vcpu_set_state_locked(vcpu, newstate, from_idle); 206375dd3366SNeel Natu vcpu_unlock(vcpu); 206475dd3366SNeel Natu 206575dd3366SNeel Natu return (error); 206675dd3366SNeel Natu } 206775dd3366SNeel Natu 206875dd3366SNeel Natu enum vcpu_state 2069d3c11f40SPeter Grehan vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) 2070366f6083SPeter Grehan { 2071366f6083SPeter Grehan struct vcpu *vcpu; 207275dd3366SNeel Natu enum vcpu_state state; 2073366f6083SPeter Grehan 2074366f6083SPeter Grehan if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2075366f6083SPeter Grehan panic("vm_get_run_state: invalid vcpuid %d", vcpuid); 2076366f6083SPeter Grehan 2077366f6083SPeter Grehan vcpu = &vm->vcpu[vcpuid]; 2078366f6083SPeter Grehan 207975dd3366SNeel Natu vcpu_lock(vcpu); 208075dd3366SNeel Natu state = vcpu->state; 2081d3c11f40SPeter Grehan if (hostcpu != NULL) 2082d3c11f40SPeter Grehan *hostcpu = vcpu->hostcpu; 208375dd3366SNeel Natu vcpu_unlock(vcpu); 2084366f6083SPeter Grehan 208575dd3366SNeel Natu return (state); 2086366f6083SPeter Grehan } 2087366f6083SPeter Grehan 208895ebc360SNeel Natu int 2089366f6083SPeter Grehan vm_activate_cpu(struct vm *vm, int vcpuid) 2090366f6083SPeter Grehan { 2091366f6083SPeter Grehan 209295ebc360SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 209395ebc360SNeel Natu return (EINVAL); 209495ebc360SNeel Natu 209595ebc360SNeel 
Natu if (CPU_ISSET(vcpuid, &vm->active_cpus)) 209695ebc360SNeel Natu return (EBUSY); 209722d822c6SNeel Natu 209822d822c6SNeel Natu VCPU_CTR0(vm, vcpuid, "activated"); 209922d822c6SNeel Natu CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); 210095ebc360SNeel Natu return (0); 2101366f6083SPeter Grehan } 2102366f6083SPeter Grehan 2103a5615c90SPeter Grehan cpuset_t 2104366f6083SPeter Grehan vm_active_cpus(struct vm *vm) 2105366f6083SPeter Grehan { 2106366f6083SPeter Grehan 2107366f6083SPeter Grehan return (vm->active_cpus); 2108366f6083SPeter Grehan } 2109366f6083SPeter Grehan 211095ebc360SNeel Natu cpuset_t 211195ebc360SNeel Natu vm_suspended_cpus(struct vm *vm) 211295ebc360SNeel Natu { 211395ebc360SNeel Natu 211495ebc360SNeel Natu return (vm->suspended_cpus); 211595ebc360SNeel Natu } 211695ebc360SNeel Natu 2117366f6083SPeter Grehan void * 2118366f6083SPeter Grehan vcpu_stats(struct vm *vm, int vcpuid) 2119366f6083SPeter Grehan { 2120366f6083SPeter Grehan 2121366f6083SPeter Grehan return (vm->vcpu[vcpuid].stats); 2122366f6083SPeter Grehan } 2123e9027382SNeel Natu 2124e9027382SNeel Natu int 2125e9027382SNeel Natu vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) 2126e9027382SNeel Natu { 2127e9027382SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2128e9027382SNeel Natu return (EINVAL); 2129e9027382SNeel Natu 2130e9027382SNeel Natu *state = vm->vcpu[vcpuid].x2apic_state; 2131e9027382SNeel Natu 2132e9027382SNeel Natu return (0); 2133e9027382SNeel Natu } 2134e9027382SNeel Natu 2135e9027382SNeel Natu int 2136e9027382SNeel Natu vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) 2137e9027382SNeel Natu { 2138e9027382SNeel Natu if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2139e9027382SNeel Natu return (EINVAL); 2140e9027382SNeel Natu 21413f23d3caSNeel Natu if (state >= X2APIC_STATE_LAST) 2142e9027382SNeel Natu return (EINVAL); 2143e9027382SNeel Natu 2144e9027382SNeel Natu vm->vcpu[vcpuid].x2apic_state = state; 2145e9027382SNeel Natu 214673820fb0SNeel Natu vlapic_set_x2apic_state(vm, vcpuid, state); 214773820fb0SNeel Natu 2148e9027382SNeel Natu return (0); 2149e9027382SNeel Natu } 215075dd3366SNeel Natu 215122821874SNeel Natu /* 215222821874SNeel Natu * This function is called to ensure that a vcpu "sees" a pending event 215322821874SNeel Natu * as soon as possible: 215422821874SNeel Natu * - If the vcpu thread is sleeping then it is woken up. 215522821874SNeel Natu * - If the vcpu is running on a different host_cpu then an IPI will be directed 215622821874SNeel Natu * to the host_cpu to cause the vcpu to trap into the hypervisor. 
215722821874SNeel Natu */ 215875dd3366SNeel Natu void 2159de5ea6b6SNeel Natu vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr) 216075dd3366SNeel Natu { 216175dd3366SNeel Natu int hostcpu; 216275dd3366SNeel Natu struct vcpu *vcpu; 216375dd3366SNeel Natu 216475dd3366SNeel Natu vcpu = &vm->vcpu[vcpuid]; 216575dd3366SNeel Natu 2166f76fc5d4SNeel Natu vcpu_lock(vcpu); 216775dd3366SNeel Natu hostcpu = vcpu->hostcpu; 2168ef39d7e9SNeel Natu if (vcpu->state == VCPU_RUNNING) { 2169ef39d7e9SNeel Natu KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 2170de5ea6b6SNeel Natu if (hostcpu != curcpu) { 2171ef39d7e9SNeel Natu if (lapic_intr) { 2172add611fdSNeel Natu vlapic_post_intr(vcpu->vlapic, hostcpu, 2173add611fdSNeel Natu vmm_ipinum); 2174ef39d7e9SNeel Natu } else { 217575dd3366SNeel Natu ipi_cpu(hostcpu, vmm_ipinum); 217675dd3366SNeel Natu } 2177ef39d7e9SNeel Natu } else { 2178ef39d7e9SNeel Natu /* 2179ef39d7e9SNeel Natu * If the 'vcpu' is running on 'curcpu' then it must 2180ef39d7e9SNeel Natu * be sending a notification to itself (e.g. SELF_IPI). 2181ef39d7e9SNeel Natu * The pending event will be picked up when the vcpu 2182ef39d7e9SNeel Natu * transitions back to guest context. 2183ef39d7e9SNeel Natu */ 2184ef39d7e9SNeel Natu } 2185ef39d7e9SNeel Natu } else { 2186ef39d7e9SNeel Natu KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 2187ef39d7e9SNeel Natu "with hostcpu %d", vcpu->state, hostcpu)); 2188366f6083SPeter Grehan if (vcpu->state == VCPU_SLEEPING) 2189366f6083SPeter Grehan wakeup_one(vcpu); 2190366f6083SPeter Grehan } 2191f76fc5d4SNeel Natu vcpu_unlock(vcpu); 2192f76fc5d4SNeel Natu } 2193318224bbSNeel Natu 2194318224bbSNeel Natu struct vmspace * 2195318224bbSNeel Natu vm_get_vmspace(struct vm *vm) 2196318224bbSNeel Natu { 2197318224bbSNeel Natu 2198318224bbSNeel Natu return (vm->vmspace); 2199318224bbSNeel Natu } 2200565bbb86SNeel Natu 2201565bbb86SNeel Natu int 2202565bbb86SNeel Natu vm_apicid2vcpuid(struct vm *vm, int apicid) 2203565bbb86SNeel Natu { 2204565bbb86SNeel Natu /* 2205565bbb86SNeel Natu * XXX apic id is assumed to be numerically identical to vcpu id 2206565bbb86SNeel Natu */ 2207565bbb86SNeel Natu return (apicid); 2208565bbb86SNeel Natu } 22095b8a8cd1SNeel Natu 22105b8a8cd1SNeel Natu void 22115b8a8cd1SNeel Natu vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest, 22125b8a8cd1SNeel Natu vm_rendezvous_func_t func, void *arg) 22135b8a8cd1SNeel Natu { 2214970955e4SNeel Natu int i; 2215970955e4SNeel Natu 22165b8a8cd1SNeel Natu /* 22175b8a8cd1SNeel Natu * Enforce that this function is called without any locks 22185b8a8cd1SNeel Natu */ 22195b8a8cd1SNeel Natu WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); 22205b8a8cd1SNeel Natu KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU), 22215b8a8cd1SNeel Natu ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); 22225b8a8cd1SNeel Natu 22235b8a8cd1SNeel Natu restart: 22245b8a8cd1SNeel Natu mtx_lock(&vm->rendezvous_mtx); 22255b8a8cd1SNeel Natu if (vm->rendezvous_func != NULL) { 22265b8a8cd1SNeel Natu /* 22275b8a8cd1SNeel Natu * If a rendezvous is already in progress then we need to 22285b8a8cd1SNeel Natu * call the rendezvous handler in case this 'vcpuid' is one 22295b8a8cd1SNeel Natu * of the targets of the rendezvous. 
22305b8a8cd1SNeel Natu */ 22315b8a8cd1SNeel Natu RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress"); 22325b8a8cd1SNeel Natu mtx_unlock(&vm->rendezvous_mtx); 22335b8a8cd1SNeel Natu vm_handle_rendezvous(vm, vcpuid); 22345b8a8cd1SNeel Natu goto restart; 22355b8a8cd1SNeel Natu } 22365b8a8cd1SNeel Natu KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " 22375b8a8cd1SNeel Natu "rendezvous is still in progress")); 22385b8a8cd1SNeel Natu 22395b8a8cd1SNeel Natu RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous"); 22405b8a8cd1SNeel Natu vm->rendezvous_req_cpus = dest; 22415b8a8cd1SNeel Natu CPU_ZERO(&vm->rendezvous_done_cpus); 22425b8a8cd1SNeel Natu vm->rendezvous_arg = arg; 22435b8a8cd1SNeel Natu vm_set_rendezvous_func(vm, func); 22445b8a8cd1SNeel Natu mtx_unlock(&vm->rendezvous_mtx); 22455b8a8cd1SNeel Natu 2246970955e4SNeel Natu /* 2247970955e4SNeel Natu * Wake up any sleeping vcpus and trigger a VM-exit in any running 2248970955e4SNeel Natu * vcpus so they handle the rendezvous as soon as possible. 2249970955e4SNeel Natu */ 2250970955e4SNeel Natu for (i = 0; i < VM_MAXCPU; i++) { 2251970955e4SNeel Natu if (CPU_ISSET(i, &dest)) 2252970955e4SNeel Natu vcpu_notify_event(vm, i, false); 2253970955e4SNeel Natu } 2254970955e4SNeel Natu 22555b8a8cd1SNeel Natu vm_handle_rendezvous(vm, vcpuid); 22565b8a8cd1SNeel Natu } 2257762fd208STycho Nightingale 2258762fd208STycho Nightingale struct vatpic * 2259762fd208STycho Nightingale vm_atpic(struct vm *vm) 2260762fd208STycho Nightingale { 2261762fd208STycho Nightingale return (vm->vatpic); 2262762fd208STycho Nightingale } 2263e883c9bbSTycho Nightingale 2264e883c9bbSTycho Nightingale struct vatpit * 2265e883c9bbSTycho Nightingale vm_atpit(struct vm *vm) 2266e883c9bbSTycho Nightingale { 2267e883c9bbSTycho Nightingale return (vm->vatpit); 2268e883c9bbSTycho Nightingale } 2269d17b5104SNeel Natu 2270160ef77aSNeel Natu struct vpmtmr * 2271160ef77aSNeel Natu vm_pmtmr(struct vm *vm) 2272160ef77aSNeel Natu { 2273160ef77aSNeel Natu 2274160ef77aSNeel Natu return (vm->vpmtmr); 2275160ef77aSNeel Natu } 2276160ef77aSNeel Natu 22770dafa5cdSNeel Natu struct vrtc * 22780dafa5cdSNeel Natu vm_rtc(struct vm *vm) 22790dafa5cdSNeel Natu { 22800dafa5cdSNeel Natu 22810dafa5cdSNeel Natu return (vm->vrtc); 22820dafa5cdSNeel Natu } 22830dafa5cdSNeel Natu 2284d17b5104SNeel Natu enum vm_reg_name 2285d17b5104SNeel Natu vm_segment_name(int seg) 2286d17b5104SNeel Natu { 2287d17b5104SNeel Natu static enum vm_reg_name seg_names[] = { 2288d17b5104SNeel Natu VM_REG_GUEST_ES, 2289d17b5104SNeel Natu VM_REG_GUEST_CS, 2290d17b5104SNeel Natu VM_REG_GUEST_SS, 2291d17b5104SNeel Natu VM_REG_GUEST_DS, 2292d17b5104SNeel Natu VM_REG_GUEST_FS, 2293d17b5104SNeel Natu VM_REG_GUEST_GS 2294d17b5104SNeel Natu }; 2295d17b5104SNeel Natu 2296d17b5104SNeel Natu KASSERT(seg >= 0 && seg < nitems(seg_names), 2297d17b5104SNeel Natu ("%s: invalid segment encoding %d", __func__, seg)); 2298d17b5104SNeel Natu return (seg_names[seg]); 2299d17b5104SNeel Natu } 2300cf1d80d8SPeter Grehan 2301d665d229SNeel Natu void 2302d665d229SNeel Natu vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, 2303d665d229SNeel Natu int num_copyinfo) 2304d665d229SNeel Natu { 2305d665d229SNeel Natu int idx; 2306d665d229SNeel Natu 2307d665d229SNeel Natu for (idx = 0; idx < num_copyinfo; idx++) { 2308d665d229SNeel Natu if (copyinfo[idx].cookie != NULL) 2309d665d229SNeel Natu vm_gpa_release(copyinfo[idx].cookie); 2310d665d229SNeel Natu } 2311d665d229SNeel Natu bzero(copyinfo, num_copyinfo * sizeof(struct 
vm_copyinfo)); 2312d665d229SNeel Natu } 2313d665d229SNeel Natu 2314d665d229SNeel Natu int 2315d665d229SNeel Natu vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging, 2316d665d229SNeel Natu uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, 2317d665d229SNeel Natu int num_copyinfo) 2318d665d229SNeel Natu { 2319d665d229SNeel Natu int error, idx, nused; 2320d665d229SNeel Natu size_t n, off, remaining; 2321d665d229SNeel Natu void *hva, *cookie; 2322d665d229SNeel Natu uint64_t gpa; 2323d665d229SNeel Natu 2324d665d229SNeel Natu bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo); 2325d665d229SNeel Natu 2326d665d229SNeel Natu nused = 0; 2327d665d229SNeel Natu remaining = len; 2328d665d229SNeel Natu while (remaining > 0) { 2329d665d229SNeel Natu KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo")); 2330d665d229SNeel Natu error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa); 2331d665d229SNeel Natu if (error) 2332d665d229SNeel Natu return (error); 2333d665d229SNeel Natu off = gpa & PAGE_MASK; 2334d665d229SNeel Natu n = min(remaining, PAGE_SIZE - off); 2335d665d229SNeel Natu copyinfo[nused].gpa = gpa; 2336d665d229SNeel Natu copyinfo[nused].len = n; 2337d665d229SNeel Natu remaining -= n; 2338d665d229SNeel Natu gla += n; 2339d665d229SNeel Natu nused++; 2340d665d229SNeel Natu } 2341d665d229SNeel Natu 2342d665d229SNeel Natu for (idx = 0; idx < nused; idx++) { 2343d665d229SNeel Natu hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len, 2344d665d229SNeel Natu prot, &cookie); 2345d665d229SNeel Natu if (hva == NULL) 2346d665d229SNeel Natu break; 2347d665d229SNeel Natu copyinfo[idx].hva = hva; 2348d665d229SNeel Natu copyinfo[idx].cookie = cookie; 2349d665d229SNeel Natu } 2350d665d229SNeel Natu 2351d665d229SNeel Natu if (idx != nused) { 2352d665d229SNeel Natu vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo); 2353d665d229SNeel Natu return (-1); 2354d665d229SNeel Natu } else { 2355d665d229SNeel Natu return (0); 2356d665d229SNeel Natu } 2357d665d229SNeel Natu } 2358d665d229SNeel Natu 2359d665d229SNeel Natu void 2360d665d229SNeel Natu vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr, 2361d665d229SNeel Natu size_t len) 2362d665d229SNeel Natu { 2363d665d229SNeel Natu char *dst; 2364d665d229SNeel Natu int idx; 2365d665d229SNeel Natu 2366d665d229SNeel Natu dst = kaddr; 2367d665d229SNeel Natu idx = 0; 2368d665d229SNeel Natu while (len > 0) { 2369d665d229SNeel Natu bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); 2370d665d229SNeel Natu len -= copyinfo[idx].len; 2371d665d229SNeel Natu dst += copyinfo[idx].len; 2372d665d229SNeel Natu idx++; 2373d665d229SNeel Natu } 2374d665d229SNeel Natu } 2375d665d229SNeel Natu 2376d665d229SNeel Natu void 2377d665d229SNeel Natu vm_copyout(struct vm *vm, int vcpuid, const void *kaddr, 2378d665d229SNeel Natu struct vm_copyinfo *copyinfo, size_t len) 2379d665d229SNeel Natu { 2380d665d229SNeel Natu const char *src; 2381d665d229SNeel Natu int idx; 2382d665d229SNeel Natu 2383d665d229SNeel Natu src = kaddr; 2384d665d229SNeel Natu idx = 0; 2385d665d229SNeel Natu while (len > 0) { 2386d665d229SNeel Natu bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); 2387d665d229SNeel Natu len -= copyinfo[idx].len; 2388d665d229SNeel Natu src += copyinfo[idx].len; 2389d665d229SNeel Natu idx++; 2390d665d229SNeel Natu } 2391d665d229SNeel Natu } 2392cf1d80d8SPeter Grehan 2393cf1d80d8SPeter Grehan /* 2394cf1d80d8SPeter Grehan * Return the amount of in-use and wired memory for the VM. 
Since 2395cf1d80d8SPeter Grehan * these are global stats, only return the values for vCPU 0 2396cf1d80d8SPeter Grehan */ 2397cf1d80d8SPeter Grehan VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 2398cf1d80d8SPeter Grehan VMM_STAT_DECLARE(VMM_MEM_WIRED); 2399cf1d80d8SPeter Grehan 2400cf1d80d8SPeter Grehan static void 2401cf1d80d8SPeter Grehan vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2402cf1d80d8SPeter Grehan { 2403cf1d80d8SPeter Grehan 2404cf1d80d8SPeter Grehan if (vcpu == 0) { 2405cf1d80d8SPeter Grehan vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT, 2406cf1d80d8SPeter Grehan PAGE_SIZE * vmspace_resident_count(vm->vmspace)); 2407cf1d80d8SPeter Grehan } 2408cf1d80d8SPeter Grehan } 2409cf1d80d8SPeter Grehan 2410cf1d80d8SPeter Grehan static void 2411cf1d80d8SPeter Grehan vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2412cf1d80d8SPeter Grehan { 2413cf1d80d8SPeter Grehan 2414cf1d80d8SPeter Grehan if (vcpu == 0) { 2415cf1d80d8SPeter Grehan vmm_stat_set(vm, vcpu, VMM_MEM_WIRED, 2416cf1d80d8SPeter Grehan PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace))); 2417cf1d80d8SPeter Grehan } 2418cf1d80d8SPeter Grehan } 2419cf1d80d8SPeter Grehan VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 2420cf1d80d8SPeter Grehan VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt); 2421cf1d80d8SPeter Grehan 2422
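/*
 * Example (illustrative sketch; the buffer size and variable names are
 * arbitrary): copying a guest-virtual buffer into the kernel with the
 * vm_copy_*() helpers defined earlier in this file.  The 'paging' state
 * would normally be taken from the vcpu's exit information.
 *
 *	struct vm_copyinfo copyinfo[2];	(two entries cover one page crossing)
 *	char buf[64];
 *	int error;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, sizeof(buf),
 *	    VM_PROT_READ, copyinfo, nitems(copyinfo));
 *	if (error == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, sizeof(buf));
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 *
 * vm_copy_setup() either holds every page backing the range or none of
 * them: a translation failure returns the vmm_gla2gpa() error, and a
 * failure to hold a page tears down the partial setup and returns -1.
 */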