1366f6083SPeter Grehan /*- 2c49761ddSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3c49761ddSPedro F. Giffuni * 4366f6083SPeter Grehan * Copyright (c) 2011 NetApp, Inc. 5366f6083SPeter Grehan * All rights reserved. 6366f6083SPeter Grehan * 7366f6083SPeter Grehan * Redistribution and use in source and binary forms, with or without 8366f6083SPeter Grehan * modification, are permitted provided that the following conditions 9366f6083SPeter Grehan * are met: 10366f6083SPeter Grehan * 1. Redistributions of source code must retain the above copyright 11366f6083SPeter Grehan * notice, this list of conditions and the following disclaimer. 12366f6083SPeter Grehan * 2. Redistributions in binary form must reproduce the above copyright 13366f6083SPeter Grehan * notice, this list of conditions and the following disclaimer in the 14366f6083SPeter Grehan * documentation and/or other materials provided with the distribution. 15366f6083SPeter Grehan * 16366f6083SPeter Grehan * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17366f6083SPeter Grehan * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18366f6083SPeter Grehan * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19366f6083SPeter Grehan * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20366f6083SPeter Grehan * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21366f6083SPeter Grehan * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22366f6083SPeter Grehan * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23366f6083SPeter Grehan * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24366f6083SPeter Grehan * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25366f6083SPeter Grehan * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26366f6083SPeter Grehan * SUCH DAMAGE. 27366f6083SPeter Grehan * 28366f6083SPeter Grehan * $FreeBSD$ 29366f6083SPeter Grehan */ 30366f6083SPeter Grehan 31366f6083SPeter Grehan #include <sys/cdefs.h> 32366f6083SPeter Grehan __FBSDID("$FreeBSD$"); 33366f6083SPeter Grehan 34483d953aSJohn Baldwin #include "opt_bhyve_snapshot.h" 35483d953aSJohn Baldwin 36366f6083SPeter Grehan #include <sys/param.h> 3738f1b189SPeter Grehan #include <sys/systm.h> 38366f6083SPeter Grehan #include <sys/kernel.h> 39366f6083SPeter Grehan #include <sys/module.h> 40366f6083SPeter Grehan #include <sys/sysctl.h> 41366f6083SPeter Grehan #include <sys/malloc.h> 42366f6083SPeter Grehan #include <sys/pcpu.h> 43366f6083SPeter Grehan #include <sys/lock.h> 44366f6083SPeter Grehan #include <sys/mutex.h> 45366f6083SPeter Grehan #include <sys/proc.h> 46318224bbSNeel Natu #include <sys/rwlock.h> 47366f6083SPeter Grehan #include <sys/sched.h> 48366f6083SPeter Grehan #include <sys/smp.h> 49483d953aSJohn Baldwin #include <sys/vnode.h> 50366f6083SPeter Grehan 51366f6083SPeter Grehan #include <vm/vm.h> 523c48106aSKonstantin Belousov #include <vm/vm_param.h> 533c48106aSKonstantin Belousov #include <vm/vm_extern.h> 54318224bbSNeel Natu #include <vm/vm_object.h> 55318224bbSNeel Natu #include <vm/vm_page.h> 56318224bbSNeel Natu #include <vm/pmap.h> 57318224bbSNeel Natu #include <vm/vm_map.h> 58483d953aSJohn Baldwin #include <vm/vm_pager.h> 59483d953aSJohn Baldwin #include <vm/vm_kern.h> 60483d953aSJohn Baldwin #include <vm/vnode_pager.h> 61483d953aSJohn Baldwin #include <vm/swap_pager.h> 62483d953aSJohn Baldwin #include <vm/uma.h> 63366f6083SPeter Grehan 6463e62d39SJohn Baldwin #include <machine/cpu.h> 65366f6083SPeter Grehan #include <machine/pcb.h> 6675dd3366SNeel Natu #include <machine/smp.h> 67bd50262fSKonstantin Belousov #include <machine/md_var.h> 681c052192SNeel Natu #include <x86/psl.h> 6934a6b2d6SJohn Baldwin #include <x86/apicreg.h> 7015add60dSPeter Grehan #include <x86/ifunc.h> 71366f6083SPeter Grehan 72366f6083SPeter Grehan #include <machine/vmm.h> 73565bbb86SNeel Natu #include <machine/vmm_dev.h> 74e813a873SNeel Natu #include <machine/vmm_instruction_emul.h> 75483d953aSJohn Baldwin #include <machine/vmm_snapshot.h> 76565bbb86SNeel Natu 77d17b5104SNeel Natu #include "vmm_ioport.h" 78318224bbSNeel Natu #include "vmm_ktr.h" 79b01c2033SNeel Natu #include "vmm_host.h" 80366f6083SPeter Grehan #include "vmm_mem.h" 81366f6083SPeter Grehan #include "vmm_util.h" 82762fd208STycho Nightingale #include "vatpic.h" 83e883c9bbSTycho Nightingale #include "vatpit.h" 8408e3ff32SNeel Natu #include "vhpet.h" 85565bbb86SNeel Natu #include "vioapic.h" 86366f6083SPeter Grehan #include "vlapic.h" 87160ef77aSNeel Natu #include "vpmtmr.h" 880dafa5cdSNeel Natu #include "vrtc.h" 89366f6083SPeter Grehan #include "vmm_stat.h" 90f76fc5d4SNeel Natu #include "vmm_lapic.h" 91366f6083SPeter Grehan 92366f6083SPeter Grehan #include "io/ppt.h" 93366f6083SPeter Grehan #include "io/iommu.h" 94366f6083SPeter Grehan 95366f6083SPeter Grehan struct vlapic; 96366f6083SPeter Grehan 975fcf252fSNeel Natu /* 985fcf252fSNeel Natu * Initialization: 995fcf252fSNeel Natu * (a) allocated when vcpu is created 1005fcf252fSNeel Natu * (i) initialized when vcpu is created and when it is reinitialized 1015fcf252fSNeel Natu * (o) initialized the first time the vcpu is created 1025fcf252fSNeel Natu * (x) initialized before use 1035fcf252fSNeel Natu */ 104366f6083SPeter Grehan struct vcpu { 1055fcf252fSNeel Natu struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */ 1065fcf252fSNeel Natu enum vcpu_state state; /* (o) vcpu state */ 107950af9ffSJohn Baldwin int vcpuid; /* (o) */ 1085fcf252fSNeel Natu int hostcpu; /* (o) vcpu's host cpu */ 109248e6799SNeel Natu int reqidle; /* (i) request vcpu to idle */ 110950af9ffSJohn Baldwin struct vm *vm; /* (o) */ 1111aa51504SJohn Baldwin void *cookie; /* (i) cpu-specific data */ 1125fcf252fSNeel Natu struct vlapic *vlapic; /* (i) APIC device model */ 1135fcf252fSNeel Natu enum x2apic_state x2apic_state; /* (i) APIC mode */ 114091d4532SNeel Natu uint64_t exitintinfo; /* (i) events pending at VM exit */ 1155fcf252fSNeel Natu int nmi_pending; /* (i) NMI pending */ 1165fcf252fSNeel Natu int extint_pending; /* (i) INTR pending */ 1175fcf252fSNeel Natu int exception_pending; /* (i) exception pending */ 118c9c75df4SNeel Natu int exc_vector; /* (x) exception collateral */ 119c9c75df4SNeel Natu int exc_errcode_valid; 120c9c75df4SNeel Natu uint32_t exc_errcode; 1215fcf252fSNeel Natu struct savefpu *guestfpu; /* (a,i) guest fpu state */ 1225fcf252fSNeel Natu uint64_t guest_xcr0; /* (i) guest %xcr0 register */ 1235fcf252fSNeel Natu void *stats; /* (a,i) statistics */ 1245fcf252fSNeel Natu struct vm_exit exitinfo; /* (x) exit reason and collateral */ 125d087a399SNeel Natu uint64_t nextrip; /* (x) next instruction to execute */ 126483d953aSJohn Baldwin uint64_t tsc_offset; /* (o) TSC offsetting */ 127366f6083SPeter Grehan }; 128366f6083SPeter Grehan 1295fcf252fSNeel Natu #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx)) 130f76fc5d4SNeel Natu #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) 131f76fc5d4SNeel Natu #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx)) 132f76fc5d4SNeel Natu #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx)) 133318224bbSNeel Natu #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED) 13475dd3366SNeel Natu 135318224bbSNeel Natu struct mem_seg { 1369b1aa8d6SNeel Natu size_t len; 1379b1aa8d6SNeel Natu bool sysmem; 1389b1aa8d6SNeel Natu struct vm_object *object; 1399b1aa8d6SNeel Natu }; 140e47fe318SCorvin Köhne #define VM_MAX_MEMSEGS 4 1419b1aa8d6SNeel Natu 1429b1aa8d6SNeel Natu struct mem_map { 143318224bbSNeel Natu vm_paddr_t gpa; 144318224bbSNeel Natu size_t len; 1459b1aa8d6SNeel Natu vm_ooffset_t segoff; 1469b1aa8d6SNeel Natu int segid; 1479b1aa8d6SNeel Natu int prot; 1489b1aa8d6SNeel Natu int flags; 149318224bbSNeel Natu }; 15000d3723fSConrad Meyer #define VM_MAX_MEMMAPS 8 151366f6083SPeter Grehan 152366f6083SPeter Grehan /* 1535fcf252fSNeel Natu * Initialization: 1545fcf252fSNeel Natu * (o) initialized the first time the VM is created 1555fcf252fSNeel Natu * (i) initialized when VM is created and when it is reinitialized 1565fcf252fSNeel Natu * (x) initialized before use 157366f6083SPeter Grehan */ 1585fcf252fSNeel Natu struct vm { 1595fcf252fSNeel Natu void *cookie; /* (i) cpu-specific data */ 1605fcf252fSNeel Natu void *iommu; /* (x) iommu-specific data */ 1615fcf252fSNeel Natu struct vhpet *vhpet; /* (i) virtual HPET */ 1625fcf252fSNeel Natu struct vioapic *vioapic; /* (i) virtual ioapic */ 1635fcf252fSNeel Natu struct vatpic *vatpic; /* (i) virtual atpic */ 1645fcf252fSNeel Natu struct vatpit *vatpit; /* (i) virtual atpit */ 165160ef77aSNeel Natu struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */ 1660dafa5cdSNeel Natu struct vrtc *vrtc; /* (o) virtual RTC */ 1675fcf252fSNeel Natu volatile cpuset_t active_cpus; /* (i) active vcpus */ 168fc276d92SJohn Baldwin volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */ 1695fcf252fSNeel Natu int suspend; /* (i) stop VM execution */ 1705fcf252fSNeel Natu volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ 1715fcf252fSNeel Natu volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */ 1725fcf252fSNeel Natu cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */ 1735fcf252fSNeel Natu cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */ 1745fcf252fSNeel Natu void *rendezvous_arg; /* (x) rendezvous func/arg */ 1755b8a8cd1SNeel Natu vm_rendezvous_func_t rendezvous_func; 1765fcf252fSNeel Natu struct mtx rendezvous_mtx; /* (o) rendezvous lock */ 1779b1aa8d6SNeel Natu struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */ 1789b1aa8d6SNeel Natu struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */ 1795fcf252fSNeel Natu struct vmspace *vmspace; /* (o) guest's address space */ 180df95cc76SKa Ho Ng char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */ 1815fcf252fSNeel Natu struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */ 18201d822d3SRodney W. Grimes /* The following describe the vm cpu topology */ 18301d822d3SRodney W. Grimes uint16_t sockets; /* (o) num of sockets */ 18401d822d3SRodney W. Grimes uint16_t cores; /* (o) num of cores/socket */ 18501d822d3SRodney W. Grimes uint16_t threads; /* (o) num of threads/core */ 18601d822d3SRodney W. Grimes uint16_t maxcpus; /* (o) max pluggable cpus */ 187366f6083SPeter Grehan }; 188366f6083SPeter Grehan 189950af9ffSJohn Baldwin #define VMM_CTR0(vcpu, format) \ 190950af9ffSJohn Baldwin VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format) 191950af9ffSJohn Baldwin 192950af9ffSJohn Baldwin #define VMM_CTR1(vcpu, format, p1) \ 193950af9ffSJohn Baldwin VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1) 194950af9ffSJohn Baldwin 195950af9ffSJohn Baldwin #define VMM_CTR2(vcpu, format, p1, p2) \ 196950af9ffSJohn Baldwin VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2) 197950af9ffSJohn Baldwin 198950af9ffSJohn Baldwin #define VMM_CTR3(vcpu, format, p1, p2, p3) \ 199950af9ffSJohn Baldwin VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3) 200950af9ffSJohn Baldwin 201950af9ffSJohn Baldwin #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \ 202950af9ffSJohn Baldwin VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4) 203950af9ffSJohn Baldwin 204d5408b1dSNeel Natu static int vmm_initialized; 205d5408b1dSNeel Natu 20615add60dSPeter Grehan static void vmmops_panic(void); 207366f6083SPeter Grehan 20815add60dSPeter Grehan static void 20915add60dSPeter Grehan vmmops_panic(void) 21015add60dSPeter Grehan { 21115add60dSPeter Grehan panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()"); 21215add60dSPeter Grehan } 21315add60dSPeter Grehan 21415add60dSPeter Grehan #define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \ 21515add60dSPeter Grehan DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \ 21615add60dSPeter Grehan { \ 21715add60dSPeter Grehan if (vmm_is_intel()) \ 21815add60dSPeter Grehan return (vmm_ops_intel.opname); \ 21915add60dSPeter Grehan else if (vmm_is_svm()) \ 22015add60dSPeter Grehan return (vmm_ops_amd.opname); \ 22115add60dSPeter Grehan else \ 22215add60dSPeter Grehan return ((ret_type (*)args)vmmops_panic); \ 22315add60dSPeter Grehan } 22415add60dSPeter Grehan 22515add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum)) 22615add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(int, modcleanup, (void)) 22715add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(void, modresume, (void)) 22815add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap)) 229869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap, 230869c8d19SJohn Baldwin struct vm_eventinfo *info)) 23115add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi)) 232950af9ffSJohn Baldwin DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu, 233950af9ffSJohn Baldwin int vcpu_id)) 234869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui)) 235869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval)) 236869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val)) 237869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc)) 238869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc)) 239869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval)) 240869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val)) 24115add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min, 24215add60dSPeter Grehan vm_offset_t max)) 24315add60dSPeter Grehan DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace)) 244869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui)) 245869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic)) 246483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 247869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta)) 248869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui, 249869c8d19SJohn Baldwin struct vm_snapshot_meta *meta)) 250869c8d19SJohn Baldwin DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now)) 251483d953aSJohn Baldwin #endif 252366f6083SPeter Grehan 253014a52f3SNeel Natu #define fpu_start_emulating() load_cr0(rcr0() | CR0_TS) 254014a52f3SNeel Natu #define fpu_stop_emulating() clts() 255366f6083SPeter Grehan 2566ac73777STycho Nightingale SDT_PROVIDER_DEFINE(vmm); 2576ac73777STycho Nightingale 258366f6083SPeter Grehan static MALLOC_DEFINE(M_VM, "vm", "vm"); 259366f6083SPeter Grehan 260366f6083SPeter Grehan /* statistics */ 26161592433SNeel Natu static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime"); 262366f6083SPeter Grehan 263b40598c5SPawel Biernacki SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 264b40598c5SPawel Biernacki NULL); 265add611fdSNeel Natu 266055fc2cbSNeel Natu /* 267055fc2cbSNeel Natu * Halt the guest if all vcpus are executing a HLT instruction with 268055fc2cbSNeel Natu * interrupts disabled. 269055fc2cbSNeel Natu */ 270055fc2cbSNeel Natu static int halt_detection_enabled = 1; 271055fc2cbSNeel Natu SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN, 272055fc2cbSNeel Natu &halt_detection_enabled, 0, 273055fc2cbSNeel Natu "Halt VM if all vcpus execute HLT with interrupts disabled"); 274055fc2cbSNeel Natu 275978f3da1SAndriy Gapon static int vmm_ipinum; 276add611fdSNeel Natu SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0, 277add611fdSNeel Natu "IPI vector used for vcpu notifications"); 278add611fdSNeel Natu 279b0538143SNeel Natu static int trace_guest_exceptions; 280b0538143SNeel Natu SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN, 281b0538143SNeel Natu &trace_guest_exceptions, 0, 282b0538143SNeel Natu "Trap into hypervisor on all guest exceptions and reflect them back"); 283b0538143SNeel Natu 2843ba952e1SCorvin Köhne static int trap_wbinvd; 2853ba952e1SCorvin Köhne SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0, 2863ba952e1SCorvin Köhne "WBINVD triggers a VM-exit"); 2873ba952e1SCorvin Köhne 2889b1aa8d6SNeel Natu static void vm_free_memmap(struct vm *vm, int ident); 2899b1aa8d6SNeel Natu static bool sysmem_mapping(struct vm *vm, struct mem_map *mm); 290248e6799SNeel Natu static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr); 291248e6799SNeel Natu 292248e6799SNeel Natu #ifdef KTR 293248e6799SNeel Natu static const char * 294248e6799SNeel Natu vcpu_state2str(enum vcpu_state state) 295248e6799SNeel Natu { 296248e6799SNeel Natu 297248e6799SNeel Natu switch (state) { 298248e6799SNeel Natu case VCPU_IDLE: 299248e6799SNeel Natu return ("idle"); 300248e6799SNeel Natu case VCPU_FROZEN: 301248e6799SNeel Natu return ("frozen"); 302248e6799SNeel Natu case VCPU_RUNNING: 303248e6799SNeel Natu return ("running"); 304248e6799SNeel Natu case VCPU_SLEEPING: 305248e6799SNeel Natu return ("sleeping"); 306248e6799SNeel Natu default: 307248e6799SNeel Natu return ("unknown"); 308248e6799SNeel Natu } 309248e6799SNeel Natu } 310248e6799SNeel Natu #endif 311248e6799SNeel Natu 3121aa51504SJohn Baldwin static __inline void * 3131aa51504SJohn Baldwin vcpu_cookie(struct vm *vm, int i) 3141aa51504SJohn Baldwin { 3151aa51504SJohn Baldwin return (vm->vcpu[i].cookie); 3161aa51504SJohn Baldwin } 3171aa51504SJohn Baldwin 318366f6083SPeter Grehan static void 3195fcf252fSNeel Natu vcpu_cleanup(struct vm *vm, int i, bool destroy) 320366f6083SPeter Grehan { 321de5ea6b6SNeel Natu struct vcpu *vcpu = &vm->vcpu[i]; 322de5ea6b6SNeel Natu 323869c8d19SJohn Baldwin vmmops_vlapic_cleanup(vcpu->vlapic); 324869c8d19SJohn Baldwin vmmops_vcpu_cleanup(vcpu->cookie); 3251aa51504SJohn Baldwin vcpu->cookie = NULL; 3265fcf252fSNeel Natu if (destroy) { 327366f6083SPeter Grehan vmm_stat_free(vcpu->stats); 32838f1b189SPeter Grehan fpu_save_area_free(vcpu->guestfpu); 329366f6083SPeter Grehan } 3305fcf252fSNeel Natu } 331366f6083SPeter Grehan 332366f6083SPeter Grehan static void 3335fcf252fSNeel Natu vcpu_init(struct vm *vm, int vcpu_id, bool create) 334366f6083SPeter Grehan { 335366f6083SPeter Grehan struct vcpu *vcpu; 336366f6083SPeter Grehan 337a488c9c9SRodney W. Grimes KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, 3385fcf252fSNeel Natu ("vcpu_init: invalid vcpu %d", vcpu_id)); 3395fcf252fSNeel Natu 340366f6083SPeter Grehan vcpu = &vm->vcpu[vcpu_id]; 341366f6083SPeter Grehan 3425fcf252fSNeel Natu if (create) { 3435fcf252fSNeel Natu KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already " 3445fcf252fSNeel Natu "initialized", vcpu_id)); 34575dd3366SNeel Natu vcpu_lock_init(vcpu); 3465fcf252fSNeel Natu vcpu->state = VCPU_IDLE; 34775dd3366SNeel Natu vcpu->hostcpu = NOCPU; 348950af9ffSJohn Baldwin vcpu->vcpuid = vcpu_id; 349950af9ffSJohn Baldwin vcpu->vm = vm; 3505fcf252fSNeel Natu vcpu->guestfpu = fpu_save_area_alloc(); 3515fcf252fSNeel Natu vcpu->stats = vmm_stat_alloc(); 352483d953aSJohn Baldwin vcpu->tsc_offset = 0; 3535fcf252fSNeel Natu } 3545fcf252fSNeel Natu 355950af9ffSJohn Baldwin vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu, vcpu_id); 356869c8d19SJohn Baldwin vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie); 35752e5c8a2SNeel Natu vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED); 358248e6799SNeel Natu vcpu->reqidle = 0; 359091d4532SNeel Natu vcpu->exitintinfo = 0; 3605fcf252fSNeel Natu vcpu->nmi_pending = 0; 3615fcf252fSNeel Natu vcpu->extint_pending = 0; 3625fcf252fSNeel Natu vcpu->exception_pending = 0; 363abb023fbSJohn Baldwin vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; 36438f1b189SPeter Grehan fpu_save_area_reset(vcpu->guestfpu); 3655fcf252fSNeel Natu vmm_stat_init(vcpu->stats); 366366f6083SPeter Grehan } 367366f6083SPeter Grehan 368b0538143SNeel Natu int 36980cb5d84SJohn Baldwin vcpu_trace_exceptions(struct vcpu *vcpu) 370b0538143SNeel Natu { 371b0538143SNeel Natu 372b0538143SNeel Natu return (trace_guest_exceptions); 373b0538143SNeel Natu } 374b0538143SNeel Natu 3753ba952e1SCorvin Köhne int 37680cb5d84SJohn Baldwin vcpu_trap_wbinvd(struct vcpu *vcpu) 3773ba952e1SCorvin Köhne { 3783ba952e1SCorvin Köhne return (trap_wbinvd); 3793ba952e1SCorvin Köhne } 3803ba952e1SCorvin Köhne 38198ed632cSNeel Natu struct vm_exit * 38280cb5d84SJohn Baldwin vm_exitinfo(struct vcpu *vcpu) 38398ed632cSNeel Natu { 38498ed632cSNeel Natu return (&vcpu->exitinfo); 38598ed632cSNeel Natu } 38698ed632cSNeel Natu 387366f6083SPeter Grehan static int 388366f6083SPeter Grehan vmm_init(void) 389366f6083SPeter Grehan { 390366f6083SPeter Grehan int error; 391366f6083SPeter Grehan 39215add60dSPeter Grehan if (!vmm_is_hw_supported()) 39315add60dSPeter Grehan return (ENXIO); 39415add60dSPeter Grehan 395b01c2033SNeel Natu vmm_host_state_init(); 396add611fdSNeel Natu 397bd50262fSKonstantin Belousov vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : 398bd50262fSKonstantin Belousov &IDTVEC(justreturn)); 39918a2b08eSNeel Natu if (vmm_ipinum < 0) 400add611fdSNeel Natu vmm_ipinum = IPI_AST; 401366f6083SPeter Grehan 402366f6083SPeter Grehan error = vmm_mem_init(); 403366f6083SPeter Grehan if (error) 404366f6083SPeter Grehan return (error); 405366f6083SPeter Grehan 40615add60dSPeter Grehan vmm_resume_p = vmmops_modresume; 407366f6083SPeter Grehan 40815add60dSPeter Grehan return (vmmops_modinit(vmm_ipinum)); 409366f6083SPeter Grehan } 410366f6083SPeter Grehan 411366f6083SPeter Grehan static int 412366f6083SPeter Grehan vmm_handler(module_t mod, int what, void *arg) 413366f6083SPeter Grehan { 414366f6083SPeter Grehan int error; 415366f6083SPeter Grehan 416366f6083SPeter Grehan switch (what) { 417366f6083SPeter Grehan case MOD_LOAD: 41815add60dSPeter Grehan if (vmm_is_hw_supported()) { 419366f6083SPeter Grehan vmmdev_init(); 420366f6083SPeter Grehan error = vmm_init(); 421d5408b1dSNeel Natu if (error == 0) 422d5408b1dSNeel Natu vmm_initialized = 1; 42315add60dSPeter Grehan } else { 42415add60dSPeter Grehan error = ENXIO; 42515add60dSPeter Grehan } 426366f6083SPeter Grehan break; 427366f6083SPeter Grehan case MOD_UNLOAD: 42815add60dSPeter Grehan if (vmm_is_hw_supported()) { 429cdc5b9e7SNeel Natu error = vmmdev_cleanup(); 430cdc5b9e7SNeel Natu if (error == 0) { 43163e62d39SJohn Baldwin vmm_resume_p = NULL; 432366f6083SPeter Grehan iommu_cleanup(); 433add611fdSNeel Natu if (vmm_ipinum != IPI_AST) 43418a2b08eSNeel Natu lapic_ipi_free(vmm_ipinum); 43515add60dSPeter Grehan error = vmmops_modcleanup(); 43681ef6611SPeter Grehan /* 43781ef6611SPeter Grehan * Something bad happened - prevent new 43881ef6611SPeter Grehan * VMs from being created 43981ef6611SPeter Grehan */ 44081ef6611SPeter Grehan if (error) 441d5408b1dSNeel Natu vmm_initialized = 0; 44281ef6611SPeter Grehan } 44315add60dSPeter Grehan } else { 44415add60dSPeter Grehan error = 0; 44515add60dSPeter Grehan } 446366f6083SPeter Grehan break; 447366f6083SPeter Grehan default: 448366f6083SPeter Grehan error = 0; 449366f6083SPeter Grehan break; 450366f6083SPeter Grehan } 451366f6083SPeter Grehan return (error); 452366f6083SPeter Grehan } 453366f6083SPeter Grehan 454366f6083SPeter Grehan static moduledata_t vmm_kmod = { 455366f6083SPeter Grehan "vmm", 456366f6083SPeter Grehan vmm_handler, 457366f6083SPeter Grehan NULL 458366f6083SPeter Grehan }; 459366f6083SPeter Grehan 460366f6083SPeter Grehan /* 461e3f0800bSNeel Natu * vmm initialization has the following dependencies: 462e3f0800bSNeel Natu * 463e3f0800bSNeel Natu * - VT-x initialization requires smp_rendezvous() and therefore must happen 464e3f0800bSNeel Natu * after SMP is fully functional (after SI_SUB_SMP). 465366f6083SPeter Grehan */ 466e3f0800bSNeel Natu DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY); 467366f6083SPeter Grehan MODULE_VERSION(vmm, 1); 468366f6083SPeter Grehan 4695fcf252fSNeel Natu static void 4705fcf252fSNeel Natu vm_init(struct vm *vm, bool create) 4715fcf252fSNeel Natu { 4725fcf252fSNeel Natu int i; 4735fcf252fSNeel Natu 47415add60dSPeter Grehan vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); 4755fcf252fSNeel Natu vm->iommu = NULL; 4765fcf252fSNeel Natu vm->vioapic = vioapic_init(vm); 4775fcf252fSNeel Natu vm->vhpet = vhpet_init(vm); 4785fcf252fSNeel Natu vm->vatpic = vatpic_init(vm); 4795fcf252fSNeel Natu vm->vatpit = vatpit_init(vm); 480160ef77aSNeel Natu vm->vpmtmr = vpmtmr_init(vm); 4810dafa5cdSNeel Natu if (create) 4820dafa5cdSNeel Natu vm->vrtc = vrtc_init(vm); 4835fcf252fSNeel Natu 4845fcf252fSNeel Natu CPU_ZERO(&vm->active_cpus); 485fc276d92SJohn Baldwin CPU_ZERO(&vm->debug_cpus); 4865fcf252fSNeel Natu 4875fcf252fSNeel Natu vm->suspend = 0; 4885fcf252fSNeel Natu CPU_ZERO(&vm->suspended_cpus); 4895fcf252fSNeel Natu 490a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) 4915fcf252fSNeel Natu vcpu_init(vm, i, create); 4925fcf252fSNeel Natu } 4935fcf252fSNeel Natu 49401d822d3SRodney W. Grimes /* 49501d822d3SRodney W. Grimes * The default CPU topology is a single thread per package. 49601d822d3SRodney W. Grimes */ 49701d822d3SRodney W. Grimes u_int cores_per_package = 1; 49801d822d3SRodney W. Grimes u_int threads_per_core = 1; 49901d822d3SRodney W. Grimes 500d5408b1dSNeel Natu int 501d5408b1dSNeel Natu vm_create(const char *name, struct vm **retvm) 502366f6083SPeter Grehan { 503366f6083SPeter Grehan struct vm *vm; 504318224bbSNeel Natu struct vmspace *vmspace; 505366f6083SPeter Grehan 506d5408b1dSNeel Natu /* 507d5408b1dSNeel Natu * If vmm.ko could not be successfully initialized then don't attempt 508d5408b1dSNeel Natu * to create the virtual machine. 509d5408b1dSNeel Natu */ 510d5408b1dSNeel Natu if (!vmm_initialized) 511d5408b1dSNeel Natu return (ENXIO); 512d5408b1dSNeel Natu 513df95cc76SKa Ho Ng if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) == 514df95cc76SKa Ho Ng VM_MAX_NAMELEN + 1) 515d5408b1dSNeel Natu return (EINVAL); 516366f6083SPeter Grehan 5173c48106aSKonstantin Belousov vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48); 518318224bbSNeel Natu if (vmspace == NULL) 519318224bbSNeel Natu return (ENOMEM); 520318224bbSNeel Natu 521366f6083SPeter Grehan vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); 522366f6083SPeter Grehan strcpy(vm->name, name); 52388c4b8d1SNeel Natu vm->vmspace = vmspace; 5245b8a8cd1SNeel Natu mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); 525366f6083SPeter Grehan 52601d822d3SRodney W. Grimes vm->sockets = 1; 52701d822d3SRodney W. Grimes vm->cores = cores_per_package; /* XXX backwards compatibility */ 52801d822d3SRodney W. Grimes vm->threads = threads_per_core; /* XXX backwards compatibility */ 529a488c9c9SRodney W. Grimes vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ 53001d822d3SRodney W. Grimes 5315fcf252fSNeel Natu vm_init(vm, true); 532366f6083SPeter Grehan 533d5408b1dSNeel Natu *retvm = vm; 534d5408b1dSNeel Natu return (0); 535366f6083SPeter Grehan } 536366f6083SPeter Grehan 53701d822d3SRodney W. Grimes void 53801d822d3SRodney W. Grimes vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, 53901d822d3SRodney W. Grimes uint16_t *threads, uint16_t *maxcpus) 54001d822d3SRodney W. Grimes { 54101d822d3SRodney W. Grimes *sockets = vm->sockets; 54201d822d3SRodney W. Grimes *cores = vm->cores; 54301d822d3SRodney W. Grimes *threads = vm->threads; 54401d822d3SRodney W. Grimes *maxcpus = vm->maxcpus; 54501d822d3SRodney W. Grimes } 54601d822d3SRodney W. Grimes 547a488c9c9SRodney W. Grimes uint16_t 548a488c9c9SRodney W. Grimes vm_get_maxcpus(struct vm *vm) 549a488c9c9SRodney W. Grimes { 550a488c9c9SRodney W. Grimes return (vm->maxcpus); 551a488c9c9SRodney W. Grimes } 552a488c9c9SRodney W. Grimes 55301d822d3SRodney W. Grimes int 55401d822d3SRodney W. Grimes vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, 55501d822d3SRodney W. Grimes uint16_t threads, uint16_t maxcpus) 55601d822d3SRodney W. Grimes { 55701d822d3SRodney W. Grimes if (maxcpus != 0) 55801d822d3SRodney W. Grimes return (EINVAL); /* XXX remove when supported */ 559a488c9c9SRodney W. Grimes if ((sockets * cores * threads) > vm->maxcpus) 56001d822d3SRodney W. Grimes return (EINVAL); 56101d822d3SRodney W. Grimes /* XXX need to check sockets * cores * threads == vCPU, how? */ 56201d822d3SRodney W. Grimes vm->sockets = sockets; 56301d822d3SRodney W. Grimes vm->cores = cores; 56401d822d3SRodney W. Grimes vm->threads = threads; 565a488c9c9SRodney W. Grimes vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ 56601d822d3SRodney W. Grimes return(0); 56701d822d3SRodney W. Grimes } 56801d822d3SRodney W. Grimes 569f7d51510SNeel Natu static void 5705fcf252fSNeel Natu vm_cleanup(struct vm *vm, bool destroy) 571366f6083SPeter Grehan { 5729b1aa8d6SNeel Natu struct mem_map *mm; 573366f6083SPeter Grehan int i; 574366f6083SPeter Grehan 575366f6083SPeter Grehan ppt_unassign_all(vm); 576366f6083SPeter Grehan 577318224bbSNeel Natu if (vm->iommu != NULL) 578318224bbSNeel Natu iommu_destroy_domain(vm->iommu); 579318224bbSNeel Natu 5800dafa5cdSNeel Natu if (destroy) 5810dafa5cdSNeel Natu vrtc_cleanup(vm->vrtc); 5820dafa5cdSNeel Natu else 5830dafa5cdSNeel Natu vrtc_reset(vm->vrtc); 584160ef77aSNeel Natu vpmtmr_cleanup(vm->vpmtmr); 585e883c9bbSTycho Nightingale vatpit_cleanup(vm->vatpit); 58608e3ff32SNeel Natu vhpet_cleanup(vm->vhpet); 587762fd208STycho Nightingale vatpic_cleanup(vm->vatpic); 58808e3ff32SNeel Natu vioapic_cleanup(vm->vioapic); 58908e3ff32SNeel Natu 590a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) 5915fcf252fSNeel Natu vcpu_cleanup(vm, i, destroy); 5925fcf252fSNeel Natu 59315add60dSPeter Grehan vmmops_cleanup(vm->cookie); 5945fcf252fSNeel Natu 5959b1aa8d6SNeel Natu /* 5969b1aa8d6SNeel Natu * System memory is removed from the guest address space only when 5979b1aa8d6SNeel Natu * the VM is destroyed. This is because the mapping remains the same 5989b1aa8d6SNeel Natu * across VM reset. 5999b1aa8d6SNeel Natu * 6009b1aa8d6SNeel Natu * Device memory can be relocated by the guest (e.g. using PCI BARs) 6019b1aa8d6SNeel Natu * so those mappings are removed on a VM reset. 6029b1aa8d6SNeel Natu */ 6039b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 6049b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 6059b1aa8d6SNeel Natu if (destroy || !sysmem_mapping(vm, mm)) 6069b1aa8d6SNeel Natu vm_free_memmap(vm, i); 6079b1aa8d6SNeel Natu } 608f7d51510SNeel Natu 6099b1aa8d6SNeel Natu if (destroy) { 6109b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMSEGS; i++) 6119b1aa8d6SNeel Natu vm_free_memseg(vm, i); 612366f6083SPeter Grehan 61315add60dSPeter Grehan vmmops_vmspace_free(vm->vmspace); 6145fcf252fSNeel Natu vm->vmspace = NULL; 6155fcf252fSNeel Natu } 6165fcf252fSNeel Natu } 617366f6083SPeter Grehan 6185fcf252fSNeel Natu void 6195fcf252fSNeel Natu vm_destroy(struct vm *vm) 6205fcf252fSNeel Natu { 6215fcf252fSNeel Natu vm_cleanup(vm, true); 622366f6083SPeter Grehan free(vm, M_VM); 623366f6083SPeter Grehan } 624366f6083SPeter Grehan 6255fcf252fSNeel Natu int 6265fcf252fSNeel Natu vm_reinit(struct vm *vm) 6275fcf252fSNeel Natu { 6285fcf252fSNeel Natu int error; 6295fcf252fSNeel Natu 6305fcf252fSNeel Natu /* 6315fcf252fSNeel Natu * A virtual machine can be reset only if all vcpus are suspended. 6325fcf252fSNeel Natu */ 6335fcf252fSNeel Natu if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { 6345fcf252fSNeel Natu vm_cleanup(vm, false); 6355fcf252fSNeel Natu vm_init(vm, false); 6365fcf252fSNeel Natu error = 0; 6375fcf252fSNeel Natu } else { 6385fcf252fSNeel Natu error = EBUSY; 6395fcf252fSNeel Natu } 6405fcf252fSNeel Natu 6415fcf252fSNeel Natu return (error); 6425fcf252fSNeel Natu } 6435fcf252fSNeel Natu 644366f6083SPeter Grehan const char * 645366f6083SPeter Grehan vm_name(struct vm *vm) 646366f6083SPeter Grehan { 647366f6083SPeter Grehan return (vm->name); 648366f6083SPeter Grehan } 649366f6083SPeter Grehan 650366f6083SPeter Grehan int 651366f6083SPeter Grehan vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 652366f6083SPeter Grehan { 653318224bbSNeel Natu vm_object_t obj; 654366f6083SPeter Grehan 655318224bbSNeel Natu if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) 656318224bbSNeel Natu return (ENOMEM); 657318224bbSNeel Natu else 658318224bbSNeel Natu return (0); 659366f6083SPeter Grehan } 660366f6083SPeter Grehan 661366f6083SPeter Grehan int 662366f6083SPeter Grehan vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) 663366f6083SPeter Grehan { 664366f6083SPeter Grehan 665318224bbSNeel Natu vmm_mmio_free(vm->vmspace, gpa, len); 666318224bbSNeel Natu return (0); 667366f6083SPeter Grehan } 668366f6083SPeter Grehan 6699b1aa8d6SNeel Natu /* 6709b1aa8d6SNeel Natu * Return 'true' if 'gpa' is allocated in the guest address space. 6719b1aa8d6SNeel Natu * 6729b1aa8d6SNeel Natu * This function is called in the context of a running vcpu which acts as 6739b1aa8d6SNeel Natu * an implicit lock on 'vm->mem_maps[]'. 6749b1aa8d6SNeel Natu */ 6759b1aa8d6SNeel Natu bool 67680cb5d84SJohn Baldwin vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) 677366f6083SPeter Grehan { 67880cb5d84SJohn Baldwin struct vm *vm = vcpu->vm; 6799b1aa8d6SNeel Natu struct mem_map *mm; 680341f19c9SNeel Natu int i; 681341f19c9SNeel Natu 6829b1aa8d6SNeel Natu #ifdef INVARIANTS 6839b1aa8d6SNeel Natu int hostcpu, state; 68480cb5d84SJohn Baldwin state = vcpu_get_state(vcpu, &hostcpu); 6859b1aa8d6SNeel Natu KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, 6869b1aa8d6SNeel Natu ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); 6879b1aa8d6SNeel Natu #endif 6889b1aa8d6SNeel Natu 6899b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 6909b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 6919b1aa8d6SNeel Natu if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) 6929b1aa8d6SNeel Natu return (true); /* 'gpa' is sysmem or devmem */ 693341f19c9SNeel Natu } 694341f19c9SNeel Natu 695318224bbSNeel Natu if (ppt_is_mmio(vm, gpa)) 6969b1aa8d6SNeel Natu return (true); /* 'gpa' is pci passthru mmio */ 697318224bbSNeel Natu 6989b1aa8d6SNeel Natu return (false); 699341f19c9SNeel Natu } 700341f19c9SNeel Natu 701341f19c9SNeel Natu int 7029b1aa8d6SNeel Natu vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) 703341f19c9SNeel Natu { 704318224bbSNeel Natu struct mem_seg *seg; 7059b1aa8d6SNeel Natu vm_object_t obj; 706366f6083SPeter Grehan 7079b1aa8d6SNeel Natu if (ident < 0 || ident >= VM_MAX_MEMSEGS) 708341f19c9SNeel Natu return (EINVAL); 709341f19c9SNeel Natu 7109b1aa8d6SNeel Natu if (len == 0 || (len & PAGE_MASK)) 7119b1aa8d6SNeel Natu return (EINVAL); 712341f19c9SNeel Natu 7139b1aa8d6SNeel Natu seg = &vm->mem_segs[ident]; 7149b1aa8d6SNeel Natu if (seg->object != NULL) { 7159b1aa8d6SNeel Natu if (seg->len == len && seg->sysmem == sysmem) 7169b1aa8d6SNeel Natu return (EEXIST); 7179b1aa8d6SNeel Natu else 7189b1aa8d6SNeel Natu return (EINVAL); 719341f19c9SNeel Natu } 720341f19c9SNeel Natu 7216b389740SMark Johnston obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT); 7229b1aa8d6SNeel Natu if (obj == NULL) 723318224bbSNeel Natu return (ENOMEM); 724318224bbSNeel Natu 725318224bbSNeel Natu seg->len = len; 7269b1aa8d6SNeel Natu seg->object = obj; 7279b1aa8d6SNeel Natu seg->sysmem = sysmem; 728366f6083SPeter Grehan return (0); 729366f6083SPeter Grehan } 730366f6083SPeter Grehan 7319b1aa8d6SNeel Natu int 7329b1aa8d6SNeel Natu vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, 7339b1aa8d6SNeel Natu vm_object_t *objptr) 734477867a0SNeel Natu { 7359b1aa8d6SNeel Natu struct mem_seg *seg; 736477867a0SNeel Natu 7379b1aa8d6SNeel Natu if (ident < 0 || ident >= VM_MAX_MEMSEGS) 7389b1aa8d6SNeel Natu return (EINVAL); 7399b1aa8d6SNeel Natu 7409b1aa8d6SNeel Natu seg = &vm->mem_segs[ident]; 7419b1aa8d6SNeel Natu if (len) 7429b1aa8d6SNeel Natu *len = seg->len; 7439b1aa8d6SNeel Natu if (sysmem) 7449b1aa8d6SNeel Natu *sysmem = seg->sysmem; 7459b1aa8d6SNeel Natu if (objptr) 7469b1aa8d6SNeel Natu *objptr = seg->object; 7479b1aa8d6SNeel Natu return (0); 748477867a0SNeel Natu } 7499b1aa8d6SNeel Natu 7509b1aa8d6SNeel Natu void 7519b1aa8d6SNeel Natu vm_free_memseg(struct vm *vm, int ident) 7529b1aa8d6SNeel Natu { 7539b1aa8d6SNeel Natu struct mem_seg *seg; 7549b1aa8d6SNeel Natu 7559b1aa8d6SNeel Natu KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, 7569b1aa8d6SNeel Natu ("%s: invalid memseg ident %d", __func__, ident)); 7579b1aa8d6SNeel Natu 7589b1aa8d6SNeel Natu seg = &vm->mem_segs[ident]; 7599b1aa8d6SNeel Natu if (seg->object != NULL) { 7609b1aa8d6SNeel Natu vm_object_deallocate(seg->object); 7619b1aa8d6SNeel Natu bzero(seg, sizeof(struct mem_seg)); 7629b1aa8d6SNeel Natu } 7639b1aa8d6SNeel Natu } 7649b1aa8d6SNeel Natu 7659b1aa8d6SNeel Natu int 7669b1aa8d6SNeel Natu vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, 7679b1aa8d6SNeel Natu size_t len, int prot, int flags) 7689b1aa8d6SNeel Natu { 7699b1aa8d6SNeel Natu struct mem_seg *seg; 7709b1aa8d6SNeel Natu struct mem_map *m, *map; 7719b1aa8d6SNeel Natu vm_ooffset_t last; 7729b1aa8d6SNeel Natu int i, error; 7739b1aa8d6SNeel Natu 7749b1aa8d6SNeel Natu if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) 7759b1aa8d6SNeel Natu return (EINVAL); 7769b1aa8d6SNeel Natu 7779b1aa8d6SNeel Natu if (flags & ~VM_MEMMAP_F_WIRED) 7789b1aa8d6SNeel Natu return (EINVAL); 7799b1aa8d6SNeel Natu 7809b1aa8d6SNeel Natu if (segid < 0 || segid >= VM_MAX_MEMSEGS) 7819b1aa8d6SNeel Natu return (EINVAL); 7829b1aa8d6SNeel Natu 7839b1aa8d6SNeel Natu seg = &vm->mem_segs[segid]; 7849b1aa8d6SNeel Natu if (seg->object == NULL) 7859b1aa8d6SNeel Natu return (EINVAL); 7869b1aa8d6SNeel Natu 7879b1aa8d6SNeel Natu last = first + len; 7889b1aa8d6SNeel Natu if (first < 0 || first >= last || last > seg->len) 7899b1aa8d6SNeel Natu return (EINVAL); 7909b1aa8d6SNeel Natu 7919b1aa8d6SNeel Natu if ((gpa | first | last) & PAGE_MASK) 7929b1aa8d6SNeel Natu return (EINVAL); 7939b1aa8d6SNeel Natu 7949b1aa8d6SNeel Natu map = NULL; 7959b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 7969b1aa8d6SNeel Natu m = &vm->mem_maps[i]; 7979b1aa8d6SNeel Natu if (m->len == 0) { 7989b1aa8d6SNeel Natu map = m; 7999b1aa8d6SNeel Natu break; 8009b1aa8d6SNeel Natu } 8019b1aa8d6SNeel Natu } 8029b1aa8d6SNeel Natu 8039b1aa8d6SNeel Natu if (map == NULL) 8049b1aa8d6SNeel Natu return (ENOSPC); 8059b1aa8d6SNeel Natu 8069b1aa8d6SNeel Natu error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, 8079b1aa8d6SNeel Natu len, 0, VMFS_NO_SPACE, prot, prot, 0); 8089b1aa8d6SNeel Natu if (error != KERN_SUCCESS) 8099b1aa8d6SNeel Natu return (EFAULT); 8109b1aa8d6SNeel Natu 8119b1aa8d6SNeel Natu vm_object_reference(seg->object); 8129b1aa8d6SNeel Natu 8139b1aa8d6SNeel Natu if (flags & VM_MEMMAP_F_WIRED) { 8149b1aa8d6SNeel Natu error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, 8159b1aa8d6SNeel Natu VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 8169b1aa8d6SNeel Natu if (error != KERN_SUCCESS) { 8179b1aa8d6SNeel Natu vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); 81854a3a114SMark Johnston return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM : 81954a3a114SMark Johnston EFAULT); 8209b1aa8d6SNeel Natu } 8219b1aa8d6SNeel Natu } 8229b1aa8d6SNeel Natu 8239b1aa8d6SNeel Natu map->gpa = gpa; 8249b1aa8d6SNeel Natu map->len = len; 8259b1aa8d6SNeel Natu map->segoff = first; 8269b1aa8d6SNeel Natu map->segid = segid; 8279b1aa8d6SNeel Natu map->prot = prot; 8289b1aa8d6SNeel Natu map->flags = flags; 8299b1aa8d6SNeel Natu return (0); 8309b1aa8d6SNeel Natu } 8319b1aa8d6SNeel Natu 8329b1aa8d6SNeel Natu int 833f8a6ec2dSD Scott Phillips vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) 834f8a6ec2dSD Scott Phillips { 835f8a6ec2dSD Scott Phillips struct mem_map *m; 836f8a6ec2dSD Scott Phillips int i; 837f8a6ec2dSD Scott Phillips 838f8a6ec2dSD Scott Phillips for (i = 0; i < VM_MAX_MEMMAPS; i++) { 839f8a6ec2dSD Scott Phillips m = &vm->mem_maps[i]; 840f8a6ec2dSD Scott Phillips if (m->gpa == gpa && m->len == len && 841f8a6ec2dSD Scott Phillips (m->flags & VM_MEMMAP_F_IOMMU) == 0) { 842f8a6ec2dSD Scott Phillips vm_free_memmap(vm, i); 843f8a6ec2dSD Scott Phillips return (0); 844f8a6ec2dSD Scott Phillips } 845f8a6ec2dSD Scott Phillips } 846f8a6ec2dSD Scott Phillips 847f8a6ec2dSD Scott Phillips return (EINVAL); 848f8a6ec2dSD Scott Phillips } 849f8a6ec2dSD Scott Phillips 850f8a6ec2dSD Scott Phillips int 8519b1aa8d6SNeel Natu vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, 8529b1aa8d6SNeel Natu vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) 8539b1aa8d6SNeel Natu { 8549b1aa8d6SNeel Natu struct mem_map *mm, *mmnext; 8559b1aa8d6SNeel Natu int i; 8569b1aa8d6SNeel Natu 8579b1aa8d6SNeel Natu mmnext = NULL; 8589b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 8599b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 8609b1aa8d6SNeel Natu if (mm->len == 0 || mm->gpa < *gpa) 8619b1aa8d6SNeel Natu continue; 8629b1aa8d6SNeel Natu if (mmnext == NULL || mm->gpa < mmnext->gpa) 8639b1aa8d6SNeel Natu mmnext = mm; 8649b1aa8d6SNeel Natu } 8659b1aa8d6SNeel Natu 8669b1aa8d6SNeel Natu if (mmnext != NULL) { 8679b1aa8d6SNeel Natu *gpa = mmnext->gpa; 8689b1aa8d6SNeel Natu if (segid) 8699b1aa8d6SNeel Natu *segid = mmnext->segid; 8709b1aa8d6SNeel Natu if (segoff) 8719b1aa8d6SNeel Natu *segoff = mmnext->segoff; 8729b1aa8d6SNeel Natu if (len) 8739b1aa8d6SNeel Natu *len = mmnext->len; 8749b1aa8d6SNeel Natu if (prot) 8759b1aa8d6SNeel Natu *prot = mmnext->prot; 8769b1aa8d6SNeel Natu if (flags) 8779b1aa8d6SNeel Natu *flags = mmnext->flags; 8789b1aa8d6SNeel Natu return (0); 8799b1aa8d6SNeel Natu } else { 8809b1aa8d6SNeel Natu return (ENOENT); 8819b1aa8d6SNeel Natu } 882477867a0SNeel Natu } 883477867a0SNeel Natu 884318224bbSNeel Natu static void 8859b1aa8d6SNeel Natu vm_free_memmap(struct vm *vm, int ident) 886366f6083SPeter Grehan { 8879b1aa8d6SNeel Natu struct mem_map *mm; 88873505a10SRobert Wing int error __diagused; 8894db4fb2cSNeel Natu 8909b1aa8d6SNeel Natu mm = &vm->mem_maps[ident]; 8919b1aa8d6SNeel Natu if (mm->len) { 8929b1aa8d6SNeel Natu error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, 8939b1aa8d6SNeel Natu mm->gpa + mm->len); 8949b1aa8d6SNeel Natu KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", 8959b1aa8d6SNeel Natu __func__, error)); 8969b1aa8d6SNeel Natu bzero(mm, sizeof(struct mem_map)); 897318224bbSNeel Natu } 898318224bbSNeel Natu } 899318224bbSNeel Natu 9009b1aa8d6SNeel Natu static __inline bool 9019b1aa8d6SNeel Natu sysmem_mapping(struct vm *vm, struct mem_map *mm) 902318224bbSNeel Natu { 903318224bbSNeel Natu 9049b1aa8d6SNeel Natu if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) 9059b1aa8d6SNeel Natu return (true); 9069b1aa8d6SNeel Natu else 9079b1aa8d6SNeel Natu return (false); 908318224bbSNeel Natu } 909318224bbSNeel Natu 910147d12a7SAntoine Brodin vm_paddr_t 911147d12a7SAntoine Brodin vmm_sysmem_maxaddr(struct vm *vm) 9129b1aa8d6SNeel Natu { 9139b1aa8d6SNeel Natu struct mem_map *mm; 9149b1aa8d6SNeel Natu vm_paddr_t maxaddr; 9159b1aa8d6SNeel Natu int i; 916318224bbSNeel Natu 9179b1aa8d6SNeel Natu maxaddr = 0; 9189b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 9199b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 9209b1aa8d6SNeel Natu if (sysmem_mapping(vm, mm)) { 9219b1aa8d6SNeel Natu if (maxaddr < mm->gpa + mm->len) 9229b1aa8d6SNeel Natu maxaddr = mm->gpa + mm->len; 9239b1aa8d6SNeel Natu } 9249b1aa8d6SNeel Natu } 9259b1aa8d6SNeel Natu return (maxaddr); 926318224bbSNeel Natu } 927318224bbSNeel Natu 928318224bbSNeel Natu static void 929490d56c5SEd Maste vm_iommu_modify(struct vm *vm, bool map) 930318224bbSNeel Natu { 931318224bbSNeel Natu int i, sz; 932318224bbSNeel Natu vm_paddr_t gpa, hpa; 9339b1aa8d6SNeel Natu struct mem_map *mm; 934318224bbSNeel Natu void *vp, *cookie, *host_domain; 935318224bbSNeel Natu 936318224bbSNeel Natu sz = PAGE_SIZE; 937318224bbSNeel Natu host_domain = iommu_host_domain(); 938318224bbSNeel Natu 9399b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 9409b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 9419b1aa8d6SNeel Natu if (!sysmem_mapping(vm, mm)) 9429b1aa8d6SNeel Natu continue; 943318224bbSNeel Natu 9449b1aa8d6SNeel Natu if (map) { 9459b1aa8d6SNeel Natu KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0, 9469b1aa8d6SNeel Natu ("iommu map found invalid memmap %#lx/%#lx/%#x", 9479b1aa8d6SNeel Natu mm->gpa, mm->len, mm->flags)); 9489b1aa8d6SNeel Natu if ((mm->flags & VM_MEMMAP_F_WIRED) == 0) 9499b1aa8d6SNeel Natu continue; 9509b1aa8d6SNeel Natu mm->flags |= VM_MEMMAP_F_IOMMU; 9519b1aa8d6SNeel Natu } else { 9529b1aa8d6SNeel Natu if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0) 9539b1aa8d6SNeel Natu continue; 9549b1aa8d6SNeel Natu mm->flags &= ~VM_MEMMAP_F_IOMMU; 9559b1aa8d6SNeel Natu KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0, 9569b1aa8d6SNeel Natu ("iommu unmap found invalid memmap %#lx/%#lx/%#x", 9579b1aa8d6SNeel Natu mm->gpa, mm->len, mm->flags)); 9589b1aa8d6SNeel Natu } 9599b1aa8d6SNeel Natu 9609b1aa8d6SNeel Natu gpa = mm->gpa; 9619b1aa8d6SNeel Natu while (gpa < mm->gpa + mm->len) { 96228b561adSJohn Baldwin vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE, 96328b561adSJohn Baldwin VM_PROT_WRITE, &cookie); 964318224bbSNeel Natu KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx", 965318224bbSNeel Natu vm_name(vm), gpa)); 966318224bbSNeel Natu 967318224bbSNeel Natu vm_gpa_release(cookie); 968318224bbSNeel Natu 969318224bbSNeel Natu hpa = DMAP_TO_PHYS((uintptr_t)vp); 970318224bbSNeel Natu if (map) { 971318224bbSNeel Natu iommu_create_mapping(vm->iommu, gpa, hpa, sz); 972318224bbSNeel Natu } else { 973318224bbSNeel Natu iommu_remove_mapping(vm->iommu, gpa, sz); 974318224bbSNeel Natu } 975318224bbSNeel Natu 976318224bbSNeel Natu gpa += PAGE_SIZE; 977318224bbSNeel Natu } 978318224bbSNeel Natu } 979318224bbSNeel Natu 980318224bbSNeel Natu /* 981318224bbSNeel Natu * Invalidate the cached translations associated with the domain 982318224bbSNeel Natu * from which pages were removed. 983318224bbSNeel Natu */ 984318224bbSNeel Natu if (map) 985318224bbSNeel Natu iommu_invalidate_tlb(host_domain); 986318224bbSNeel Natu else 987318224bbSNeel Natu iommu_invalidate_tlb(vm->iommu); 988318224bbSNeel Natu } 989318224bbSNeel Natu 990490d56c5SEd Maste #define vm_iommu_unmap(vm) vm_iommu_modify((vm), false) 991490d56c5SEd Maste #define vm_iommu_map(vm) vm_iommu_modify((vm), true) 992318224bbSNeel Natu 993318224bbSNeel Natu int 994318224bbSNeel Natu vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) 995318224bbSNeel Natu { 996318224bbSNeel Natu int error; 997318224bbSNeel Natu 998318224bbSNeel Natu error = ppt_unassign_device(vm, bus, slot, func); 999318224bbSNeel Natu if (error) 1000318224bbSNeel Natu return (error); 1001318224bbSNeel Natu 10029b1aa8d6SNeel Natu if (ppt_assigned_devices(vm) == 0) 1003318224bbSNeel Natu vm_iommu_unmap(vm); 10049b1aa8d6SNeel Natu 1005318224bbSNeel Natu return (0); 1006318224bbSNeel Natu } 1007318224bbSNeel Natu 1008318224bbSNeel Natu int 1009318224bbSNeel Natu vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) 1010318224bbSNeel Natu { 1011318224bbSNeel Natu int error; 1012318224bbSNeel Natu vm_paddr_t maxaddr; 1013318224bbSNeel Natu 10149b1aa8d6SNeel Natu /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */ 101551f45d01SNeel Natu if (ppt_assigned_devices(vm) == 0) { 1016318224bbSNeel Natu KASSERT(vm->iommu == NULL, 1017318224bbSNeel Natu ("vm_assign_pptdev: iommu must be NULL")); 1018147d12a7SAntoine Brodin maxaddr = vmm_sysmem_maxaddr(vm); 1019318224bbSNeel Natu vm->iommu = iommu_create_domain(maxaddr); 1020ffe1b10dSJohn Baldwin if (vm->iommu == NULL) 1021ffe1b10dSJohn Baldwin return (ENXIO); 1022318224bbSNeel Natu vm_iommu_map(vm); 1023318224bbSNeel Natu } 1024318224bbSNeel Natu 1025318224bbSNeel Natu error = ppt_assign_device(vm, bus, slot, func); 1026318224bbSNeel Natu return (error); 1027318224bbSNeel Natu } 1028318224bbSNeel Natu 102928b561adSJohn Baldwin static void * 103028b561adSJohn Baldwin _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, 1031318224bbSNeel Natu void **cookie) 1032318224bbSNeel Natu { 10339b1aa8d6SNeel Natu int i, count, pageoff; 10349b1aa8d6SNeel Natu struct mem_map *mm; 1035318224bbSNeel Natu vm_page_t m; 103628b561adSJohn Baldwin 1037318224bbSNeel Natu pageoff = gpa & PAGE_MASK; 1038318224bbSNeel Natu if (len > PAGE_SIZE - pageoff) 1039318224bbSNeel Natu panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); 1040318224bbSNeel Natu 10419b1aa8d6SNeel Natu count = 0; 10429b1aa8d6SNeel Natu for (i = 0; i < VM_MAX_MEMMAPS; i++) { 10439b1aa8d6SNeel Natu mm = &vm->mem_maps[i]; 104446567b4fSPeter Grehan if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) { 1045318224bbSNeel Natu count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, 1046318224bbSNeel Natu trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); 10479b1aa8d6SNeel Natu break; 10489b1aa8d6SNeel Natu } 10499b1aa8d6SNeel Natu } 1050318224bbSNeel Natu 1051318224bbSNeel Natu if (count == 1) { 1052318224bbSNeel Natu *cookie = m; 1053318224bbSNeel Natu return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); 1054318224bbSNeel Natu } else { 1055318224bbSNeel Natu *cookie = NULL; 1056318224bbSNeel Natu return (NULL); 1057318224bbSNeel Natu } 1058318224bbSNeel Natu } 1059318224bbSNeel Natu 106028b561adSJohn Baldwin void * 1061d3956e46SJohn Baldwin vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, 106228b561adSJohn Baldwin void **cookie) 106328b561adSJohn Baldwin { 106428b561adSJohn Baldwin #ifdef INVARIANTS 106528b561adSJohn Baldwin /* 106628b561adSJohn Baldwin * The current vcpu should be frozen to ensure 'vm_memmap[]' 106728b561adSJohn Baldwin * stability. 106828b561adSJohn Baldwin */ 1069d3956e46SJohn Baldwin int state = vcpu_get_state(vcpu, NULL); 107028b561adSJohn Baldwin KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", 107128b561adSJohn Baldwin __func__, state)); 107228b561adSJohn Baldwin #endif 1073d3956e46SJohn Baldwin return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); 107428b561adSJohn Baldwin } 107528b561adSJohn Baldwin 107628b561adSJohn Baldwin void * 107728b561adSJohn Baldwin vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, 107828b561adSJohn Baldwin void **cookie) 107928b561adSJohn Baldwin { 108028b561adSJohn Baldwin #ifdef INVARIANTS 108128b561adSJohn Baldwin /* 108228b561adSJohn Baldwin * All vcpus are frozen by ioctls that modify the memory map 108328b561adSJohn Baldwin * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is 108428b561adSJohn Baldwin * guaranteed if at least one vcpu is in the VCPU_FROZEN state. 108528b561adSJohn Baldwin */ 108628b561adSJohn Baldwin int state; 108728b561adSJohn Baldwin for (int i = 0; i < vm->maxcpus; i++) { 1088d3956e46SJohn Baldwin state = vcpu_get_state(vm_vcpu(vm, i), NULL); 108928b561adSJohn Baldwin KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", 109028b561adSJohn Baldwin __func__, state)); 109128b561adSJohn Baldwin } 109228b561adSJohn Baldwin #endif 109328b561adSJohn Baldwin return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); 109428b561adSJohn Baldwin } 109528b561adSJohn Baldwin 1096318224bbSNeel Natu void 1097318224bbSNeel Natu vm_gpa_release(void *cookie) 1098318224bbSNeel Natu { 1099318224bbSNeel Natu vm_page_t m = cookie; 1100318224bbSNeel Natu 1101eeacb3b0SMark Johnston vm_page_unwire(m, PQ_ACTIVE); 1102366f6083SPeter Grehan } 1103366f6083SPeter Grehan 1104366f6083SPeter Grehan int 1105d3956e46SJohn Baldwin vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) 1106366f6083SPeter Grehan { 1107366f6083SPeter Grehan 1108366f6083SPeter Grehan if (reg >= VM_REG_LAST) 1109366f6083SPeter Grehan return (EINVAL); 1110366f6083SPeter Grehan 1111d3956e46SJohn Baldwin return (vmmops_getreg(vcpu->cookie, reg, retval)); 1112366f6083SPeter Grehan } 1113366f6083SPeter Grehan 1114366f6083SPeter Grehan int 1115d3956e46SJohn Baldwin vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) 1116366f6083SPeter Grehan { 1117d087a399SNeel Natu int error; 1118366f6083SPeter Grehan 1119366f6083SPeter Grehan if (reg >= VM_REG_LAST) 1120366f6083SPeter Grehan return (EINVAL); 1121366f6083SPeter Grehan 1122869c8d19SJohn Baldwin error = vmmops_setreg(vcpu->cookie, reg, val); 1123d087a399SNeel Natu if (error || reg != VM_REG_GUEST_RIP) 1124d087a399SNeel Natu return (error); 1125d087a399SNeel Natu 1126d087a399SNeel Natu /* Set 'nextrip' to match the value of %rip */ 1127d3956e46SJohn Baldwin VMM_CTR1(vcpu, "Setting nextrip to %#lx", val); 1128d087a399SNeel Natu vcpu->nextrip = val; 1129d087a399SNeel Natu return (0); 1130366f6083SPeter Grehan } 1131366f6083SPeter Grehan 1132490d56c5SEd Maste static bool 1133366f6083SPeter Grehan is_descriptor_table(int reg) 1134366f6083SPeter Grehan { 1135366f6083SPeter Grehan 1136366f6083SPeter Grehan switch (reg) { 1137366f6083SPeter Grehan case VM_REG_GUEST_IDTR: 1138366f6083SPeter Grehan case VM_REG_GUEST_GDTR: 1139490d56c5SEd Maste return (true); 1140366f6083SPeter Grehan default: 1141490d56c5SEd Maste return (false); 1142366f6083SPeter Grehan } 1143366f6083SPeter Grehan } 1144366f6083SPeter Grehan 1145490d56c5SEd Maste static bool 1146366f6083SPeter Grehan is_segment_register(int reg) 1147366f6083SPeter Grehan { 1148366f6083SPeter Grehan 1149366f6083SPeter Grehan switch (reg) { 1150366f6083SPeter Grehan case VM_REG_GUEST_ES: 1151366f6083SPeter Grehan case VM_REG_GUEST_CS: 1152366f6083SPeter Grehan case VM_REG_GUEST_SS: 1153366f6083SPeter Grehan case VM_REG_GUEST_DS: 1154366f6083SPeter Grehan case VM_REG_GUEST_FS: 1155366f6083SPeter Grehan case VM_REG_GUEST_GS: 1156366f6083SPeter Grehan case VM_REG_GUEST_TR: 1157366f6083SPeter Grehan case VM_REG_GUEST_LDTR: 1158490d56c5SEd Maste return (true); 1159366f6083SPeter Grehan default: 1160490d56c5SEd Maste return (false); 1161366f6083SPeter Grehan } 1162366f6083SPeter Grehan } 1163366f6083SPeter Grehan 1164366f6083SPeter Grehan int 1165d3956e46SJohn Baldwin vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) 1166366f6083SPeter Grehan { 1167366f6083SPeter Grehan 1168366f6083SPeter Grehan if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1169366f6083SPeter Grehan return (EINVAL); 1170366f6083SPeter Grehan 1171d3956e46SJohn Baldwin return (vmmops_getdesc(vcpu->cookie, reg, desc)); 1172366f6083SPeter Grehan } 1173366f6083SPeter Grehan 1174366f6083SPeter Grehan int 1175366f6083SPeter Grehan vm_set_seg_desc(struct vm *vm, int vcpu, int reg, 1176366f6083SPeter Grehan struct seg_desc *desc) 1177366f6083SPeter Grehan { 1178a488c9c9SRodney W. Grimes if (vcpu < 0 || vcpu >= vm->maxcpus) 1179366f6083SPeter Grehan return (EINVAL); 1180366f6083SPeter Grehan 1181366f6083SPeter Grehan if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1182366f6083SPeter Grehan return (EINVAL); 1183366f6083SPeter Grehan 1184869c8d19SJohn Baldwin return (vmmops_setdesc(vcpu_cookie(vm, vcpu), reg, desc)); 1185366f6083SPeter Grehan } 1186366f6083SPeter Grehan 1187366f6083SPeter Grehan static void 1188366f6083SPeter Grehan restore_guest_fpustate(struct vcpu *vcpu) 1189366f6083SPeter Grehan { 1190366f6083SPeter Grehan 119138f1b189SPeter Grehan /* flush host state to the pcb */ 119238f1b189SPeter Grehan fpuexit(curthread); 1193bd8572e0SNeel Natu 1194bd8572e0SNeel Natu /* restore guest FPU state */ 1195366f6083SPeter Grehan fpu_stop_emulating(); 119638f1b189SPeter Grehan fpurestore(vcpu->guestfpu); 1197bd8572e0SNeel Natu 1198abb023fbSJohn Baldwin /* restore guest XCR0 if XSAVE is enabled in the host */ 1199abb023fbSJohn Baldwin if (rcr4() & CR4_XSAVE) 1200abb023fbSJohn Baldwin load_xcr(0, vcpu->guest_xcr0); 1201abb023fbSJohn Baldwin 1202bd8572e0SNeel Natu /* 1203bd8572e0SNeel Natu * The FPU is now "dirty" with the guest's state so turn on emulation 1204bd8572e0SNeel Natu * to trap any access to the FPU by the host. 1205bd8572e0SNeel Natu */ 1206bd8572e0SNeel Natu fpu_start_emulating(); 1207366f6083SPeter Grehan } 1208366f6083SPeter Grehan 1209366f6083SPeter Grehan static void 1210366f6083SPeter Grehan save_guest_fpustate(struct vcpu *vcpu) 1211366f6083SPeter Grehan { 1212366f6083SPeter Grehan 1213bd8572e0SNeel Natu if ((rcr0() & CR0_TS) == 0) 1214bd8572e0SNeel Natu panic("fpu emulation not enabled in host!"); 1215bd8572e0SNeel Natu 1216abb023fbSJohn Baldwin /* save guest XCR0 and restore host XCR0 */ 1217abb023fbSJohn Baldwin if (rcr4() & CR4_XSAVE) { 1218abb023fbSJohn Baldwin vcpu->guest_xcr0 = rxcr(0); 1219abb023fbSJohn Baldwin load_xcr(0, vmm_get_host_xcr0()); 1220abb023fbSJohn Baldwin } 1221abb023fbSJohn Baldwin 1222bd8572e0SNeel Natu /* save guest FPU state */ 1223bd8572e0SNeel Natu fpu_stop_emulating(); 122438f1b189SPeter Grehan fpusave(vcpu->guestfpu); 1225366f6083SPeter Grehan fpu_start_emulating(); 1226366f6083SPeter Grehan } 1227366f6083SPeter Grehan 122861592433SNeel Natu static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle"); 1229f76fc5d4SNeel Natu 1230318224bbSNeel Natu static int 1231248e6799SNeel Natu vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate, 1232f80330a8SNeel Natu bool from_idle) 1233366f6083SPeter Grehan { 1234248e6799SNeel Natu struct vcpu *vcpu; 1235318224bbSNeel Natu int error; 1236366f6083SPeter Grehan 1237248e6799SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1238318224bbSNeel Natu vcpu_assert_locked(vcpu); 1239366f6083SPeter Grehan 1240f76fc5d4SNeel Natu /* 1241f80330a8SNeel Natu * State transitions from the vmmdev_ioctl() must always begin from 1242f80330a8SNeel Natu * the VCPU_IDLE state. This guarantees that there is only a single 1243f80330a8SNeel Natu * ioctl() operating on a vcpu at any point. 1244f80330a8SNeel Natu */ 1245f80330a8SNeel Natu if (from_idle) { 1246248e6799SNeel Natu while (vcpu->state != VCPU_IDLE) { 1247248e6799SNeel Natu vcpu->reqidle = 1; 1248248e6799SNeel Natu vcpu_notify_event_locked(vcpu, false); 1249248e6799SNeel Natu VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to " 1250248e6799SNeel Natu "idle requested", vcpu_state2str(vcpu->state)); 1251f80330a8SNeel Natu msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); 1252248e6799SNeel Natu } 1253f80330a8SNeel Natu } else { 1254f80330a8SNeel Natu KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " 1255f80330a8SNeel Natu "vcpu idle state")); 1256f80330a8SNeel Natu } 1257f80330a8SNeel Natu 1258ef39d7e9SNeel Natu if (vcpu->state == VCPU_RUNNING) { 1259ef39d7e9SNeel Natu KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " 1260ef39d7e9SNeel Natu "mismatch for running vcpu", curcpu, vcpu->hostcpu)); 1261ef39d7e9SNeel Natu } else { 1262ef39d7e9SNeel Natu KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " 1263ef39d7e9SNeel Natu "vcpu that is not running", vcpu->hostcpu)); 1264ef39d7e9SNeel Natu } 1265ef39d7e9SNeel Natu 1266f80330a8SNeel Natu /* 1267318224bbSNeel Natu * The following state transitions are allowed: 1268318224bbSNeel Natu * IDLE -> FROZEN -> IDLE 1269318224bbSNeel Natu * FROZEN -> RUNNING -> FROZEN 1270318224bbSNeel Natu * FROZEN -> SLEEPING -> FROZEN 1271f76fc5d4SNeel Natu */ 1272318224bbSNeel Natu switch (vcpu->state) { 1273318224bbSNeel Natu case VCPU_IDLE: 1274318224bbSNeel Natu case VCPU_RUNNING: 1275318224bbSNeel Natu case VCPU_SLEEPING: 1276318224bbSNeel Natu error = (newstate != VCPU_FROZEN); 1277318224bbSNeel Natu break; 1278318224bbSNeel Natu case VCPU_FROZEN: 1279318224bbSNeel Natu error = (newstate == VCPU_FROZEN); 1280318224bbSNeel Natu break; 1281318224bbSNeel Natu default: 1282318224bbSNeel Natu error = 1; 1283318224bbSNeel Natu break; 1284318224bbSNeel Natu } 1285318224bbSNeel Natu 1286f80330a8SNeel Natu if (error) 1287f80330a8SNeel Natu return (EBUSY); 1288318224bbSNeel Natu 1289248e6799SNeel Natu VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s", 1290248e6799SNeel Natu vcpu_state2str(vcpu->state), vcpu_state2str(newstate)); 1291248e6799SNeel Natu 1292f80330a8SNeel Natu vcpu->state = newstate; 1293ef39d7e9SNeel Natu if (newstate == VCPU_RUNNING) 1294ef39d7e9SNeel Natu vcpu->hostcpu = curcpu; 1295ef39d7e9SNeel Natu else 1296ef39d7e9SNeel Natu vcpu->hostcpu = NOCPU; 1297ef39d7e9SNeel Natu 1298f80330a8SNeel Natu if (newstate == VCPU_IDLE) 1299f80330a8SNeel Natu wakeup(&vcpu->state); 1300f80330a8SNeel Natu 1301f80330a8SNeel Natu return (0); 1302318224bbSNeel Natu } 1303318224bbSNeel Natu 1304318224bbSNeel Natu static void 1305318224bbSNeel Natu vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate) 1306318224bbSNeel Natu { 1307318224bbSNeel Natu int error; 1308318224bbSNeel Natu 1309f80330a8SNeel Natu if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0) 1310318224bbSNeel Natu panic("Error %d setting state to %d\n", error, newstate); 1311318224bbSNeel Natu } 1312318224bbSNeel Natu 1313318224bbSNeel Natu static void 1314248e6799SNeel Natu vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate) 1315318224bbSNeel Natu { 1316318224bbSNeel Natu int error; 1317318224bbSNeel Natu 1318248e6799SNeel Natu if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0) 1319318224bbSNeel Natu panic("Error %d setting state to %d", error, newstate); 1320318224bbSNeel Natu } 1321318224bbSNeel Natu 1322b837daddSKonstantin Belousov static int 1323d8be3d52SJohn Baldwin vm_handle_rendezvous(struct vcpu *vcpu) 13245b8a8cd1SNeel Natu { 1325d8be3d52SJohn Baldwin struct vm *vm = vcpu->vm; 1326b837daddSKonstantin Belousov struct thread *td; 1327d8be3d52SJohn Baldwin int error, vcpuid; 13285b8a8cd1SNeel Natu 1329b837daddSKonstantin Belousov error = 0; 1330d8be3d52SJohn Baldwin vcpuid = vcpu->vcpuid; 1331b837daddSKonstantin Belousov td = curthread; 13325b8a8cd1SNeel Natu mtx_lock(&vm->rendezvous_mtx); 13335b8a8cd1SNeel Natu while (vm->rendezvous_func != NULL) { 133422d822c6SNeel Natu /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ 1335e2650af1SStefan Eßer CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); 133622d822c6SNeel Natu 1337949f0f47SJohn Baldwin if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && 133822d822c6SNeel Natu !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { 1339d8be3d52SJohn Baldwin VMM_CTR0(vcpu, "Calling rendezvous func"); 1340d8be3d52SJohn Baldwin (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); 13415b8a8cd1SNeel Natu CPU_SET(vcpuid, &vm->rendezvous_done_cpus); 13425b8a8cd1SNeel Natu } 13435b8a8cd1SNeel Natu if (CPU_CMP(&vm->rendezvous_req_cpus, 13445b8a8cd1SNeel Natu &vm->rendezvous_done_cpus) == 0) { 1345d8be3d52SJohn Baldwin VMM_CTR0(vcpu, "Rendezvous completed"); 1346869dbab7SAndriy Gapon vm->rendezvous_func = NULL; 13475b8a8cd1SNeel Natu wakeup(&vm->rendezvous_func); 13485b8a8cd1SNeel Natu break; 13495b8a8cd1SNeel Natu } 1350d8be3d52SJohn Baldwin VMM_CTR0(vcpu, "Wait for rendezvous completion"); 13515b8a8cd1SNeel Natu mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, 1352b837daddSKonstantin Belousov "vmrndv", hz); 1353c6d31b83SKonstantin Belousov if (td_ast_pending(td, TDA_SUSPEND)) { 1354b837daddSKonstantin Belousov mtx_unlock(&vm->rendezvous_mtx); 1355b837daddSKonstantin Belousov error = thread_check_susp(td, true); 1356b837daddSKonstantin Belousov if (error != 0) 1357b837daddSKonstantin Belousov return (error); 1358b837daddSKonstantin Belousov mtx_lock(&vm->rendezvous_mtx); 1359b837daddSKonstantin Belousov } 13605b8a8cd1SNeel Natu } 13615b8a8cd1SNeel Natu mtx_unlock(&vm->rendezvous_mtx); 1362b837daddSKonstantin Belousov return (0); 13635b8a8cd1SNeel Natu } 13645b8a8cd1SNeel Natu 1365318224bbSNeel Natu /* 1366318224bbSNeel Natu * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run. 1367318224bbSNeel Natu */ 1368318224bbSNeel Natu static int 1369becd9849SNeel Natu vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu) 1370318224bbSNeel Natu { 1371318224bbSNeel Natu struct vcpu *vcpu; 1372c6a0cc2eSNeel Natu const char *wmesg; 1373b837daddSKonstantin Belousov struct thread *td; 1374b837daddSKonstantin Belousov int error, t, vcpu_halted, vm_halted; 1375e50ce2aaSNeel Natu 1376e50ce2aaSNeel Natu KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); 1377318224bbSNeel Natu 1378318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1379e50ce2aaSNeel Natu vcpu_halted = 0; 1380e50ce2aaSNeel Natu vm_halted = 0; 1381b837daddSKonstantin Belousov error = 0; 1382b837daddSKonstantin Belousov td = curthread; 1383318224bbSNeel Natu 1384f76fc5d4SNeel Natu vcpu_lock(vcpu); 1385c6a0cc2eSNeel Natu while (1) { 1386f76fc5d4SNeel Natu /* 1387f76fc5d4SNeel Natu * Do a final check for pending NMI or interrupts before 1388c6a0cc2eSNeel Natu * really putting this thread to sleep. Also check for 1389c6a0cc2eSNeel Natu * software events that would cause this vcpu to wakeup. 1390f76fc5d4SNeel Natu * 1391c6a0cc2eSNeel Natu * These interrupts/events could have happened after the 139215add60dSPeter Grehan * vcpu returned from vmmops_run() and before it acquired the 1393c6a0cc2eSNeel Natu * vcpu lock above. 1394f76fc5d4SNeel Natu */ 1395248e6799SNeel Natu if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) 1396c6a0cc2eSNeel Natu break; 139780cb5d84SJohn Baldwin if (vm_nmi_pending(vcpu)) 1398c6a0cc2eSNeel Natu break; 1399c6a0cc2eSNeel Natu if (!intr_disabled) { 140080cb5d84SJohn Baldwin if (vm_extint_pending(vcpu) || 1401c6a0cc2eSNeel Natu vlapic_pending_intr(vcpu->vlapic, NULL)) { 1402c6a0cc2eSNeel Natu break; 1403c6a0cc2eSNeel Natu } 1404c6a0cc2eSNeel Natu } 1405c6a0cc2eSNeel Natu 1406f008d157SNeel Natu /* Don't go to sleep if the vcpu thread needs to yield */ 140780cb5d84SJohn Baldwin if (vcpu_should_yield(vcpu)) 1408f008d157SNeel Natu break; 1409f008d157SNeel Natu 141080cb5d84SJohn Baldwin if (vcpu_debugged(vcpu)) 1411fc276d92SJohn Baldwin break; 1412fc276d92SJohn Baldwin 1413e50ce2aaSNeel Natu /* 1414e50ce2aaSNeel Natu * Some Linux guests implement "halt" by having all vcpus 1415e50ce2aaSNeel Natu * execute HLT with interrupts disabled. 'halted_cpus' keeps 1416e50ce2aaSNeel Natu * track of the vcpus that have entered this state. When all 1417e50ce2aaSNeel Natu * vcpus enter the halted state the virtual machine is halted. 1418e50ce2aaSNeel Natu */ 1419e50ce2aaSNeel Natu if (intr_disabled) { 1420c6a0cc2eSNeel Natu wmesg = "vmhalt"; 1421e50ce2aaSNeel Natu VCPU_CTR0(vm, vcpuid, "Halted"); 1422055fc2cbSNeel Natu if (!vcpu_halted && halt_detection_enabled) { 1423e50ce2aaSNeel Natu vcpu_halted = 1; 1424e50ce2aaSNeel Natu CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); 1425e50ce2aaSNeel Natu } 1426e50ce2aaSNeel Natu if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { 1427e50ce2aaSNeel Natu vm_halted = 1; 1428e50ce2aaSNeel Natu break; 1429e50ce2aaSNeel Natu } 1430e50ce2aaSNeel Natu } else { 1431e50ce2aaSNeel Natu wmesg = "vmidle"; 1432e50ce2aaSNeel Natu } 1433c6a0cc2eSNeel Natu 1434f76fc5d4SNeel Natu t = ticks; 1435248e6799SNeel Natu vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); 1436f008d157SNeel Natu /* 1437f008d157SNeel Natu * XXX msleep_spin() cannot be interrupted by signals so 1438f008d157SNeel Natu * wake up periodically to check pending signals. 1439f008d157SNeel Natu */ 1440f008d157SNeel Natu msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); 1441248e6799SNeel Natu vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); 14423dc3d32aSJohn Baldwin vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t); 1443c6d31b83SKonstantin Belousov if (td_ast_pending(td, TDA_SUSPEND)) { 1444b837daddSKonstantin Belousov vcpu_unlock(vcpu); 1445b837daddSKonstantin Belousov error = thread_check_susp(td, false); 14464d447b30SKonstantin Belousov if (error != 0) { 14474d447b30SKonstantin Belousov if (vcpu_halted) { 14484d447b30SKonstantin Belousov CPU_CLR_ATOMIC(vcpuid, 14494d447b30SKonstantin Belousov &vm->halted_cpus); 14504d447b30SKonstantin Belousov } 1451b837daddSKonstantin Belousov return (error); 14524d447b30SKonstantin Belousov } 1453b837daddSKonstantin Belousov vcpu_lock(vcpu); 1454b837daddSKonstantin Belousov } 1455f76fc5d4SNeel Natu } 1456e50ce2aaSNeel Natu 1457e50ce2aaSNeel Natu if (vcpu_halted) 1458e50ce2aaSNeel Natu CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); 1459e50ce2aaSNeel Natu 1460f76fc5d4SNeel Natu vcpu_unlock(vcpu); 1461f76fc5d4SNeel Natu 1462e50ce2aaSNeel Natu if (vm_halted) 1463e50ce2aaSNeel Natu vm_suspend(vm, VM_SUSPEND_HALT); 1464e50ce2aaSNeel Natu 1465318224bbSNeel Natu return (0); 1466318224bbSNeel Natu } 1467318224bbSNeel Natu 1468318224bbSNeel Natu static int 1469becd9849SNeel Natu vm_handle_paging(struct vm *vm, int vcpuid, bool *retu) 1470318224bbSNeel Natu { 1471318224bbSNeel Natu int rv, ftype; 1472318224bbSNeel Natu struct vm_map *map; 1473318224bbSNeel Natu struct vcpu *vcpu; 1474318224bbSNeel Natu struct vm_exit *vme; 1475318224bbSNeel Natu 1476318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1477318224bbSNeel Natu vme = &vcpu->exitinfo; 1478318224bbSNeel Natu 1479d087a399SNeel Natu KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", 1480d087a399SNeel Natu __func__, vme->inst_length)); 1481d087a399SNeel Natu 1482318224bbSNeel Natu ftype = vme->u.paging.fault_type; 1483318224bbSNeel Natu KASSERT(ftype == VM_PROT_READ || 1484318224bbSNeel Natu ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE, 1485318224bbSNeel Natu ("vm_handle_paging: invalid fault_type %d", ftype)); 1486318224bbSNeel Natu 1487318224bbSNeel Natu if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) { 1488318224bbSNeel Natu rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), 1489318224bbSNeel Natu vme->u.paging.gpa, ftype); 14909d8d8e3eSNeel Natu if (rv == 0) { 14919d8d8e3eSNeel Natu VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx", 14929d8d8e3eSNeel Natu ftype == VM_PROT_READ ? "accessed" : "dirty", 14939d8d8e3eSNeel Natu vme->u.paging.gpa); 1494318224bbSNeel Natu goto done; 1495318224bbSNeel Natu } 14969d8d8e3eSNeel Natu } 1497318224bbSNeel Natu 1498318224bbSNeel Natu map = &vm->vmspace->vm_map; 1499df08823dSKonstantin Belousov rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); 1500318224bbSNeel Natu 1501513c8d33SNeel Natu VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, " 1502513c8d33SNeel Natu "ftype = %d", rv, vme->u.paging.gpa, ftype); 1503318224bbSNeel Natu 1504318224bbSNeel Natu if (rv != KERN_SUCCESS) 1505318224bbSNeel Natu return (EFAULT); 1506318224bbSNeel Natu done: 1507318224bbSNeel Natu return (0); 1508318224bbSNeel Natu } 1509318224bbSNeel Natu 1510318224bbSNeel Natu static int 1511becd9849SNeel Natu vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu) 1512318224bbSNeel Natu { 1513318224bbSNeel Natu struct vie *vie; 1514318224bbSNeel Natu struct vcpu *vcpu; 1515318224bbSNeel Natu struct vm_exit *vme; 1516e4f605eeSTycho Nightingale uint64_t gla, gpa, cs_base; 1517e813a873SNeel Natu struct vm_guest_paging *paging; 1518565bbb86SNeel Natu mem_region_read_t mread; 1519565bbb86SNeel Natu mem_region_write_t mwrite; 1520f7a9f178SNeel Natu enum vm_cpu_mode cpu_mode; 15211c73ea3eSNeel Natu int cs_d, error, fault; 1522318224bbSNeel Natu 1523318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1524318224bbSNeel Natu vme = &vcpu->exitinfo; 1525318224bbSNeel Natu 15261c73ea3eSNeel Natu KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", 15271c73ea3eSNeel Natu __func__, vme->inst_length)); 15281c73ea3eSNeel Natu 1529318224bbSNeel Natu gla = vme->u.inst_emul.gla; 1530318224bbSNeel Natu gpa = vme->u.inst_emul.gpa; 1531e4f605eeSTycho Nightingale cs_base = vme->u.inst_emul.cs_base; 1532f7a9f178SNeel Natu cs_d = vme->u.inst_emul.cs_d; 1533318224bbSNeel Natu vie = &vme->u.inst_emul.vie; 1534e813a873SNeel Natu paging = &vme->u.inst_emul.paging; 1535f7a9f178SNeel Natu cpu_mode = paging->cpu_mode; 1536318224bbSNeel Natu 15379d8d8e3eSNeel Natu VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa); 15389d8d8e3eSNeel Natu 1539318224bbSNeel Natu /* Fetch, decode and emulate the faulting instruction */ 1540c2a875f9SNeel Natu if (vie->num_valid == 0) { 1541d3956e46SJohn Baldwin error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base, 1542d3956e46SJohn Baldwin VIE_INST_SIZE, vie, &fault); 1543c2a875f9SNeel Natu } else { 1544c2a875f9SNeel Natu /* 1545c2a875f9SNeel Natu * The instruction bytes have already been copied into 'vie' 1546c2a875f9SNeel Natu */ 15479c4d5478SNeel Natu error = fault = 0; 1548c2a875f9SNeel Natu } 15499c4d5478SNeel Natu if (error || fault) 15509c4d5478SNeel Natu return (error); 1551318224bbSNeel Natu 1552d3956e46SJohn Baldwin if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) { 1553c07a0648SNeel Natu VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx", 1554c07a0648SNeel Natu vme->rip + cs_base); 1555c07a0648SNeel Natu *retu = true; /* dump instruction bytes in userspace */ 1556c07a0648SNeel Natu return (0); 1557c07a0648SNeel Natu } 1558318224bbSNeel Natu 1559a0b78f09SPeter Grehan /* 15601c73ea3eSNeel Natu * Update 'nextrip' based on the length of the emulated instruction. 1561a0b78f09SPeter Grehan */ 1562a0b78f09SPeter Grehan vme->inst_length = vie->num_processed; 1563d087a399SNeel Natu vcpu->nextrip += vie->num_processed; 15641c73ea3eSNeel Natu VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction " 15651c73ea3eSNeel Natu "decoding", vcpu->nextrip); 1566a0b78f09SPeter Grehan 156708e3ff32SNeel Natu /* return to userland unless this is an in-kernel emulated device */ 1568565bbb86SNeel Natu if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) { 1569565bbb86SNeel Natu mread = lapic_mmio_read; 1570565bbb86SNeel Natu mwrite = lapic_mmio_write; 1571565bbb86SNeel Natu } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) { 1572565bbb86SNeel Natu mread = vioapic_mmio_read; 1573565bbb86SNeel Natu mwrite = vioapic_mmio_write; 157408e3ff32SNeel Natu } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) { 157508e3ff32SNeel Natu mread = vhpet_mmio_read; 157608e3ff32SNeel Natu mwrite = vhpet_mmio_write; 1577565bbb86SNeel Natu } else { 1578becd9849SNeel Natu *retu = true; 1579318224bbSNeel Natu return (0); 1580318224bbSNeel Natu } 1581318224bbSNeel Natu 1582d3956e46SJohn Baldwin error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite, 1583d3956e46SJohn Baldwin retu); 1584318224bbSNeel Natu 1585318224bbSNeel Natu return (error); 1586318224bbSNeel Natu } 1587318224bbSNeel Natu 1588b15a09c0SNeel Natu static int 1589b15a09c0SNeel Natu vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu) 1590b15a09c0SNeel Natu { 1591b837daddSKonstantin Belousov int error, i; 1592b15a09c0SNeel Natu struct vcpu *vcpu; 1593b837daddSKonstantin Belousov struct thread *td; 1594b15a09c0SNeel Natu 1595b837daddSKonstantin Belousov error = 0; 1596b15a09c0SNeel Natu vcpu = &vm->vcpu[vcpuid]; 1597b837daddSKonstantin Belousov td = curthread; 1598b15a09c0SNeel Natu 1599b15a09c0SNeel Natu CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); 1600b15a09c0SNeel Natu 1601b15a09c0SNeel Natu /* 1602b15a09c0SNeel Natu * Wait until all 'active_cpus' have suspended themselves. 1603b15a09c0SNeel Natu * 1604b15a09c0SNeel Natu * Since a VM may be suspended at any time including when one or 1605b15a09c0SNeel Natu * more vcpus are doing a rendezvous we need to call the rendezvous 1606b15a09c0SNeel Natu * handler while we are waiting to prevent a deadlock. 1607b15a09c0SNeel Natu */ 1608b15a09c0SNeel Natu vcpu_lock(vcpu); 1609b837daddSKonstantin Belousov while (error == 0) { 1610b15a09c0SNeel Natu if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { 1611b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "All vcpus suspended"); 1612b15a09c0SNeel Natu break; 1613b15a09c0SNeel Natu } 1614b15a09c0SNeel Natu 1615b15a09c0SNeel Natu if (vm->rendezvous_func == NULL) { 1616b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "Sleeping during suspend"); 1617248e6799SNeel Natu vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); 1618b15a09c0SNeel Natu msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); 1619248e6799SNeel Natu vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); 1620c6d31b83SKonstantin Belousov if (td_ast_pending(td, TDA_SUSPEND)) { 1621b837daddSKonstantin Belousov vcpu_unlock(vcpu); 1622b837daddSKonstantin Belousov error = thread_check_susp(td, false); 1623b837daddSKonstantin Belousov vcpu_lock(vcpu); 1624b837daddSKonstantin Belousov } 1625b15a09c0SNeel Natu } else { 1626b15a09c0SNeel Natu VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend"); 1627b15a09c0SNeel Natu vcpu_unlock(vcpu); 1628d8be3d52SJohn Baldwin error = vm_handle_rendezvous(vcpu); 1629b15a09c0SNeel Natu vcpu_lock(vcpu); 1630b15a09c0SNeel Natu } 1631b15a09c0SNeel Natu } 1632b15a09c0SNeel Natu vcpu_unlock(vcpu); 1633b15a09c0SNeel Natu 1634b15a09c0SNeel Natu /* 1635b15a09c0SNeel Natu * Wakeup the other sleeping vcpus and return to userspace. 1636b15a09c0SNeel Natu */ 1637a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) { 1638b15a09c0SNeel Natu if (CPU_ISSET(i, &vm->suspended_cpus)) { 1639b15a09c0SNeel Natu vcpu_notify_event(vm, i, false); 1640b15a09c0SNeel Natu } 1641b15a09c0SNeel Natu } 1642b15a09c0SNeel Natu 1643b15a09c0SNeel Natu *retu = true; 1644b837daddSKonstantin Belousov return (error); 1645b15a09c0SNeel Natu } 1646b15a09c0SNeel Natu 1647248e6799SNeel Natu static int 1648248e6799SNeel Natu vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu) 1649248e6799SNeel Natu { 1650248e6799SNeel Natu struct vcpu *vcpu = &vm->vcpu[vcpuid]; 1651248e6799SNeel Natu 1652248e6799SNeel Natu vcpu_lock(vcpu); 1653248e6799SNeel Natu KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle)); 1654248e6799SNeel Natu vcpu->reqidle = 0; 1655248e6799SNeel Natu vcpu_unlock(vcpu); 1656248e6799SNeel Natu *retu = true; 1657248e6799SNeel Natu return (0); 1658248e6799SNeel Natu } 1659248e6799SNeel Natu 1660b15a09c0SNeel Natu int 1661f0fdcfe2SNeel Natu vm_suspend(struct vm *vm, enum vm_suspend_how how) 1662b15a09c0SNeel Natu { 1663f0fdcfe2SNeel Natu int i; 1664b15a09c0SNeel Natu 1665f0fdcfe2SNeel Natu if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) 1666f0fdcfe2SNeel Natu return (EINVAL); 1667f0fdcfe2SNeel Natu 1668f0fdcfe2SNeel Natu if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { 1669f0fdcfe2SNeel Natu VM_CTR2(vm, "virtual machine already suspended %d/%d", 1670f0fdcfe2SNeel Natu vm->suspend, how); 1671b15a09c0SNeel Natu return (EALREADY); 1672b15a09c0SNeel Natu } 1673f0fdcfe2SNeel Natu 1674f0fdcfe2SNeel Natu VM_CTR1(vm, "virtual machine successfully suspended %d", how); 1675f0fdcfe2SNeel Natu 1676f0fdcfe2SNeel Natu /* 1677f0fdcfe2SNeel Natu * Notify all active vcpus that they are now suspended. 1678f0fdcfe2SNeel Natu */ 1679a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) { 1680f0fdcfe2SNeel Natu if (CPU_ISSET(i, &vm->active_cpus)) 1681f0fdcfe2SNeel Natu vcpu_notify_event(vm, i, false); 1682f0fdcfe2SNeel Natu } 1683f0fdcfe2SNeel Natu 1684f0fdcfe2SNeel Natu return (0); 1685f0fdcfe2SNeel Natu } 1686f0fdcfe2SNeel Natu 1687f0fdcfe2SNeel Natu void 168880cb5d84SJohn Baldwin vm_exit_suspended(struct vcpu *vcpu, uint64_t rip) 1689f0fdcfe2SNeel Natu { 169080cb5d84SJohn Baldwin struct vm *vm = vcpu->vm; 1691f0fdcfe2SNeel Natu struct vm_exit *vmexit; 1692f0fdcfe2SNeel Natu 1693f0fdcfe2SNeel Natu KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, 1694f0fdcfe2SNeel Natu ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); 1695f0fdcfe2SNeel Natu 169680cb5d84SJohn Baldwin vmexit = vm_exitinfo(vcpu); 1697f0fdcfe2SNeel Natu vmexit->rip = rip; 1698f0fdcfe2SNeel Natu vmexit->inst_length = 0; 1699f0fdcfe2SNeel Natu vmexit->exitcode = VM_EXITCODE_SUSPENDED; 1700f0fdcfe2SNeel Natu vmexit->u.suspended.how = vm->suspend; 1701b15a09c0SNeel Natu } 1702b15a09c0SNeel Natu 170340487465SNeel Natu void 170480cb5d84SJohn Baldwin vm_exit_debug(struct vcpu *vcpu, uint64_t rip) 1705fc276d92SJohn Baldwin { 1706fc276d92SJohn Baldwin struct vm_exit *vmexit; 1707fc276d92SJohn Baldwin 170880cb5d84SJohn Baldwin vmexit = vm_exitinfo(vcpu); 1709fc276d92SJohn Baldwin vmexit->rip = rip; 1710fc276d92SJohn Baldwin vmexit->inst_length = 0; 1711fc276d92SJohn Baldwin vmexit->exitcode = VM_EXITCODE_DEBUG; 1712fc276d92SJohn Baldwin } 1713fc276d92SJohn Baldwin 1714fc276d92SJohn Baldwin void 171580cb5d84SJohn Baldwin vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip) 171640487465SNeel Natu { 171740487465SNeel Natu struct vm_exit *vmexit; 171840487465SNeel Natu 171980cb5d84SJohn Baldwin KASSERT(vcpu->vm->rendezvous_func != NULL, 172080cb5d84SJohn Baldwin ("rendezvous not in progress")); 172140487465SNeel Natu 172280cb5d84SJohn Baldwin vmexit = vm_exitinfo(vcpu); 172340487465SNeel Natu vmexit->rip = rip; 172440487465SNeel Natu vmexit->inst_length = 0; 172540487465SNeel Natu vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 172680cb5d84SJohn Baldwin vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1); 172740487465SNeel Natu } 172840487465SNeel Natu 172940487465SNeel Natu void 173080cb5d84SJohn Baldwin vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip) 1731248e6799SNeel Natu { 1732248e6799SNeel Natu struct vm_exit *vmexit; 1733248e6799SNeel Natu 173480cb5d84SJohn Baldwin vmexit = vm_exitinfo(vcpu); 1735248e6799SNeel Natu vmexit->rip = rip; 1736248e6799SNeel Natu vmexit->inst_length = 0; 1737248e6799SNeel Natu vmexit->exitcode = VM_EXITCODE_REQIDLE; 173880cb5d84SJohn Baldwin vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1); 1739248e6799SNeel Natu } 1740248e6799SNeel Natu 1741248e6799SNeel Natu void 174280cb5d84SJohn Baldwin vm_exit_astpending(struct vcpu *vcpu, uint64_t rip) 174340487465SNeel Natu { 174440487465SNeel Natu struct vm_exit *vmexit; 174540487465SNeel Natu 174680cb5d84SJohn Baldwin vmexit = vm_exitinfo(vcpu); 174740487465SNeel Natu vmexit->rip = rip; 174840487465SNeel Natu vmexit->inst_length = 0; 174940487465SNeel Natu vmexit->exitcode = VM_EXITCODE_BOGUS; 175080cb5d84SJohn Baldwin vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1); 175140487465SNeel Natu } 175240487465SNeel Natu 1753318224bbSNeel Natu int 1754318224bbSNeel Natu vm_run(struct vm *vm, struct vm_run *vmrun) 1755318224bbSNeel Natu { 1756248e6799SNeel Natu struct vm_eventinfo evinfo; 1757318224bbSNeel Natu int error, vcpuid; 1758318224bbSNeel Natu struct vcpu *vcpu; 1759318224bbSNeel Natu struct pcb *pcb; 1760d087a399SNeel Natu uint64_t tscval; 1761318224bbSNeel Natu struct vm_exit *vme; 1762becd9849SNeel Natu bool retu, intr_disabled; 1763318224bbSNeel Natu pmap_t pmap; 1764318224bbSNeel Natu 1765318224bbSNeel Natu vcpuid = vmrun->cpuid; 1766318224bbSNeel Natu 1767a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 1768318224bbSNeel Natu return (EINVAL); 1769318224bbSNeel Natu 177095ebc360SNeel Natu if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 177195ebc360SNeel Natu return (EINVAL); 177295ebc360SNeel Natu 177395ebc360SNeel Natu if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) 177495ebc360SNeel Natu return (EINVAL); 177595ebc360SNeel Natu 1776318224bbSNeel Natu pmap = vmspace_pmap(vm->vmspace); 1777318224bbSNeel Natu vcpu = &vm->vcpu[vcpuid]; 1778318224bbSNeel Natu vme = &vcpu->exitinfo; 1779248e6799SNeel Natu evinfo.rptr = &vm->rendezvous_func; 1780248e6799SNeel Natu evinfo.sptr = &vm->suspend; 1781248e6799SNeel Natu evinfo.iptr = &vcpu->reqidle; 1782318224bbSNeel Natu restart: 1783318224bbSNeel Natu critical_enter(); 1784318224bbSNeel Natu 1785318224bbSNeel Natu KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), 1786318224bbSNeel Natu ("vm_run: absurd pm_active")); 1787318224bbSNeel Natu 1788318224bbSNeel Natu tscval = rdtsc(); 1789318224bbSNeel Natu 1790318224bbSNeel Natu pcb = PCPU_GET(curpcb); 1791318224bbSNeel Natu set_pcb_flags(pcb, PCB_FULL_IRET); 1792318224bbSNeel Natu 1793318224bbSNeel Natu restore_guest_fpustate(vcpu); 1794318224bbSNeel Natu 1795318224bbSNeel Natu vcpu_require_state(vm, vcpuid, VCPU_RUNNING); 1796869c8d19SJohn Baldwin error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo); 1797318224bbSNeel Natu vcpu_require_state(vm, vcpuid, VCPU_FROZEN); 1798318224bbSNeel Natu 1799318224bbSNeel Natu save_guest_fpustate(vcpu); 1800318224bbSNeel Natu 18013dc3d32aSJohn Baldwin vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); 1802318224bbSNeel Natu 1803318224bbSNeel Natu critical_exit(); 1804318224bbSNeel Natu 1805318224bbSNeel Natu if (error == 0) { 1806becd9849SNeel Natu retu = false; 1807d087a399SNeel Natu vcpu->nextrip = vme->rip + vme->inst_length; 1808318224bbSNeel Natu switch (vme->exitcode) { 1809248e6799SNeel Natu case VM_EXITCODE_REQIDLE: 1810248e6799SNeel Natu error = vm_handle_reqidle(vm, vcpuid, &retu); 1811248e6799SNeel Natu break; 1812b15a09c0SNeel Natu case VM_EXITCODE_SUSPENDED: 1813b15a09c0SNeel Natu error = vm_handle_suspend(vm, vcpuid, &retu); 1814b15a09c0SNeel Natu break; 181530b94db8SNeel Natu case VM_EXITCODE_IOAPIC_EOI: 1816*e42c24d5SJohn Baldwin vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); 181730b94db8SNeel Natu break; 18185b8a8cd1SNeel Natu case VM_EXITCODE_RENDEZVOUS: 1819d8be3d52SJohn Baldwin error = vm_handle_rendezvous(vcpu); 18205b8a8cd1SNeel Natu break; 1821318224bbSNeel Natu case VM_EXITCODE_HLT: 1822becd9849SNeel Natu intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); 18231c052192SNeel Natu error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu); 1824318224bbSNeel Natu break; 1825318224bbSNeel Natu case VM_EXITCODE_PAGING: 1826318224bbSNeel Natu error = vm_handle_paging(vm, vcpuid, &retu); 1827318224bbSNeel Natu break; 1828318224bbSNeel Natu case VM_EXITCODE_INST_EMUL: 1829318224bbSNeel Natu error = vm_handle_inst_emul(vm, vcpuid, &retu); 1830318224bbSNeel Natu break; 1831d17b5104SNeel Natu case VM_EXITCODE_INOUT: 1832d17b5104SNeel Natu case VM_EXITCODE_INOUT_STR: 1833d17b5104SNeel Natu error = vm_handle_inout(vm, vcpuid, vme, &retu); 1834d17b5104SNeel Natu break; 183565145c7fSNeel Natu case VM_EXITCODE_MONITOR: 183665145c7fSNeel Natu case VM_EXITCODE_MWAIT: 183727d26457SAndrew Turner case VM_EXITCODE_VMINSN: 1838d3956e46SJohn Baldwin vm_inject_ud(vcpu); 183965145c7fSNeel Natu break; 1840318224bbSNeel Natu default: 1841becd9849SNeel Natu retu = true; /* handled in userland */ 1842318224bbSNeel Natu break; 1843318224bbSNeel Natu } 1844318224bbSNeel Natu } 1845318224bbSNeel Natu 18460bda8d3eSCorvin Köhne /* 18470bda8d3eSCorvin Köhne * VM_EXITCODE_INST_EMUL could access the apic which could transform the 18480bda8d3eSCorvin Köhne * exit code into VM_EXITCODE_IPI. 18490bda8d3eSCorvin Köhne */ 18500bda8d3eSCorvin Köhne if (error == 0 && vme->exitcode == VM_EXITCODE_IPI) { 18510bda8d3eSCorvin Köhne retu = false; 1852d8be3d52SJohn Baldwin error = vm_handle_ipi(vcpu, vme, &retu); 18530bda8d3eSCorvin Köhne } 18540bda8d3eSCorvin Köhne 1855d087a399SNeel Natu if (error == 0 && retu == false) 1856f76fc5d4SNeel Natu goto restart; 1857f76fc5d4SNeel Natu 18583dc3d32aSJohn Baldwin vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1); 1859248e6799SNeel Natu VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode); 1860248e6799SNeel Natu 1861318224bbSNeel Natu /* copy the exit information */ 1862318224bbSNeel Natu bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit)); 1863366f6083SPeter Grehan return (error); 1864366f6083SPeter Grehan } 1865366f6083SPeter Grehan 1866366f6083SPeter Grehan int 1867d3956e46SJohn Baldwin vm_restart_instruction(struct vcpu *vcpu) 1868c9c75df4SNeel Natu { 1869d087a399SNeel Natu enum vcpu_state state; 1870d087a399SNeel Natu uint64_t rip; 187173505a10SRobert Wing int error __diagused; 1872c9c75df4SNeel Natu 1873d3956e46SJohn Baldwin state = vcpu_get_state(vcpu, NULL); 1874d087a399SNeel Natu if (state == VCPU_RUNNING) { 1875d087a399SNeel Natu /* 1876d087a399SNeel Natu * When a vcpu is "running" the next instruction is determined 1877d087a399SNeel Natu * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. 1878d087a399SNeel Natu * Thus setting 'inst_length' to zero will cause the current 1879d087a399SNeel Natu * instruction to be restarted. 1880d087a399SNeel Natu */ 1881c9c75df4SNeel Natu vcpu->exitinfo.inst_length = 0; 1882d3956e46SJohn Baldwin VMM_CTR1(vcpu, "restarting instruction at %#lx by " 1883d087a399SNeel Natu "setting inst_length to zero", vcpu->exitinfo.rip); 1884d087a399SNeel Natu } else if (state == VCPU_FROZEN) { 1885d087a399SNeel Natu /* 1886d087a399SNeel Natu * When a vcpu is "frozen" it is outside the critical section 188715add60dSPeter Grehan * around vmmops_run() and 'nextrip' points to the next 188815add60dSPeter Grehan * instruction. Thus instruction restart is achieved by setting 188915add60dSPeter Grehan * 'nextrip' to the vcpu's %rip. 1890d087a399SNeel Natu */ 1891d3956e46SJohn Baldwin error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip); 1892d087a399SNeel Natu KASSERT(!error, ("%s: error %d getting rip", __func__, error)); 1893d3956e46SJohn Baldwin VMM_CTR2(vcpu, "restarting instruction by updating " 1894d087a399SNeel Natu "nextrip from %#lx to %#lx", vcpu->nextrip, rip); 1895d087a399SNeel Natu vcpu->nextrip = rip; 1896d087a399SNeel Natu } else { 1897d087a399SNeel Natu panic("%s: invalid state %d", __func__, state); 1898d087a399SNeel Natu } 1899c9c75df4SNeel Natu return (0); 1900c9c75df4SNeel Natu } 1901c9c75df4SNeel Natu 1902c9c75df4SNeel Natu int 190380cb5d84SJohn Baldwin vm_exit_intinfo(struct vcpu *vcpu, uint64_t info) 1904091d4532SNeel Natu { 1905091d4532SNeel Natu int type, vector; 1906091d4532SNeel Natu 1907091d4532SNeel Natu if (info & VM_INTINFO_VALID) { 1908091d4532SNeel Natu type = info & VM_INTINFO_TYPE; 1909091d4532SNeel Natu vector = info & 0xff; 1910091d4532SNeel Natu if (type == VM_INTINFO_NMI && vector != IDT_NMI) 1911091d4532SNeel Natu return (EINVAL); 1912091d4532SNeel Natu if (type == VM_INTINFO_HWEXCEPTION && vector >= 32) 1913091d4532SNeel Natu return (EINVAL); 1914091d4532SNeel Natu if (info & VM_INTINFO_RSVD) 1915091d4532SNeel Natu return (EINVAL); 1916091d4532SNeel Natu } else { 1917091d4532SNeel Natu info = 0; 1918091d4532SNeel Natu } 191980cb5d84SJohn Baldwin VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info); 1920091d4532SNeel Natu vcpu->exitintinfo = info; 1921091d4532SNeel Natu return (0); 1922091d4532SNeel Natu } 1923091d4532SNeel Natu 1924091d4532SNeel Natu enum exc_class { 1925091d4532SNeel Natu EXC_BENIGN, 1926091d4532SNeel Natu EXC_CONTRIBUTORY, 1927091d4532SNeel Natu EXC_PAGEFAULT 1928091d4532SNeel Natu }; 1929091d4532SNeel Natu 1930091d4532SNeel Natu #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ 1931091d4532SNeel Natu 1932091d4532SNeel Natu static enum exc_class 1933091d4532SNeel Natu exception_class(uint64_t info) 1934091d4532SNeel Natu { 1935091d4532SNeel Natu int type, vector; 1936091d4532SNeel Natu 1937091d4532SNeel Natu KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info)); 1938091d4532SNeel Natu type = info & VM_INTINFO_TYPE; 1939091d4532SNeel Natu vector = info & 0xff; 1940091d4532SNeel Natu 1941091d4532SNeel Natu /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ 1942091d4532SNeel Natu switch (type) { 1943091d4532SNeel Natu case VM_INTINFO_HWINTR: 1944091d4532SNeel Natu case VM_INTINFO_SWINTR: 1945091d4532SNeel Natu case VM_INTINFO_NMI: 1946091d4532SNeel Natu return (EXC_BENIGN); 1947091d4532SNeel Natu default: 1948091d4532SNeel Natu /* 1949091d4532SNeel Natu * Hardware exception. 1950091d4532SNeel Natu * 1951091d4532SNeel Natu * SVM and VT-x use identical type values to represent NMI, 1952091d4532SNeel Natu * hardware interrupt and software interrupt. 1953091d4532SNeel Natu * 1954091d4532SNeel Natu * SVM uses type '3' for all exceptions. VT-x uses type '3' 1955091d4532SNeel Natu * for exceptions except #BP and #OF. #BP and #OF use a type 1956091d4532SNeel Natu * value of '5' or '6'. Therefore we don't check for explicit 1957091d4532SNeel Natu * values of 'type' to classify 'intinfo' into a hardware 1958091d4532SNeel Natu * exception. 1959091d4532SNeel Natu */ 1960091d4532SNeel Natu break; 1961091d4532SNeel Natu } 1962091d4532SNeel Natu 1963091d4532SNeel Natu switch (vector) { 1964091d4532SNeel Natu case IDT_PF: 1965091d4532SNeel Natu case IDT_VE: 1966091d4532SNeel Natu return (EXC_PAGEFAULT); 1967091d4532SNeel Natu case IDT_DE: 1968091d4532SNeel Natu case IDT_TS: 1969091d4532SNeel Natu case IDT_NP: 1970091d4532SNeel Natu case IDT_SS: 1971091d4532SNeel Natu case IDT_GP: 1972091d4532SNeel Natu return (EXC_CONTRIBUTORY); 1973091d4532SNeel Natu default: 1974091d4532SNeel Natu return (EXC_BENIGN); 1975091d4532SNeel Natu } 1976091d4532SNeel Natu } 1977091d4532SNeel Natu 1978091d4532SNeel Natu static int 197980cb5d84SJohn Baldwin nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2, 1980091d4532SNeel Natu uint64_t *retinfo) 1981091d4532SNeel Natu { 1982091d4532SNeel Natu enum exc_class exc1, exc2; 1983091d4532SNeel Natu int type1, vector1; 1984091d4532SNeel Natu 1985091d4532SNeel Natu KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); 1986091d4532SNeel Natu KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); 1987091d4532SNeel Natu 1988091d4532SNeel Natu /* 1989091d4532SNeel Natu * If an exception occurs while attempting to call the double-fault 1990091d4532SNeel Natu * handler the processor enters shutdown mode (aka triple fault). 1991091d4532SNeel Natu */ 1992091d4532SNeel Natu type1 = info1 & VM_INTINFO_TYPE; 1993091d4532SNeel Natu vector1 = info1 & 0xff; 1994091d4532SNeel Natu if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { 199580cb5d84SJohn Baldwin VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)", 1996091d4532SNeel Natu info1, info2); 199780cb5d84SJohn Baldwin vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); 1998091d4532SNeel Natu *retinfo = 0; 1999091d4532SNeel Natu return (0); 2000091d4532SNeel Natu } 2001091d4532SNeel Natu 2002091d4532SNeel Natu /* 2003091d4532SNeel Natu * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 2004091d4532SNeel Natu */ 2005091d4532SNeel Natu exc1 = exception_class(info1); 2006091d4532SNeel Natu exc2 = exception_class(info2); 2007091d4532SNeel Natu if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 2008091d4532SNeel Natu (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 2009091d4532SNeel Natu /* Convert nested fault into a double fault. */ 2010091d4532SNeel Natu *retinfo = IDT_DF; 2011091d4532SNeel Natu *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 2012091d4532SNeel Natu *retinfo |= VM_INTINFO_DEL_ERRCODE; 2013091d4532SNeel Natu } else { 2014091d4532SNeel Natu /* Handle exceptions serially */ 2015091d4532SNeel Natu *retinfo = info2; 2016091d4532SNeel Natu } 2017091d4532SNeel Natu return (1); 2018091d4532SNeel Natu } 2019091d4532SNeel Natu 2020091d4532SNeel Natu static uint64_t 2021091d4532SNeel Natu vcpu_exception_intinfo(struct vcpu *vcpu) 2022091d4532SNeel Natu { 2023091d4532SNeel Natu uint64_t info = 0; 2024091d4532SNeel Natu 2025091d4532SNeel Natu if (vcpu->exception_pending) { 2026c9c75df4SNeel Natu info = vcpu->exc_vector & 0xff; 2027091d4532SNeel Natu info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 2028c9c75df4SNeel Natu if (vcpu->exc_errcode_valid) { 2029091d4532SNeel Natu info |= VM_INTINFO_DEL_ERRCODE; 2030c9c75df4SNeel Natu info |= (uint64_t)vcpu->exc_errcode << 32; 2031091d4532SNeel Natu } 2032091d4532SNeel Natu } 2033091d4532SNeel Natu return (info); 2034091d4532SNeel Natu } 2035091d4532SNeel Natu 2036091d4532SNeel Natu int 203780cb5d84SJohn Baldwin vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo) 2038091d4532SNeel Natu { 2039091d4532SNeel Natu uint64_t info1, info2; 2040091d4532SNeel Natu int valid; 2041091d4532SNeel Natu 2042091d4532SNeel Natu info1 = vcpu->exitintinfo; 2043091d4532SNeel Natu vcpu->exitintinfo = 0; 2044091d4532SNeel Natu 2045091d4532SNeel Natu info2 = 0; 2046091d4532SNeel Natu if (vcpu->exception_pending) { 2047091d4532SNeel Natu info2 = vcpu_exception_intinfo(vcpu); 2048091d4532SNeel Natu vcpu->exception_pending = 0; 204980cb5d84SJohn Baldwin VMM_CTR2(vcpu, "Exception %d delivered: %#lx", 2050c9c75df4SNeel Natu vcpu->exc_vector, info2); 2051091d4532SNeel Natu } 2052091d4532SNeel Natu 2053091d4532SNeel Natu if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { 205480cb5d84SJohn Baldwin valid = nested_fault(vcpu, info1, info2, retinfo); 2055091d4532SNeel Natu } else if (info1 & VM_INTINFO_VALID) { 2056091d4532SNeel Natu *retinfo = info1; 2057091d4532SNeel Natu valid = 1; 2058091d4532SNeel Natu } else if (info2 & VM_INTINFO_VALID) { 2059091d4532SNeel Natu *retinfo = info2; 2060091d4532SNeel Natu valid = 1; 2061091d4532SNeel Natu } else { 2062091d4532SNeel Natu valid = 0; 2063091d4532SNeel Natu } 2064091d4532SNeel Natu 2065091d4532SNeel Natu if (valid) { 2066d3956e46SJohn Baldwin VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), " 2067091d4532SNeel Natu "retinfo(%#lx)", __func__, info1, info2, *retinfo); 2068091d4532SNeel Natu } 2069091d4532SNeel Natu 2070091d4532SNeel Natu return (valid); 2071091d4532SNeel Natu } 2072091d4532SNeel Natu 2073091d4532SNeel Natu int 2074091d4532SNeel Natu vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 2075091d4532SNeel Natu { 2076091d4532SNeel Natu struct vcpu *vcpu; 2077091d4532SNeel Natu 2078a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2079091d4532SNeel Natu return (EINVAL); 2080091d4532SNeel Natu 2081091d4532SNeel Natu vcpu = &vm->vcpu[vcpuid]; 2082091d4532SNeel Natu *info1 = vcpu->exitintinfo; 2083091d4532SNeel Natu *info2 = vcpu_exception_intinfo(vcpu); 2084091d4532SNeel Natu return (0); 2085091d4532SNeel Natu } 2086091d4532SNeel Natu 2087091d4532SNeel Natu int 2088d3956e46SJohn Baldwin vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, 2089c9c75df4SNeel Natu uint32_t errcode, int restart_instruction) 2090366f6083SPeter Grehan { 209147b9935dSNeel Natu uint64_t regval; 209273505a10SRobert Wing int error __diagused; 2093dc506506SNeel Natu 2094c9c75df4SNeel Natu if (vector < 0 || vector >= 32) 2095366f6083SPeter Grehan return (EINVAL); 2096366f6083SPeter Grehan 2097091d4532SNeel Natu /* 2098091d4532SNeel Natu * A double fault exception should never be injected directly into 2099091d4532SNeel Natu * the guest. It is a derived exception that results from specific 2100091d4532SNeel Natu * combinations of nested faults. 2101091d4532SNeel Natu */ 2102c9c75df4SNeel Natu if (vector == IDT_DF) 2103091d4532SNeel Natu return (EINVAL); 2104091d4532SNeel Natu 2105dc506506SNeel Natu if (vcpu->exception_pending) { 2106d3956e46SJohn Baldwin VMM_CTR2(vcpu, "Unable to inject exception %d due to " 2107c9c75df4SNeel Natu "pending exception %d", vector, vcpu->exc_vector); 2108dc506506SNeel Natu return (EBUSY); 2109dc506506SNeel Natu } 2110dc506506SNeel Natu 211147b9935dSNeel Natu if (errcode_valid) { 211247b9935dSNeel Natu /* 211347b9935dSNeel Natu * Exceptions don't deliver an error code in real mode. 211447b9935dSNeel Natu */ 2115d3956e46SJohn Baldwin error = vm_get_register(vcpu, VM_REG_GUEST_CR0, ®val); 211647b9935dSNeel Natu KASSERT(!error, ("%s: error %d getting CR0", __func__, error)); 211747b9935dSNeel Natu if (!(regval & CR0_PE)) 211847b9935dSNeel Natu errcode_valid = 0; 211947b9935dSNeel Natu } 212047b9935dSNeel Natu 21212ce12423SNeel Natu /* 21222ce12423SNeel Natu * From section 26.6.1 "Interruptibility State" in Intel SDM: 21232ce12423SNeel Natu * 21242ce12423SNeel Natu * Event blocking by "STI" or "MOV SS" is cleared after guest executes 21252ce12423SNeel Natu * one instruction or incurs an exception. 21262ce12423SNeel Natu */ 2127d3956e46SJohn Baldwin error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0); 21282ce12423SNeel Natu KASSERT(error == 0, ("%s: error %d clearing interrupt shadow", 21292ce12423SNeel Natu __func__, error)); 21302ce12423SNeel Natu 2131c9c75df4SNeel Natu if (restart_instruction) 2132d3956e46SJohn Baldwin vm_restart_instruction(vcpu); 2133c9c75df4SNeel Natu 2134dc506506SNeel Natu vcpu->exception_pending = 1; 2135c9c75df4SNeel Natu vcpu->exc_vector = vector; 2136c9c75df4SNeel Natu vcpu->exc_errcode = errcode; 2137c9c75df4SNeel Natu vcpu->exc_errcode_valid = errcode_valid; 2138d3956e46SJohn Baldwin VMM_CTR1(vcpu, "Exception %d pending", vector); 2139dc506506SNeel Natu return (0); 2140dc506506SNeel Natu } 2141dc506506SNeel Natu 2142d37f2adbSNeel Natu void 2143d3956e46SJohn Baldwin vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode) 2144dc506506SNeel Natu { 214573505a10SRobert Wing int error __diagused, restart_instruction; 2146dc506506SNeel Natu 2147c9c75df4SNeel Natu restart_instruction = 1; 2148d37f2adbSNeel Natu 2149d3956e46SJohn Baldwin error = vm_inject_exception(vcpu, vector, errcode_valid, 2150c9c75df4SNeel Natu errcode, restart_instruction); 2151dc506506SNeel Natu KASSERT(error == 0, ("vm_inject_exception error %d", error)); 2152dc506506SNeel Natu } 2153dc506506SNeel Natu 2154dc506506SNeel Natu void 2155d3956e46SJohn Baldwin vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2) 2156fd949af6SNeel Natu { 215773505a10SRobert Wing int error __diagused; 215837a723a5SNeel Natu 2159d3956e46SJohn Baldwin VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx", 216037a723a5SNeel Natu error_code, cr2); 216137a723a5SNeel Natu 2162d3956e46SJohn Baldwin error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2); 216337a723a5SNeel Natu KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); 2164fd949af6SNeel Natu 2165d3956e46SJohn Baldwin vm_inject_fault(vcpu, IDT_PF, 1, error_code); 2166366f6083SPeter Grehan } 2167366f6083SPeter Grehan 216861592433SNeel Natu static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 2169366f6083SPeter Grehan 2170f352ff0cSNeel Natu int 2171f352ff0cSNeel Natu vm_inject_nmi(struct vm *vm, int vcpuid) 2172f352ff0cSNeel Natu { 2173f352ff0cSNeel Natu struct vcpu *vcpu; 2174f352ff0cSNeel Natu 2175a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2176366f6083SPeter Grehan return (EINVAL); 2177366f6083SPeter Grehan 2178f352ff0cSNeel Natu vcpu = &vm->vcpu[vcpuid]; 2179f352ff0cSNeel Natu 2180f352ff0cSNeel Natu vcpu->nmi_pending = 1; 2181de5ea6b6SNeel Natu vcpu_notify_event(vm, vcpuid, false); 2182f352ff0cSNeel Natu return (0); 2183f352ff0cSNeel Natu } 2184f352ff0cSNeel Natu 2185f352ff0cSNeel Natu int 218680cb5d84SJohn Baldwin vm_nmi_pending(struct vcpu *vcpu) 2187f352ff0cSNeel Natu { 2188f352ff0cSNeel Natu return (vcpu->nmi_pending); 2189f352ff0cSNeel Natu } 2190f352ff0cSNeel Natu 2191f352ff0cSNeel Natu void 219280cb5d84SJohn Baldwin vm_nmi_clear(struct vcpu *vcpu) 2193f352ff0cSNeel Natu { 2194f352ff0cSNeel Natu if (vcpu->nmi_pending == 0) 2195f352ff0cSNeel Natu panic("vm_nmi_clear: inconsistent nmi_pending state"); 2196f352ff0cSNeel Natu 2197f352ff0cSNeel Natu vcpu->nmi_pending = 0; 21983dc3d32aSJohn Baldwin vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1); 2199366f6083SPeter Grehan } 2200366f6083SPeter Grehan 22010775fbb4STycho Nightingale static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 22020775fbb4STycho Nightingale 22030775fbb4STycho Nightingale int 22040775fbb4STycho Nightingale vm_inject_extint(struct vm *vm, int vcpuid) 22050775fbb4STycho Nightingale { 22060775fbb4STycho Nightingale struct vcpu *vcpu; 22070775fbb4STycho Nightingale 2208a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 22090775fbb4STycho Nightingale return (EINVAL); 22100775fbb4STycho Nightingale 22110775fbb4STycho Nightingale vcpu = &vm->vcpu[vcpuid]; 22120775fbb4STycho Nightingale 22130775fbb4STycho Nightingale vcpu->extint_pending = 1; 22140775fbb4STycho Nightingale vcpu_notify_event(vm, vcpuid, false); 22150775fbb4STycho Nightingale return (0); 22160775fbb4STycho Nightingale } 22170775fbb4STycho Nightingale 22180775fbb4STycho Nightingale int 221980cb5d84SJohn Baldwin vm_extint_pending(struct vcpu *vcpu) 22200775fbb4STycho Nightingale { 22210775fbb4STycho Nightingale return (vcpu->extint_pending); 22220775fbb4STycho Nightingale } 22230775fbb4STycho Nightingale 22240775fbb4STycho Nightingale void 222580cb5d84SJohn Baldwin vm_extint_clear(struct vcpu *vcpu) 22260775fbb4STycho Nightingale { 22270775fbb4STycho Nightingale if (vcpu->extint_pending == 0) 22280775fbb4STycho Nightingale panic("vm_extint_clear: inconsistent extint_pending state"); 22290775fbb4STycho Nightingale 22300775fbb4STycho Nightingale vcpu->extint_pending = 0; 22313dc3d32aSJohn Baldwin vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1); 22320775fbb4STycho Nightingale } 22330775fbb4STycho Nightingale 2234366f6083SPeter Grehan int 2235366f6083SPeter Grehan vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) 2236366f6083SPeter Grehan { 2237a488c9c9SRodney W. Grimes if (vcpu < 0 || vcpu >= vm->maxcpus) 2238366f6083SPeter Grehan return (EINVAL); 2239366f6083SPeter Grehan 2240366f6083SPeter Grehan if (type < 0 || type >= VM_CAP_MAX) 2241366f6083SPeter Grehan return (EINVAL); 2242366f6083SPeter Grehan 2243869c8d19SJohn Baldwin return (vmmops_getcap(vcpu_cookie(vm, vcpu), type, retval)); 2244366f6083SPeter Grehan } 2245366f6083SPeter Grehan 2246366f6083SPeter Grehan int 2247366f6083SPeter Grehan vm_set_capability(struct vm *vm, int vcpu, int type, int val) 2248366f6083SPeter Grehan { 2249a488c9c9SRodney W. Grimes if (vcpu < 0 || vcpu >= vm->maxcpus) 2250366f6083SPeter Grehan return (EINVAL); 2251366f6083SPeter Grehan 2252366f6083SPeter Grehan if (type < 0 || type >= VM_CAP_MAX) 2253366f6083SPeter Grehan return (EINVAL); 2254366f6083SPeter Grehan 2255869c8d19SJohn Baldwin return (vmmops_setcap(vcpu_cookie(vm, vcpu), type, val)); 2256366f6083SPeter Grehan } 2257366f6083SPeter Grehan 2258950af9ffSJohn Baldwin struct vm * 2259950af9ffSJohn Baldwin vcpu_vm(struct vcpu *vcpu) 2260950af9ffSJohn Baldwin { 2261950af9ffSJohn Baldwin return (vcpu->vm); 2262950af9ffSJohn Baldwin } 2263950af9ffSJohn Baldwin 2264950af9ffSJohn Baldwin int 2265950af9ffSJohn Baldwin vcpu_vcpuid(struct vcpu *vcpu) 2266950af9ffSJohn Baldwin { 2267950af9ffSJohn Baldwin return (vcpu->vcpuid); 2268950af9ffSJohn Baldwin } 2269950af9ffSJohn Baldwin 2270950af9ffSJohn Baldwin struct vcpu * 2271950af9ffSJohn Baldwin vm_vcpu(struct vm *vm, int vcpuid) 2272950af9ffSJohn Baldwin { 2273950af9ffSJohn Baldwin return (&vm->vcpu[vcpuid]); 2274950af9ffSJohn Baldwin } 2275950af9ffSJohn Baldwin 2276366f6083SPeter Grehan struct vlapic * 2277d3956e46SJohn Baldwin vm_lapic(struct vcpu *vcpu) 2278366f6083SPeter Grehan { 2279d3956e46SJohn Baldwin return (vcpu->vlapic); 2280366f6083SPeter Grehan } 2281366f6083SPeter Grehan 2282565bbb86SNeel Natu struct vioapic * 2283565bbb86SNeel Natu vm_ioapic(struct vm *vm) 2284565bbb86SNeel Natu { 2285565bbb86SNeel Natu 2286565bbb86SNeel Natu return (vm->vioapic); 2287565bbb86SNeel Natu } 2288565bbb86SNeel Natu 228908e3ff32SNeel Natu struct vhpet * 229008e3ff32SNeel Natu vm_hpet(struct vm *vm) 229108e3ff32SNeel Natu { 229208e3ff32SNeel Natu 229308e3ff32SNeel Natu return (vm->vhpet); 229408e3ff32SNeel Natu } 229508e3ff32SNeel Natu 2296490d56c5SEd Maste bool 2297366f6083SPeter Grehan vmm_is_pptdev(int bus, int slot, int func) 2298366f6083SPeter Grehan { 2299490d56c5SEd Maste int b, f, i, n, s; 2300366f6083SPeter Grehan char *val, *cp, *cp2; 2301490d56c5SEd Maste bool found; 2302366f6083SPeter Grehan 2303366f6083SPeter Grehan /* 230407044a96SNeel Natu * XXX 230507044a96SNeel Natu * The length of an environment variable is limited to 128 bytes which 230607044a96SNeel Natu * puts an upper limit on the number of passthru devices that may be 230707044a96SNeel Natu * specified using a single environment variable. 230807044a96SNeel Natu * 230907044a96SNeel Natu * Work around this by scanning multiple environment variable 231007044a96SNeel Natu * names instead of a single one - yuck! 2311366f6083SPeter Grehan */ 231207044a96SNeel Natu const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL }; 231307044a96SNeel Natu 231407044a96SNeel Natu /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */ 2315490d56c5SEd Maste found = false; 231607044a96SNeel Natu for (i = 0; names[i] != NULL && !found; i++) { 23172be111bfSDavide Italiano cp = val = kern_getenv(names[i]); 2318366f6083SPeter Grehan while (cp != NULL && *cp != '\0') { 2319366f6083SPeter Grehan if ((cp2 = strchr(cp, ' ')) != NULL) 2320366f6083SPeter Grehan *cp2 = '\0'; 2321366f6083SPeter Grehan 2322366f6083SPeter Grehan n = sscanf(cp, "%d/%d/%d", &b, &s, &f); 2323366f6083SPeter Grehan if (n == 3 && bus == b && slot == s && func == f) { 2324490d56c5SEd Maste found = true; 2325366f6083SPeter Grehan break; 2326366f6083SPeter Grehan } 2327366f6083SPeter Grehan 2328366f6083SPeter Grehan if (cp2 != NULL) 2329366f6083SPeter Grehan *cp2++ = ' '; 2330366f6083SPeter Grehan 2331366f6083SPeter Grehan cp = cp2; 2332366f6083SPeter Grehan } 2333366f6083SPeter Grehan freeenv(val); 233407044a96SNeel Natu } 2335366f6083SPeter Grehan return (found); 2336366f6083SPeter Grehan } 2337366f6083SPeter Grehan 2338366f6083SPeter Grehan void * 2339366f6083SPeter Grehan vm_iommu_domain(struct vm *vm) 2340366f6083SPeter Grehan { 2341366f6083SPeter Grehan 2342366f6083SPeter Grehan return (vm->iommu); 2343366f6083SPeter Grehan } 2344366f6083SPeter Grehan 234575dd3366SNeel Natu int 2346f80330a8SNeel Natu vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, 2347f80330a8SNeel Natu bool from_idle) 2348366f6083SPeter Grehan { 234975dd3366SNeel Natu int error; 2350366f6083SPeter Grehan struct vcpu *vcpu; 2351366f6083SPeter Grehan 2352a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2353366f6083SPeter Grehan panic("vm_set_run_state: invalid vcpuid %d", vcpuid); 2354366f6083SPeter Grehan 2355366f6083SPeter Grehan vcpu = &vm->vcpu[vcpuid]; 2356366f6083SPeter Grehan 235775dd3366SNeel Natu vcpu_lock(vcpu); 2358248e6799SNeel Natu error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); 235975dd3366SNeel Natu vcpu_unlock(vcpu); 236075dd3366SNeel Natu 236175dd3366SNeel Natu return (error); 236275dd3366SNeel Natu } 236375dd3366SNeel Natu 236475dd3366SNeel Natu enum vcpu_state 2365d3956e46SJohn Baldwin vcpu_get_state(struct vcpu *vcpu, int *hostcpu) 2366366f6083SPeter Grehan { 236775dd3366SNeel Natu enum vcpu_state state; 2368366f6083SPeter Grehan 236975dd3366SNeel Natu vcpu_lock(vcpu); 237075dd3366SNeel Natu state = vcpu->state; 2371d3c11f40SPeter Grehan if (hostcpu != NULL) 2372d3c11f40SPeter Grehan *hostcpu = vcpu->hostcpu; 237375dd3366SNeel Natu vcpu_unlock(vcpu); 2374366f6083SPeter Grehan 237575dd3366SNeel Natu return (state); 2376366f6083SPeter Grehan } 2377366f6083SPeter Grehan 237895ebc360SNeel Natu int 2379366f6083SPeter Grehan vm_activate_cpu(struct vm *vm, int vcpuid) 2380366f6083SPeter Grehan { 2381366f6083SPeter Grehan 2382a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 238395ebc360SNeel Natu return (EINVAL); 238495ebc360SNeel Natu 238595ebc360SNeel Natu if (CPU_ISSET(vcpuid, &vm->active_cpus)) 238695ebc360SNeel Natu return (EBUSY); 238722d822c6SNeel Natu 238822d822c6SNeel Natu VCPU_CTR0(vm, vcpuid, "activated"); 238922d822c6SNeel Natu CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); 239095ebc360SNeel Natu return (0); 2391366f6083SPeter Grehan } 2392366f6083SPeter Grehan 2393fc276d92SJohn Baldwin int 2394fc276d92SJohn Baldwin vm_suspend_cpu(struct vm *vm, int vcpuid) 2395fc276d92SJohn Baldwin { 2396fc276d92SJohn Baldwin int i; 2397fc276d92SJohn Baldwin 2398a488c9c9SRodney W. Grimes if (vcpuid < -1 || vcpuid >= vm->maxcpus) 2399fc276d92SJohn Baldwin return (EINVAL); 2400fc276d92SJohn Baldwin 2401fc276d92SJohn Baldwin if (vcpuid == -1) { 2402fc276d92SJohn Baldwin vm->debug_cpus = vm->active_cpus; 2403a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) { 2404fc276d92SJohn Baldwin if (CPU_ISSET(i, &vm->active_cpus)) 2405fc276d92SJohn Baldwin vcpu_notify_event(vm, i, false); 2406fc276d92SJohn Baldwin } 2407fc276d92SJohn Baldwin } else { 2408fc276d92SJohn Baldwin if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 2409fc276d92SJohn Baldwin return (EINVAL); 2410fc276d92SJohn Baldwin 2411fc276d92SJohn Baldwin CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); 2412fc276d92SJohn Baldwin vcpu_notify_event(vm, vcpuid, false); 2413fc276d92SJohn Baldwin } 2414fc276d92SJohn Baldwin return (0); 2415fc276d92SJohn Baldwin } 2416fc276d92SJohn Baldwin 2417fc276d92SJohn Baldwin int 2418fc276d92SJohn Baldwin vm_resume_cpu(struct vm *vm, int vcpuid) 2419fc276d92SJohn Baldwin { 2420fc276d92SJohn Baldwin 2421a488c9c9SRodney W. Grimes if (vcpuid < -1 || vcpuid >= vm->maxcpus) 2422fc276d92SJohn Baldwin return (EINVAL); 2423fc276d92SJohn Baldwin 2424fc276d92SJohn Baldwin if (vcpuid == -1) { 2425fc276d92SJohn Baldwin CPU_ZERO(&vm->debug_cpus); 2426fc276d92SJohn Baldwin } else { 2427fc276d92SJohn Baldwin if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) 2428fc276d92SJohn Baldwin return (EINVAL); 2429fc276d92SJohn Baldwin 2430fc276d92SJohn Baldwin CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus); 2431fc276d92SJohn Baldwin } 2432fc276d92SJohn Baldwin return (0); 2433fc276d92SJohn Baldwin } 2434fc276d92SJohn Baldwin 2435fc276d92SJohn Baldwin int 243680cb5d84SJohn Baldwin vcpu_debugged(struct vcpu *vcpu) 2437fc276d92SJohn Baldwin { 2438fc276d92SJohn Baldwin 243980cb5d84SJohn Baldwin return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); 2440fc276d92SJohn Baldwin } 2441fc276d92SJohn Baldwin 2442a5615c90SPeter Grehan cpuset_t 2443366f6083SPeter Grehan vm_active_cpus(struct vm *vm) 2444366f6083SPeter Grehan { 2445366f6083SPeter Grehan 2446366f6083SPeter Grehan return (vm->active_cpus); 2447366f6083SPeter Grehan } 2448366f6083SPeter Grehan 244995ebc360SNeel Natu cpuset_t 2450fc276d92SJohn Baldwin vm_debug_cpus(struct vm *vm) 2451fc276d92SJohn Baldwin { 2452fc276d92SJohn Baldwin 2453fc276d92SJohn Baldwin return (vm->debug_cpus); 2454fc276d92SJohn Baldwin } 2455fc276d92SJohn Baldwin 2456fc276d92SJohn Baldwin cpuset_t 245795ebc360SNeel Natu vm_suspended_cpus(struct vm *vm) 245895ebc360SNeel Natu { 245995ebc360SNeel Natu 246095ebc360SNeel Natu return (vm->suspended_cpus); 246195ebc360SNeel Natu } 246295ebc360SNeel Natu 2463366f6083SPeter Grehan void * 24643dc3d32aSJohn Baldwin vcpu_stats(struct vcpu *vcpu) 2465366f6083SPeter Grehan { 2466366f6083SPeter Grehan 24673dc3d32aSJohn Baldwin return (vcpu->stats); 2468366f6083SPeter Grehan } 2469e9027382SNeel Natu 2470e9027382SNeel Natu int 2471e9027382SNeel Natu vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) 2472e9027382SNeel Natu { 2473a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2474e9027382SNeel Natu return (EINVAL); 2475e9027382SNeel Natu 2476e9027382SNeel Natu *state = vm->vcpu[vcpuid].x2apic_state; 2477e9027382SNeel Natu 2478e9027382SNeel Natu return (0); 2479e9027382SNeel Natu } 2480e9027382SNeel Natu 2481e9027382SNeel Natu int 2482e9027382SNeel Natu vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) 2483e9027382SNeel Natu { 2484d3956e46SJohn Baldwin struct vcpu *vcpu; 2485d3956e46SJohn Baldwin 2486a488c9c9SRodney W. Grimes if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2487e9027382SNeel Natu return (EINVAL); 2488e9027382SNeel Natu 24893f23d3caSNeel Natu if (state >= X2APIC_STATE_LAST) 2490e9027382SNeel Natu return (EINVAL); 2491e9027382SNeel Natu 2492d3956e46SJohn Baldwin vcpu = &vm->vcpu[vcpuid]; 2493d3956e46SJohn Baldwin vcpu->x2apic_state = state; 2494e9027382SNeel Natu 2495d3956e46SJohn Baldwin vlapic_set_x2apic_state(vcpu, state); 249673820fb0SNeel Natu 2497e9027382SNeel Natu return (0); 2498e9027382SNeel Natu } 249975dd3366SNeel Natu 250022821874SNeel Natu /* 250122821874SNeel Natu * This function is called to ensure that a vcpu "sees" a pending event 250222821874SNeel Natu * as soon as possible: 250322821874SNeel Natu * - If the vcpu thread is sleeping then it is woken up. 250422821874SNeel Natu * - If the vcpu is running on a different host_cpu then an IPI will be directed 250522821874SNeel Natu * to the host_cpu to cause the vcpu to trap into the hypervisor. 250622821874SNeel Natu */ 2507248e6799SNeel Natu static void 2508248e6799SNeel Natu vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr) 250975dd3366SNeel Natu { 251075dd3366SNeel Natu int hostcpu; 251175dd3366SNeel Natu 251275dd3366SNeel Natu hostcpu = vcpu->hostcpu; 2513ef39d7e9SNeel Natu if (vcpu->state == VCPU_RUNNING) { 2514ef39d7e9SNeel Natu KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 2515de5ea6b6SNeel Natu if (hostcpu != curcpu) { 2516ef39d7e9SNeel Natu if (lapic_intr) { 2517add611fdSNeel Natu vlapic_post_intr(vcpu->vlapic, hostcpu, 2518add611fdSNeel Natu vmm_ipinum); 2519ef39d7e9SNeel Natu } else { 252075dd3366SNeel Natu ipi_cpu(hostcpu, vmm_ipinum); 252175dd3366SNeel Natu } 2522ef39d7e9SNeel Natu } else { 2523ef39d7e9SNeel Natu /* 2524ef39d7e9SNeel Natu * If the 'vcpu' is running on 'curcpu' then it must 2525ef39d7e9SNeel Natu * be sending a notification to itself (e.g. SELF_IPI). 2526ef39d7e9SNeel Natu * The pending event will be picked up when the vcpu 2527ef39d7e9SNeel Natu * transitions back to guest context. 2528ef39d7e9SNeel Natu */ 2529ef39d7e9SNeel Natu } 2530ef39d7e9SNeel Natu } else { 2531ef39d7e9SNeel Natu KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 2532ef39d7e9SNeel Natu "with hostcpu %d", vcpu->state, hostcpu)); 2533366f6083SPeter Grehan if (vcpu->state == VCPU_SLEEPING) 2534366f6083SPeter Grehan wakeup_one(vcpu); 2535366f6083SPeter Grehan } 2536248e6799SNeel Natu } 2537248e6799SNeel Natu 2538248e6799SNeel Natu void 2539248e6799SNeel Natu vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr) 2540248e6799SNeel Natu { 2541248e6799SNeel Natu struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2542248e6799SNeel Natu 2543248e6799SNeel Natu vcpu_lock(vcpu); 2544248e6799SNeel Natu vcpu_notify_event_locked(vcpu, lapic_intr); 2545f76fc5d4SNeel Natu vcpu_unlock(vcpu); 2546f76fc5d4SNeel Natu } 2547318224bbSNeel Natu 2548318224bbSNeel Natu struct vmspace * 2549318224bbSNeel Natu vm_get_vmspace(struct vm *vm) 2550318224bbSNeel Natu { 2551318224bbSNeel Natu 2552318224bbSNeel Natu return (vm->vmspace); 2553318224bbSNeel Natu } 2554565bbb86SNeel Natu 2555565bbb86SNeel Natu int 2556565bbb86SNeel Natu vm_apicid2vcpuid(struct vm *vm, int apicid) 2557565bbb86SNeel Natu { 2558565bbb86SNeel Natu /* 2559565bbb86SNeel Natu * XXX apic id is assumed to be numerically identical to vcpu id 2560565bbb86SNeel Natu */ 2561565bbb86SNeel Natu return (apicid); 2562565bbb86SNeel Natu } 25635b8a8cd1SNeel Natu 2564b837daddSKonstantin Belousov int 2565d8be3d52SJohn Baldwin vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest, 25665b8a8cd1SNeel Natu vm_rendezvous_func_t func, void *arg) 25675b8a8cd1SNeel Natu { 2568d8be3d52SJohn Baldwin struct vm *vm = vcpu->vm; 2569b837daddSKonstantin Belousov int error, i; 2570970955e4SNeel Natu 25715b8a8cd1SNeel Natu /* 25725b8a8cd1SNeel Natu * Enforce that this function is called without any locks 25735b8a8cd1SNeel Natu */ 25745b8a8cd1SNeel Natu WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); 25755b8a8cd1SNeel Natu 25765b8a8cd1SNeel Natu restart: 25775b8a8cd1SNeel Natu mtx_lock(&vm->rendezvous_mtx); 25785b8a8cd1SNeel Natu if (vm->rendezvous_func != NULL) { 25795b8a8cd1SNeel Natu /* 25805b8a8cd1SNeel Natu * If a rendezvous is already in progress then we need to 25815b8a8cd1SNeel Natu * call the rendezvous handler in case this 'vcpuid' is one 25825b8a8cd1SNeel Natu * of the targets of the rendezvous. 25835b8a8cd1SNeel Natu */ 2584d8be3d52SJohn Baldwin VMM_CTR0(vcpu, "Rendezvous already in progress"); 25855b8a8cd1SNeel Natu mtx_unlock(&vm->rendezvous_mtx); 2586d8be3d52SJohn Baldwin error = vm_handle_rendezvous(vcpu); 2587b837daddSKonstantin Belousov if (error != 0) 2588b837daddSKonstantin Belousov return (error); 25895b8a8cd1SNeel Natu goto restart; 25905b8a8cd1SNeel Natu } 25915b8a8cd1SNeel Natu KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " 25925b8a8cd1SNeel Natu "rendezvous is still in progress")); 25935b8a8cd1SNeel Natu 2594d8be3d52SJohn Baldwin VMM_CTR0(vcpu, "Initiating rendezvous"); 25955b8a8cd1SNeel Natu vm->rendezvous_req_cpus = dest; 25965b8a8cd1SNeel Natu CPU_ZERO(&vm->rendezvous_done_cpus); 25975b8a8cd1SNeel Natu vm->rendezvous_arg = arg; 2598869dbab7SAndriy Gapon vm->rendezvous_func = func; 25995b8a8cd1SNeel Natu mtx_unlock(&vm->rendezvous_mtx); 26005b8a8cd1SNeel Natu 2601970955e4SNeel Natu /* 2602970955e4SNeel Natu * Wake up any sleeping vcpus and trigger a VM-exit in any running 2603970955e4SNeel Natu * vcpus so they handle the rendezvous as soon as possible. 2604970955e4SNeel Natu */ 2605a488c9c9SRodney W. Grimes for (i = 0; i < vm->maxcpus; i++) { 2606970955e4SNeel Natu if (CPU_ISSET(i, &dest)) 2607970955e4SNeel Natu vcpu_notify_event(vm, i, false); 2608970955e4SNeel Natu } 2609970955e4SNeel Natu 2610d8be3d52SJohn Baldwin return (vm_handle_rendezvous(vcpu)); 26115b8a8cd1SNeel Natu } 2612762fd208STycho Nightingale 2613762fd208STycho Nightingale struct vatpic * 2614762fd208STycho Nightingale vm_atpic(struct vm *vm) 2615762fd208STycho Nightingale { 2616762fd208STycho Nightingale return (vm->vatpic); 2617762fd208STycho Nightingale } 2618e883c9bbSTycho Nightingale 2619e883c9bbSTycho Nightingale struct vatpit * 2620e883c9bbSTycho Nightingale vm_atpit(struct vm *vm) 2621e883c9bbSTycho Nightingale { 2622e883c9bbSTycho Nightingale return (vm->vatpit); 2623e883c9bbSTycho Nightingale } 2624d17b5104SNeel Natu 2625160ef77aSNeel Natu struct vpmtmr * 2626160ef77aSNeel Natu vm_pmtmr(struct vm *vm) 2627160ef77aSNeel Natu { 2628160ef77aSNeel Natu 2629160ef77aSNeel Natu return (vm->vpmtmr); 2630160ef77aSNeel Natu } 2631160ef77aSNeel Natu 26320dafa5cdSNeel Natu struct vrtc * 26330dafa5cdSNeel Natu vm_rtc(struct vm *vm) 26340dafa5cdSNeel Natu { 26350dafa5cdSNeel Natu 26360dafa5cdSNeel Natu return (vm->vrtc); 26370dafa5cdSNeel Natu } 26380dafa5cdSNeel Natu 2639d17b5104SNeel Natu enum vm_reg_name 2640d17b5104SNeel Natu vm_segment_name(int seg) 2641d17b5104SNeel Natu { 2642d17b5104SNeel Natu static enum vm_reg_name seg_names[] = { 2643d17b5104SNeel Natu VM_REG_GUEST_ES, 2644d17b5104SNeel Natu VM_REG_GUEST_CS, 2645d17b5104SNeel Natu VM_REG_GUEST_SS, 2646d17b5104SNeel Natu VM_REG_GUEST_DS, 2647d17b5104SNeel Natu VM_REG_GUEST_FS, 2648d17b5104SNeel Natu VM_REG_GUEST_GS 2649d17b5104SNeel Natu }; 2650d17b5104SNeel Natu 2651d17b5104SNeel Natu KASSERT(seg >= 0 && seg < nitems(seg_names), 2652d17b5104SNeel Natu ("%s: invalid segment encoding %d", __func__, seg)); 2653d17b5104SNeel Natu return (seg_names[seg]); 2654d17b5104SNeel Natu } 2655cf1d80d8SPeter Grehan 2656d665d229SNeel Natu void 26572b4fe856SJohn Baldwin vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo) 2658d665d229SNeel Natu { 2659d665d229SNeel Natu int idx; 2660d665d229SNeel Natu 2661d665d229SNeel Natu for (idx = 0; idx < num_copyinfo; idx++) { 2662d665d229SNeel Natu if (copyinfo[idx].cookie != NULL) 2663d665d229SNeel Natu vm_gpa_release(copyinfo[idx].cookie); 2664d665d229SNeel Natu } 2665d665d229SNeel Natu bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo)); 2666d665d229SNeel Natu } 2667d665d229SNeel Natu 2668d665d229SNeel Natu int 2669d3956e46SJohn Baldwin vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, 2670d665d229SNeel Natu uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, 26719c4d5478SNeel Natu int num_copyinfo, int *fault) 2672d665d229SNeel Natu { 2673d665d229SNeel Natu int error, idx, nused; 2674d665d229SNeel Natu size_t n, off, remaining; 2675d665d229SNeel Natu void *hva, *cookie; 2676d665d229SNeel Natu uint64_t gpa; 2677d665d229SNeel Natu 2678d665d229SNeel Natu bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo); 2679d665d229SNeel Natu 2680d665d229SNeel Natu nused = 0; 2681d665d229SNeel Natu remaining = len; 2682d665d229SNeel Natu while (remaining > 0) { 2683d665d229SNeel Natu KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo")); 2684d3956e46SJohn Baldwin error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); 26859c4d5478SNeel Natu if (error || *fault) 2686d665d229SNeel Natu return (error); 2687d665d229SNeel Natu off = gpa & PAGE_MASK; 2688d665d229SNeel Natu n = min(remaining, PAGE_SIZE - off); 2689d665d229SNeel Natu copyinfo[nused].gpa = gpa; 2690d665d229SNeel Natu copyinfo[nused].len = n; 2691d665d229SNeel Natu remaining -= n; 2692d665d229SNeel Natu gla += n; 2693d665d229SNeel Natu nused++; 2694d665d229SNeel Natu } 2695d665d229SNeel Natu 2696d665d229SNeel Natu for (idx = 0; idx < nused; idx++) { 2697d3956e46SJohn Baldwin hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa, 26989b1aa8d6SNeel Natu copyinfo[idx].len, prot, &cookie); 2699d665d229SNeel Natu if (hva == NULL) 2700d665d229SNeel Natu break; 2701d665d229SNeel Natu copyinfo[idx].hva = hva; 2702d665d229SNeel Natu copyinfo[idx].cookie = cookie; 2703d665d229SNeel Natu } 2704d665d229SNeel Natu 2705d665d229SNeel Natu if (idx != nused) { 27062b4fe856SJohn Baldwin vm_copy_teardown(copyinfo, num_copyinfo); 27079c4d5478SNeel Natu return (EFAULT); 2708d665d229SNeel Natu } else { 27099c4d5478SNeel Natu *fault = 0; 2710d665d229SNeel Natu return (0); 2711d665d229SNeel Natu } 2712d665d229SNeel Natu } 2713d665d229SNeel Natu 2714d665d229SNeel Natu void 27152b4fe856SJohn Baldwin vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len) 2716d665d229SNeel Natu { 2717d665d229SNeel Natu char *dst; 2718d665d229SNeel Natu int idx; 2719d665d229SNeel Natu 2720d665d229SNeel Natu dst = kaddr; 2721d665d229SNeel Natu idx = 0; 2722d665d229SNeel Natu while (len > 0) { 2723d665d229SNeel Natu bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); 2724d665d229SNeel Natu len -= copyinfo[idx].len; 2725d665d229SNeel Natu dst += copyinfo[idx].len; 2726d665d229SNeel Natu idx++; 2727d665d229SNeel Natu } 2728d665d229SNeel Natu } 2729d665d229SNeel Natu 2730d665d229SNeel Natu void 27312b4fe856SJohn Baldwin vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len) 2732d665d229SNeel Natu { 2733d665d229SNeel Natu const char *src; 2734d665d229SNeel Natu int idx; 2735d665d229SNeel Natu 2736d665d229SNeel Natu src = kaddr; 2737d665d229SNeel Natu idx = 0; 2738d665d229SNeel Natu while (len > 0) { 2739d665d229SNeel Natu bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); 2740d665d229SNeel Natu len -= copyinfo[idx].len; 2741d665d229SNeel Natu src += copyinfo[idx].len; 2742d665d229SNeel Natu idx++; 2743d665d229SNeel Natu } 2744d665d229SNeel Natu } 2745cf1d80d8SPeter Grehan 2746cf1d80d8SPeter Grehan /* 2747cf1d80d8SPeter Grehan * Return the amount of in-use and wired memory for the VM. Since 2748cf1d80d8SPeter Grehan * these are global stats, only return the values with for vCPU 0 2749cf1d80d8SPeter Grehan */ 2750cf1d80d8SPeter Grehan VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 2751cf1d80d8SPeter Grehan VMM_STAT_DECLARE(VMM_MEM_WIRED); 2752cf1d80d8SPeter Grehan 2753cf1d80d8SPeter Grehan static void 2754cf1d80d8SPeter Grehan vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2755cf1d80d8SPeter Grehan { 2756cf1d80d8SPeter Grehan 2757cf1d80d8SPeter Grehan if (vcpu == 0) { 27583dc3d32aSJohn Baldwin vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_RESIDENT, 2759cf1d80d8SPeter Grehan PAGE_SIZE * vmspace_resident_count(vm->vmspace)); 2760cf1d80d8SPeter Grehan } 2761cf1d80d8SPeter Grehan } 2762cf1d80d8SPeter Grehan 2763cf1d80d8SPeter Grehan static void 2764cf1d80d8SPeter Grehan vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2765cf1d80d8SPeter Grehan { 2766cf1d80d8SPeter Grehan 2767cf1d80d8SPeter Grehan if (vcpu == 0) { 27683dc3d32aSJohn Baldwin vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_WIRED, 2769cf1d80d8SPeter Grehan PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace))); 2770cf1d80d8SPeter Grehan } 2771cf1d80d8SPeter Grehan } 2772cf1d80d8SPeter Grehan 2773cf1d80d8SPeter Grehan VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 2774cf1d80d8SPeter Grehan VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt); 2775483d953aSJohn Baldwin 2776483d953aSJohn Baldwin #ifdef BHYVE_SNAPSHOT 2777483d953aSJohn Baldwin static int 2778483d953aSJohn Baldwin vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) 2779483d953aSJohn Baldwin { 2780a7db532eSJohn Baldwin uint64_t tsc, now; 2781483d953aSJohn Baldwin int ret; 2782483d953aSJohn Baldwin struct vcpu *vcpu; 278335abc6c2SJohn Baldwin uint16_t i, maxcpus; 2784483d953aSJohn Baldwin 2785a7db532eSJohn Baldwin now = rdtsc(); 278635abc6c2SJohn Baldwin maxcpus = vm_get_maxcpus(vm); 278735abc6c2SJohn Baldwin for (i = 0; i < maxcpus; i++) { 2788483d953aSJohn Baldwin vcpu = &vm->vcpu[i]; 2789483d953aSJohn Baldwin 2790483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done); 2791483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done); 2792483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done); 2793483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done); 2794483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done); 2795483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done); 2796483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done); 2797483d953aSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); 2798a7db532eSJohn Baldwin 2799a7db532eSJohn Baldwin /* 2800a7db532eSJohn Baldwin * Save the absolute TSC value by adding now to tsc_offset. 2801483d953aSJohn Baldwin * 2802483d953aSJohn Baldwin * It will be turned turned back into an actual offset when the 2803483d953aSJohn Baldwin * TSC restore function is called 2804483d953aSJohn Baldwin */ 2805a7db532eSJohn Baldwin tsc = now + vcpu->tsc_offset; 2806a7db532eSJohn Baldwin SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done); 2807483d953aSJohn Baldwin } 2808483d953aSJohn Baldwin 2809483d953aSJohn Baldwin done: 2810483d953aSJohn Baldwin return (ret); 2811483d953aSJohn Baldwin } 2812483d953aSJohn Baldwin 2813483d953aSJohn Baldwin static int 2814483d953aSJohn Baldwin vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) 2815483d953aSJohn Baldwin { 2816483d953aSJohn Baldwin int ret; 2817483d953aSJohn Baldwin 2818483d953aSJohn Baldwin ret = vm_snapshot_vcpus(vm, meta); 2819a7db532eSJohn Baldwin if (ret != 0) 2820483d953aSJohn Baldwin goto done; 2821483d953aSJohn Baldwin 2822483d953aSJohn Baldwin done: 2823483d953aSJohn Baldwin return (ret); 2824483d953aSJohn Baldwin } 2825483d953aSJohn Baldwin 2826483d953aSJohn Baldwin static int 28271aa51504SJohn Baldwin vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) 2828483d953aSJohn Baldwin { 282935abc6c2SJohn Baldwin int error; 28301aa51504SJohn Baldwin struct vcpu *vcpu; 283135abc6c2SJohn Baldwin uint16_t i, maxcpus; 2832483d953aSJohn Baldwin 2833483d953aSJohn Baldwin error = 0; 2834483d953aSJohn Baldwin 283535abc6c2SJohn Baldwin maxcpus = vm_get_maxcpus(vm); 283635abc6c2SJohn Baldwin for (i = 0; i < maxcpus; i++) { 28371aa51504SJohn Baldwin vcpu = &vm->vcpu[i]; 28381aa51504SJohn Baldwin 2839869c8d19SJohn Baldwin error = vmmops_vcpu_snapshot(vcpu->cookie, meta); 2840483d953aSJohn Baldwin if (error != 0) { 2841483d953aSJohn Baldwin printf("%s: failed to snapshot vmcs/vmcb data for " 2842483d953aSJohn Baldwin "vCPU: %d; error: %d\n", __func__, i, error); 2843483d953aSJohn Baldwin goto done; 2844483d953aSJohn Baldwin } 2845483d953aSJohn Baldwin } 2846483d953aSJohn Baldwin 2847483d953aSJohn Baldwin done: 2848483d953aSJohn Baldwin return (error); 2849483d953aSJohn Baldwin } 2850483d953aSJohn Baldwin 2851483d953aSJohn Baldwin /* 2852483d953aSJohn Baldwin * Save kernel-side structures to user-space for snapshotting. 2853483d953aSJohn Baldwin */ 2854483d953aSJohn Baldwin int 2855483d953aSJohn Baldwin vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) 2856483d953aSJohn Baldwin { 2857483d953aSJohn Baldwin int ret = 0; 2858483d953aSJohn Baldwin 2859483d953aSJohn Baldwin switch (meta->dev_req) { 2860483d953aSJohn Baldwin case STRUCT_VMX: 286115add60dSPeter Grehan ret = vmmops_snapshot(vm->cookie, meta); 2862483d953aSJohn Baldwin break; 2863483d953aSJohn Baldwin case STRUCT_VMCX: 28641aa51504SJohn Baldwin ret = vm_snapshot_vcpu(vm, meta); 2865483d953aSJohn Baldwin break; 2866483d953aSJohn Baldwin case STRUCT_VM: 2867483d953aSJohn Baldwin ret = vm_snapshot_vm(vm, meta); 2868483d953aSJohn Baldwin break; 2869483d953aSJohn Baldwin case STRUCT_VIOAPIC: 2870483d953aSJohn Baldwin ret = vioapic_snapshot(vm_ioapic(vm), meta); 2871483d953aSJohn Baldwin break; 2872483d953aSJohn Baldwin case STRUCT_VLAPIC: 2873483d953aSJohn Baldwin ret = vlapic_snapshot(vm, meta); 2874483d953aSJohn Baldwin break; 2875483d953aSJohn Baldwin case STRUCT_VHPET: 2876483d953aSJohn Baldwin ret = vhpet_snapshot(vm_hpet(vm), meta); 2877483d953aSJohn Baldwin break; 2878483d953aSJohn Baldwin case STRUCT_VATPIC: 2879483d953aSJohn Baldwin ret = vatpic_snapshot(vm_atpic(vm), meta); 2880483d953aSJohn Baldwin break; 2881483d953aSJohn Baldwin case STRUCT_VATPIT: 2882483d953aSJohn Baldwin ret = vatpit_snapshot(vm_atpit(vm), meta); 2883483d953aSJohn Baldwin break; 2884483d953aSJohn Baldwin case STRUCT_VPMTMR: 2885483d953aSJohn Baldwin ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); 2886483d953aSJohn Baldwin break; 2887483d953aSJohn Baldwin case STRUCT_VRTC: 2888483d953aSJohn Baldwin ret = vrtc_snapshot(vm_rtc(vm), meta); 2889483d953aSJohn Baldwin break; 2890483d953aSJohn Baldwin default: 2891483d953aSJohn Baldwin printf("%s: failed to find the requested type %#x\n", 2892483d953aSJohn Baldwin __func__, meta->dev_req); 2893483d953aSJohn Baldwin ret = (EINVAL); 2894483d953aSJohn Baldwin } 2895483d953aSJohn Baldwin return (ret); 2896483d953aSJohn Baldwin } 2897483d953aSJohn Baldwin 289880cb5d84SJohn Baldwin void 289980cb5d84SJohn Baldwin vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset) 2900483d953aSJohn Baldwin { 2901483d953aSJohn Baldwin vcpu->tsc_offset = offset; 2902483d953aSJohn Baldwin } 2903483d953aSJohn Baldwin 2904483d953aSJohn Baldwin int 2905483d953aSJohn Baldwin vm_restore_time(struct vm *vm) 2906483d953aSJohn Baldwin { 290735abc6c2SJohn Baldwin int error; 2908483d953aSJohn Baldwin uint64_t now; 2909483d953aSJohn Baldwin struct vcpu *vcpu; 291035abc6c2SJohn Baldwin uint16_t i, maxcpus; 2911483d953aSJohn Baldwin 2912483d953aSJohn Baldwin now = rdtsc(); 2913483d953aSJohn Baldwin 2914483d953aSJohn Baldwin error = vhpet_restore_time(vm_hpet(vm)); 2915483d953aSJohn Baldwin if (error) 2916483d953aSJohn Baldwin return (error); 2917483d953aSJohn Baldwin 291835abc6c2SJohn Baldwin maxcpus = vm_get_maxcpus(vm); 291935abc6c2SJohn Baldwin for (i = 0; i < maxcpus; i++) { 2920483d953aSJohn Baldwin vcpu = &vm->vcpu[i]; 2921483d953aSJohn Baldwin 2922869c8d19SJohn Baldwin error = vmmops_restore_tsc(vcpu->cookie, 29231aa51504SJohn Baldwin vcpu->tsc_offset - now); 2924483d953aSJohn Baldwin if (error) 2925483d953aSJohn Baldwin return (error); 2926483d953aSJohn Baldwin } 2927483d953aSJohn Baldwin 2928483d953aSJohn Baldwin return (0); 2929483d953aSJohn Baldwin } 2930483d953aSJohn Baldwin #endif 2931