xref: /freebsd/sys/amd64/vmm/vmm.c (revision 51f45d01467047fed6e9292d272ac27f7f9c1d23)
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		 vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
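
/*
 * Note: the per-vcpu lock is a spin mutex, so code that has to wait while
 * synchronizing on vcpu state sleeps with msleep_spin() on it (see
 * vcpu_set_state_locked() and vm_handle_hlt() below).
 */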

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vhpet	*vhpet;		/* virtual HPET */
	struct vioapic	*vioapic;	/* virtual ioapic */
	struct vmspace	*vmspace;	/* guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;

	struct mtx	rendezvous_mtx;
	cpuset_t	rendezvous_req_cpus;
	cpuset_t	rendezvous_done_cpus;
	void		*rendezvous_arg;
	vm_rendezvous_func_t rendezvous_func;
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
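
/*
 * Setting CR0.TS makes the next FPU/SSE instruction raise a device-not-
 * available fault (#NM), while clts() clears TS again.  This is the usual
 * lazy-FPU trick: while the guest's FPU state is loaded, any host use of the
 * FPU traps instead of silently clobbering it (see restore_guest_fpustate()
 * and save_guest_fpustate() below).
 */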

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static void vm_deactivate_cpu(struct vm *vm, int vcpuid);

static void
vcpu_cleanup(struct vm *vm, int i)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();
	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	struct vmspace *vmspace;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
	vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	vm_activate_cpu(vm, BSP);

	*retvm = vm;
	return (0);
}

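/*
 * Illustrative sketch of the intended calling sequence (error handling
 * omitted; 'guest_mem_size' and 'guest_entry_rip' are placeholders supplied
 * by the caller, typically the vmm device ioctl path).  vm_run() below
 * returns once an exit has to be handled in userland.
 *
 *	struct vm *vm;
 *	struct vm_run vmrun;
 *
 *	vm_create("guest0", &vm);
 *	vm_malloc(vm, 0, guest_mem_size);
 *	vmrun.cpuid = 0;
 *	vmrun.rip = guest_entry_rip;
 *	vm_run(vm, &vmrun);
 *	vm_destroy(vm);
 */
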
static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vhpet_cleanup(vm->vhpet);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i);

	VMSPACE_FREE(vm->vmspace);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

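/*
 * Illustrative sketch: with VM_MAX_MEMORY_SEGMENTS == 2 a caller could, for
 * example, back a guest with one segment below a PCI/MMIO hole and a second
 * segment starting at 4GB ('lowmem_size' and 'highmem_size' are placeholders,
 * not something defined in this file):
 *
 *	error = vm_malloc(vm, 0, lowmem_size);
 *	if (error == 0 && highmem_size != 0)
 *		error = vm_malloc(vm, 4UL << 30, highmem_size);
 *
 * Both 'gpa' and 'len' must be page-aligned, and the requested range must be
 * either entirely unallocated or entirely allocated already.
 */
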
static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
				   seg->gpa, seg->gpa + seg->len,
				   VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
				 seg->gpa, seg->gpa + seg->len,
				 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
					 &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_mem_maxaddr();
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

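/*
 * To summarize the passthru lifecycle implemented above: assigning the first
 * ppt device to a VM wires all of its memory segments and programs a per-VM
 * iommu domain with the gpa->hpa translations; unassigning the last device
 * removes the iommu mappings and unwires the memory again (the domain itself
 * is destroyed in vm_destroy()).  A sketch of the expected calling pattern,
 * with placeholder bus/slot/function values:
 *
 *	error = vm_assign_pptdev(vm, 6, 0, 0);
 *	...
 *	error = vm_unassign_pptdev(vm, 6, 0, 0);
 */
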
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

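/*
 * Illustrative sketch of the hold/release protocol above: the pointer
 * returned by vm_gpa_hold() is a direct-map address that is only valid until
 * the matching vm_gpa_release(), and a single hold never spans a page
 * boundary.
 *
 *	void *cookie;
 *	uint8_t *p, val;
 *
 *	p = vm_gpa_hold(vm, gpa, sizeof(val), VM_PROT_READ, &cookie);
 *	if (p != NULL) {
 *		val = *p;
 *		vm_gpa_release(cookie);
 *	}
 */
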
int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
	      vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

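/*
 * Illustrative sketch: an ioctl handler that needs exclusive access to a
 * vcpu is expected to bracket its work with the public vcpu_set_state()
 * wrapper (used by vcpu_require_state() below), freezing the vcpu from the
 * IDLE state and returning it to IDLE afterwards so the next ioctl can
 * proceed:
 *
 *	error = vcpu_set_state(vm, vcpuid, VCPU_FROZEN, true);
 *	if (error == 0) {
 *		... operate on the vcpu ...
 *		vcpu_set_state(vm, vcpuid, VCPU_IDLE, false);
 *	}
 */
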
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

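/*
 * The initiating side of a rendezvous is not part of this file, but its shape
 * can be inferred from the handler above (a sketch; the exact entry point is
 * left unnamed): under 'rendezvous_mtx' the initiator fills in
 * 'rendezvous_req_cpus', clears 'rendezvous_done_cpus', sets 'rendezvous_arg'
 * and installs the callback via vm_set_rendezvous_func(); it then notifies
 * the target vcpus so that their VMRUN loops exit with VM_EXITCODE_RENDEZVOUS
 * and call vm_handle_rendezvous(), and finally sleeps on
 * &vm->rendezvous_func until the last vcpu clears it and issues the wakeup()
 * above.
 */
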
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vm_exit *vmexit;
	struct vcpu *vcpu;
	int t, timo;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * Do a final check for pending NMI or interrupts before
	 * really putting this thread to sleep.
	 *
	 * These interrupts could have happened any time after we
	 * returned from VMRUN() and before we grabbed the vcpu lock.
	 */
	if (!vm_nmi_pending(vm, vcpuid) &&
	    (intr_disabled || !vlapic_pending_intr(vcpu->vlapic, NULL))) {
		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		if (vlapic_enabled(vcpu->vlapic)) {
			/*
			 * XXX msleep_spin() is not interruptible so use the
			 * 'timo' to put an upper bound on the sleep time.
			 */
			timo = hz;
			msleep_spin(vcpu, &vcpu->mtx, "vmidle", timo);
		} else {
			/*
			 * Spindown the vcpu if the apic is disabled and it
			 * had entered the halted state.
			 */
			*retu = true;
			vmexit = vm_exitinfo(vm, vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
			vm_deactivate_cpu(vm, vcpuid);
			VCPU_CTR0(vm, vcpuid, "spinning down cpu");
		}
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}
	vcpu_unlock(vcpu);

	return (0);
}

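/*
 * A nested page fault is resolved in two steps: first try to handle the
 * fault as an accessed/dirty bit update in the nested pmap, and only if that
 * fails fall back to vm_fault() on the guest's vmspace to bring the page in.
 */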
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0)
			goto done;
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	int error, inst_length;
	uint64_t rip, gla, gpa, cr3;
	mem_region_read_t mread;
	mem_region_write_t mwrite;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	rip = vme->rip;
	inst_length = vme->inst_length;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cr3 = vme->u.inst_emul.cr3;
	vie = &vme->u.inst_emul.vie;

	vie_init(vie);

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie) != 0)
		return (EFAULT);

	if (vmm_decode_instruction(vm, vcpuid, gla, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, mread, mwrite,
	    retu);

	return (error);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	vcpu->hostcpu = curcpu;
	error = VMRUN(vm->cookie, vcpuid, rip, pmap, &vm->rendezvous_func);
	vcpu->hostcpu = NOCPU;
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

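/*
 * Illustrative sketch of the caller's side of vm_run(): exits that the
 * kernel cannot complete (retu == true above) are reported back through
 * vmrun->vm_exit, handled in userland, and the vcpu is then re-entered;
 * 'initial_rip' is a placeholder and the rip for each re-entry is chosen by
 * the userland handler.
 *
 *	struct vm_run vmrun;
 *
 *	vmrun.cpuid = vcpuid;
 *	vmrun.rip = initial_rip;
 *	for (;;) {
 *		error = vm_run(vm, &vmrun);
 *		if (error != 0)
 *			break;
 *		... emulate vmrun.vm_exit in userland, update vmrun.rip ...
 *	}
 */
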
1183366f6083SPeter Grehan int
1184366f6083SPeter Grehan vm_inject_event(struct vm *vm, int vcpuid, int type,
1185366f6083SPeter Grehan 		int vector, uint32_t code, int code_valid)
1186366f6083SPeter Grehan {
1187366f6083SPeter Grehan 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1188366f6083SPeter Grehan 		return (EINVAL);
1189366f6083SPeter Grehan 
1190366f6083SPeter Grehan 	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
1191366f6083SPeter Grehan 		return (EINVAL);
1192366f6083SPeter Grehan 
1193366f6083SPeter Grehan 	if (vector < 0 || vector > 255)
1194366f6083SPeter Grehan 		return (EINVAL);
1195366f6083SPeter Grehan 
1196366f6083SPeter Grehan 	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
1197366f6083SPeter Grehan }
1198366f6083SPeter Grehan 
119961592433SNeel Natu static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
1200366f6083SPeter Grehan 
1201f352ff0cSNeel Natu int
1202f352ff0cSNeel Natu vm_inject_nmi(struct vm *vm, int vcpuid)
1203f352ff0cSNeel Natu {
1204f352ff0cSNeel Natu 	struct vcpu *vcpu;
1205f352ff0cSNeel Natu 
1206f352ff0cSNeel Natu 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1207366f6083SPeter Grehan 		return (EINVAL);
1208366f6083SPeter Grehan 
1209f352ff0cSNeel Natu 	vcpu = &vm->vcpu[vcpuid];
1210f352ff0cSNeel Natu 
1211f352ff0cSNeel Natu 	vcpu->nmi_pending = 1;
1212de5ea6b6SNeel Natu 	vcpu_notify_event(vm, vcpuid, false);
1213f352ff0cSNeel Natu 	return (0);
1214f352ff0cSNeel Natu }
1215f352ff0cSNeel Natu 
1216f352ff0cSNeel Natu int
1217f352ff0cSNeel Natu vm_nmi_pending(struct vm *vm, int vcpuid)
1218f352ff0cSNeel Natu {
1219f352ff0cSNeel Natu 	struct vcpu *vcpu;
1220f352ff0cSNeel Natu 
1221f352ff0cSNeel Natu 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1222f352ff0cSNeel Natu 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
1223f352ff0cSNeel Natu 
1224f352ff0cSNeel Natu 	vcpu = &vm->vcpu[vcpuid];
1225f352ff0cSNeel Natu 
1226f352ff0cSNeel Natu 	return (vcpu->nmi_pending);
1227f352ff0cSNeel Natu }
1228f352ff0cSNeel Natu 
1229f352ff0cSNeel Natu void
1230f352ff0cSNeel Natu vm_nmi_clear(struct vm *vm, int vcpuid)
1231f352ff0cSNeel Natu {
1232f352ff0cSNeel Natu 	struct vcpu *vcpu;
1233f352ff0cSNeel Natu 
1234f352ff0cSNeel Natu 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1235f352ff0cSNeel Natu 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
1236f352ff0cSNeel Natu 
1237f352ff0cSNeel Natu 	vcpu = &vm->vcpu[vcpuid];
1238f352ff0cSNeel Natu 
1239f352ff0cSNeel Natu 	if (vcpu->nmi_pending == 0)
1240f352ff0cSNeel Natu 		panic("vm_nmi_clear: inconsistent nmi_pending state");
1241f352ff0cSNeel Natu 
1242f352ff0cSNeel Natu 	vcpu->nmi_pending = 0;
1243f352ff0cSNeel Natu 	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
1244366f6083SPeter Grehan }
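
/*
 * A minimal sketch of how a CPU backend is assumed to consume the NMI
 * state above (illustration only; the real VT-x/SVM code must also wait
 * for an NMI window before injecting):
 *
 *	if (vm_nmi_pending(vm, vcpuid)) {
 *		... program the hardware to deliver an NMI to the guest ...
 *		vm_nmi_clear(vm, vcpuid);
 *	}
 *
 * Note that vm_nmi_clear() may only be called while an NMI is pending;
 * it panics on an inconsistent state.
 */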
1245366f6083SPeter Grehan 
1246366f6083SPeter Grehan int
1247366f6083SPeter Grehan vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
1248366f6083SPeter Grehan {
1249366f6083SPeter Grehan 	if (vcpu < 0 || vcpu >= VM_MAXCPU)
1250366f6083SPeter Grehan 		return (EINVAL);
1251366f6083SPeter Grehan 
1252366f6083SPeter Grehan 	if (type < 0 || type >= VM_CAP_MAX)
1253366f6083SPeter Grehan 		return (EINVAL);
1254366f6083SPeter Grehan 
1255366f6083SPeter Grehan 	return (VMGETCAP(vm->cookie, vcpu, type, retval));
1256366f6083SPeter Grehan }
1257366f6083SPeter Grehan 
1258366f6083SPeter Grehan int
1259366f6083SPeter Grehan vm_set_capability(struct vm *vm, int vcpu, int type, int val)
1260366f6083SPeter Grehan {
1261366f6083SPeter Grehan 	if (vcpu < 0 || vcpu >= VM_MAXCPU)
1262366f6083SPeter Grehan 		return (EINVAL);
1263366f6083SPeter Grehan 
1264366f6083SPeter Grehan 	if (type < 0 || type >= VM_CAP_MAX)
1265366f6083SPeter Grehan 		return (EINVAL);
1266366f6083SPeter Grehan 
1267366f6083SPeter Grehan 	return (VMSETCAP(vm->cookie, vcpu, type, val));
1268366f6083SPeter Grehan }
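
/*
 * Usage sketch for the capability accessors (assuming VM_CAP_HALT_EXIT,
 * which forces a VM exit when the guest executes HLT, is supported by
 * the backend; unsupported capabilities are expected to fail inside
 * VMSETCAP()/VMGETCAP()):
 *
 *	error = vm_set_capability(vm, vcpuid, VM_CAP_HALT_EXIT, 1);
 *	if (error == 0)
 *		error = vm_get_capability(vm, vcpuid, VM_CAP_HALT_EXIT, &val);
 */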
1269366f6083SPeter Grehan 
1270366f6083SPeter Grehan uint64_t *
1271366f6083SPeter Grehan vm_guest_msrs(struct vm *vm, int cpu)
1272366f6083SPeter Grehan {
1273366f6083SPeter Grehan 	return (vm->vcpu[cpu].guest_msrs);
1274366f6083SPeter Grehan }
1275366f6083SPeter Grehan 
1276366f6083SPeter Grehan struct vlapic *
1277366f6083SPeter Grehan vm_lapic(struct vm *vm, int cpu)
1278366f6083SPeter Grehan {
1279366f6083SPeter Grehan 	return (vm->vcpu[cpu].vlapic);
1280366f6083SPeter Grehan }
1281366f6083SPeter Grehan 
1282565bbb86SNeel Natu struct vioapic *
1283565bbb86SNeel Natu vm_ioapic(struct vm *vm)
1284565bbb86SNeel Natu {
1285565bbb86SNeel Natu 
1286565bbb86SNeel Natu 	return (vm->vioapic);
1287565bbb86SNeel Natu }
1288565bbb86SNeel Natu 
128908e3ff32SNeel Natu struct vhpet *
129008e3ff32SNeel Natu vm_hpet(struct vm *vm)
129108e3ff32SNeel Natu {
129208e3ff32SNeel Natu 
129308e3ff32SNeel Natu 	return (vm->vhpet);
129408e3ff32SNeel Natu }
129508e3ff32SNeel Natu 
1296366f6083SPeter Grehan boolean_t
1297366f6083SPeter Grehan vmm_is_pptdev(int bus, int slot, int func)
1298366f6083SPeter Grehan {
129907044a96SNeel Natu 	int found, i, n;
130007044a96SNeel Natu 	int b, s, f;
1301366f6083SPeter Grehan 	char *val, *cp, *cp2;
1302366f6083SPeter Grehan 
1303366f6083SPeter Grehan 	/*
130407044a96SNeel Natu 	 * XXX
130507044a96SNeel Natu 	 * The length of an environment variable is limited to 128 bytes, which
130607044a96SNeel Natu 	 * puts an upper limit on the number of passthru devices that may be
130707044a96SNeel Natu 	 * specified using a single environment variable.
130807044a96SNeel Natu 	 *
130907044a96SNeel Natu 	 * Work around this by scanning multiple environment variable
131007044a96SNeel Natu 	 * names instead of a single one - yuck!
1311366f6083SPeter Grehan 	 */
131207044a96SNeel Natu 	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
131307044a96SNeel Natu 
131407044a96SNeel Natu 	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
1315366f6083SPeter Grehan 	found = 0;
131607044a96SNeel Natu 	for (i = 0; names[i] != NULL && !found; i++) {
131707044a96SNeel Natu 		cp = val = getenv(names[i]);
1318366f6083SPeter Grehan 		while (cp != NULL && *cp != '\0') {
1319366f6083SPeter Grehan 			if ((cp2 = strchr(cp, ' ')) != NULL)
1320366f6083SPeter Grehan 				*cp2 = '\0';
1321366f6083SPeter Grehan 
1322366f6083SPeter Grehan 			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
1323366f6083SPeter Grehan 			if (n == 3 && bus == b && slot == s && func == f) {
1324366f6083SPeter Grehan 				found = 1;
1325366f6083SPeter Grehan 				break;
1326366f6083SPeter Grehan 			}
1327366f6083SPeter Grehan 
1328366f6083SPeter Grehan 			if (cp2 != NULL)
1329366f6083SPeter Grehan 				*cp2++ = ' ';
1330366f6083SPeter Grehan 
1331366f6083SPeter Grehan 			cp = cp2;
1332366f6083SPeter Grehan 		}
1333366f6083SPeter Grehan 		freeenv(val);
133407044a96SNeel Natu 	}
1335366f6083SPeter Grehan 	return (found);
1336366f6083SPeter Grehan }
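
/*
 * Example loader.conf(5) settings in the format parsed above (the
 * bus/slot/func addresses are illustrative only):
 *
 *	pptdevs="2/0/0 3/0/0"
 *	pptdevs2="4/0/0"
 *
 * With these tunables set, vmm_is_pptdev(2, 0, 0) and vmm_is_pptdev(4, 0, 0)
 * return true while vmm_is_pptdev(5, 0, 0) does not.
 */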
1337366f6083SPeter Grehan 
1338366f6083SPeter Grehan void *
1339366f6083SPeter Grehan vm_iommu_domain(struct vm *vm)
1340366f6083SPeter Grehan {
1341366f6083SPeter Grehan 
1342366f6083SPeter Grehan 	return (vm->iommu);
1343366f6083SPeter Grehan }
1344366f6083SPeter Grehan 
134575dd3366SNeel Natu int
1346f80330a8SNeel Natu vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
1347f80330a8SNeel Natu     bool from_idle)
1348366f6083SPeter Grehan {
134975dd3366SNeel Natu 	int error;
1350366f6083SPeter Grehan 	struct vcpu *vcpu;
1351366f6083SPeter Grehan 
1352366f6083SPeter Grehan 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1353366f6083SPeter Grehan 		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);
1354366f6083SPeter Grehan 
1355366f6083SPeter Grehan 	vcpu = &vm->vcpu[vcpuid];
1356366f6083SPeter Grehan 
135775dd3366SNeel Natu 	vcpu_lock(vcpu);
1358f80330a8SNeel Natu 	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
135975dd3366SNeel Natu 	vcpu_unlock(vcpu);
136075dd3366SNeel Natu 
136175dd3366SNeel Natu 	return (error);
136275dd3366SNeel Natu }
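
/*
 * A usage sketch (assumed pattern): a thread that needs exclusive access
 * to a vcpu, e.g. an ioctl handler, transitions it from idle to frozen
 * around the critical section and back again:
 *
 *	error = vcpu_set_state(vm, vcpuid, VCPU_FROZEN, true);
 *	if (error == 0) {
 *		... operate on the vcpu ...
 *		(void)vcpu_set_state(vm, vcpuid, VCPU_IDLE, false);
 *	}
 *
 * 'from_idle' set to true is expected to make the caller wait until the
 * vcpu is actually idle before claiming it.
 */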
136375dd3366SNeel Natu 
136475dd3366SNeel Natu enum vcpu_state
1365d3c11f40SPeter Grehan vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
1366366f6083SPeter Grehan {
1367366f6083SPeter Grehan 	struct vcpu *vcpu;
136875dd3366SNeel Natu 	enum vcpu_state state;
1369366f6083SPeter Grehan 
1370366f6083SPeter Grehan 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1371366f6083SPeter Grehan 		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);
1372366f6083SPeter Grehan 
1373366f6083SPeter Grehan 	vcpu = &vm->vcpu[vcpuid];
1374366f6083SPeter Grehan 
137575dd3366SNeel Natu 	vcpu_lock(vcpu);
137675dd3366SNeel Natu 	state = vcpu->state;
1377d3c11f40SPeter Grehan 	if (hostcpu != NULL)
1378d3c11f40SPeter Grehan 		*hostcpu = vcpu->hostcpu;
137975dd3366SNeel Natu 	vcpu_unlock(vcpu);
1380366f6083SPeter Grehan 
138175dd3366SNeel Natu 	return (state);
1382366f6083SPeter Grehan }
1383366f6083SPeter Grehan 
1384366f6083SPeter Grehan void
1385366f6083SPeter Grehan vm_activate_cpu(struct vm *vm, int vcpuid)
1386366f6083SPeter Grehan {
1387366f6083SPeter Grehan 
1388366f6083SPeter Grehan 	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
1389a5615c90SPeter Grehan 		CPU_SET(vcpuid, &vm->active_cpus);
1390366f6083SPeter Grehan }
1391366f6083SPeter Grehan 
13925b8a8cd1SNeel Natu static void
13935b8a8cd1SNeel Natu vm_deactivate_cpu(struct vm *vm, int vcpuid)
13945b8a8cd1SNeel Natu {
13955b8a8cd1SNeel Natu 
13965b8a8cd1SNeel Natu 	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
13975b8a8cd1SNeel Natu 		CPU_CLR(vcpuid, &vm->active_cpus);
13985b8a8cd1SNeel Natu }
13995b8a8cd1SNeel Natu 
1400a5615c90SPeter Grehan cpuset_t
1401366f6083SPeter Grehan vm_active_cpus(struct vm *vm)
1402366f6083SPeter Grehan {
1403366f6083SPeter Grehan 
1404366f6083SPeter Grehan 	return (vm->active_cpus);
1405366f6083SPeter Grehan }
1406366f6083SPeter Grehan 
1407366f6083SPeter Grehan void *
1408366f6083SPeter Grehan vcpu_stats(struct vm *vm, int vcpuid)
1409366f6083SPeter Grehan {
1410366f6083SPeter Grehan 
1411366f6083SPeter Grehan 	return (vm->vcpu[vcpuid].stats);
1412366f6083SPeter Grehan }
1413e9027382SNeel Natu 
1414e9027382SNeel Natu int
1415e9027382SNeel Natu vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
1416e9027382SNeel Natu {
1417e9027382SNeel Natu 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1418e9027382SNeel Natu 		return (EINVAL);
1419e9027382SNeel Natu 
1420e9027382SNeel Natu 	*state = vm->vcpu[vcpuid].x2apic_state;
1421e9027382SNeel Natu 
1422e9027382SNeel Natu 	return (0);
1423e9027382SNeel Natu }
1424e9027382SNeel Natu 
1425e9027382SNeel Natu int
1426e9027382SNeel Natu vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
1427e9027382SNeel Natu {
1428e9027382SNeel Natu 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1429e9027382SNeel Natu 		return (EINVAL);
1430e9027382SNeel Natu 
14313f23d3caSNeel Natu 	if (state >= X2APIC_STATE_LAST)
1432e9027382SNeel Natu 		return (EINVAL);
1433e9027382SNeel Natu 
1434e9027382SNeel Natu 	vm->vcpu[vcpuid].x2apic_state = state;
1435e9027382SNeel Natu 
143673820fb0SNeel Natu 	vlapic_set_x2apic_state(vm, vcpuid, state);
143773820fb0SNeel Natu 
1438e9027382SNeel Natu 	return (0);
1439e9027382SNeel Natu }
144075dd3366SNeel Natu 
144122821874SNeel Natu /*
144222821874SNeel Natu  * This function is called to ensure that a vcpu "sees" a pending event
144322821874SNeel Natu  * as soon as possible:
144422821874SNeel Natu  * - If the vcpu thread is sleeping then it is woken up.
144522821874SNeel Natu  * - If the vcpu is running on a different host_cpu then an IPI will be directed
144622821874SNeel Natu  *   to the host_cpu to cause the vcpu to trap into the hypervisor.
144722821874SNeel Natu  */
144875dd3366SNeel Natu void
1449de5ea6b6SNeel Natu vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
145075dd3366SNeel Natu {
145175dd3366SNeel Natu 	int hostcpu;
145275dd3366SNeel Natu 	struct vcpu *vcpu;
145375dd3366SNeel Natu 
145475dd3366SNeel Natu 	vcpu = &vm->vcpu[vcpuid];
145575dd3366SNeel Natu 
1456f76fc5d4SNeel Natu 	vcpu_lock(vcpu);
145775dd3366SNeel Natu 	hostcpu = vcpu->hostcpu;
1458f76fc5d4SNeel Natu 	if (hostcpu == NOCPU) {
1459318224bbSNeel Natu 		if (vcpu->state == VCPU_SLEEPING)
1460f76fc5d4SNeel Natu 			wakeup_one(vcpu);
1461f76fc5d4SNeel Natu 	} else {
1462f76fc5d4SNeel Natu 		if (vcpu->state != VCPU_RUNNING)
1463f76fc5d4SNeel Natu 			panic("invalid vcpu state %d", vcpu->state);
1464de5ea6b6SNeel Natu 		if (hostcpu != curcpu) {
1465de5ea6b6SNeel Natu 			if (lapic_intr)
1466add611fdSNeel Natu 				vlapic_post_intr(vcpu->vlapic, hostcpu,
1467add611fdSNeel Natu 				    vmm_ipinum);
1468de5ea6b6SNeel Natu 			else
146975dd3366SNeel Natu 				ipi_cpu(hostcpu, vmm_ipinum);
147075dd3366SNeel Natu 		}
1471de5ea6b6SNeel Natu 	}
1472f76fc5d4SNeel Natu 	vcpu_unlock(vcpu);
1473f76fc5d4SNeel Natu }
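
/*
 * Callers are expected to follow the pattern used by vm_inject_nmi()
 * above: record the pending event first and only then call
 * vcpu_notify_event(), so that a vcpu woken up or interrupted by the
 * notification observes the new state.  Sketch ('pending_flag' is a
 * hypothetical field):
 *
 *	vcpu->pending_flag = 1;
 *	vcpu_notify_event(vm, vcpuid, false);
 *
 * Passing 'lapic_intr' as true lets a running vcpu be notified via
 * vlapic_post_intr() instead of a plain IPI.
 */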
1474318224bbSNeel Natu 
1475318224bbSNeel Natu struct vmspace *
1476318224bbSNeel Natu vm_get_vmspace(struct vm *vm)
1477318224bbSNeel Natu {
1478318224bbSNeel Natu 
1479318224bbSNeel Natu 	return (vm->vmspace);
1480318224bbSNeel Natu }
1481565bbb86SNeel Natu 
1482565bbb86SNeel Natu int
1483565bbb86SNeel Natu vm_apicid2vcpuid(struct vm *vm, int apicid)
1484565bbb86SNeel Natu {
1485565bbb86SNeel Natu 	/*
1486565bbb86SNeel Natu 	 * XXX apic id is assumed to be numerically identical to vcpu id
1487565bbb86SNeel Natu 	 */
1488565bbb86SNeel Natu 	return (apicid);
1489565bbb86SNeel Natu }
14905b8a8cd1SNeel Natu 
14915b8a8cd1SNeel Natu void
14925b8a8cd1SNeel Natu vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
14935b8a8cd1SNeel Natu     vm_rendezvous_func_t func, void *arg)
14945b8a8cd1SNeel Natu {
14955b8a8cd1SNeel Natu 	/*
14965b8a8cd1SNeel Natu 	 * Enforce that this function is called without any locks
14975b8a8cd1SNeel Natu 	 */
14985b8a8cd1SNeel Natu 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
14995b8a8cd1SNeel Natu 	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
15005b8a8cd1SNeel Natu 	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
15015b8a8cd1SNeel Natu 
15025b8a8cd1SNeel Natu restart:
15035b8a8cd1SNeel Natu 	mtx_lock(&vm->rendezvous_mtx);
15045b8a8cd1SNeel Natu 	if (vm->rendezvous_func != NULL) {
15055b8a8cd1SNeel Natu 		/*
15065b8a8cd1SNeel Natu 		 * If a rendezvous is already in progress then we need to
15075b8a8cd1SNeel Natu 		 * call the rendezvous handler in case this 'vcpuid' is one
15085b8a8cd1SNeel Natu 		 * of the targets of the rendezvous.
15095b8a8cd1SNeel Natu 		 */
15105b8a8cd1SNeel Natu 		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
15115b8a8cd1SNeel Natu 		mtx_unlock(&vm->rendezvous_mtx);
15125b8a8cd1SNeel Natu 		vm_handle_rendezvous(vm, vcpuid);
15135b8a8cd1SNeel Natu 		goto restart;
15145b8a8cd1SNeel Natu 	}
15155b8a8cd1SNeel Natu 	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
15165b8a8cd1SNeel Natu 	    "rendezvous is still in progress"));
15175b8a8cd1SNeel Natu 
15185b8a8cd1SNeel Natu 	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
15195b8a8cd1SNeel Natu 	vm->rendezvous_req_cpus = dest;
15205b8a8cd1SNeel Natu 	CPU_ZERO(&vm->rendezvous_done_cpus);
15215b8a8cd1SNeel Natu 	vm->rendezvous_arg = arg;
15225b8a8cd1SNeel Natu 	vm_set_rendezvous_func(vm, func);
15235b8a8cd1SNeel Natu 	mtx_unlock(&vm->rendezvous_mtx);
15245b8a8cd1SNeel Natu 
15255b8a8cd1SNeel Natu 	vm_handle_rendezvous(vm, vcpuid);
15265b8a8cd1SNeel Natu }
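
/*
 * A minimal usage sketch, assuming vm_rendezvous_func_t callbacks are
 * invoked as func(vm, vcpuid, arg) on each vcpu in 'dest' (illustration
 * only; 'example_cb' and 'example_rendezvous_all' are hypothetical):
 */
#if 0
static void
example_cb(struct vm *vm, int vcpuid, void *arg)
{
	/* Runs on every target vcpu while the rendezvous is in progress. */
}

static void
example_rendezvous_all(struct vm *vm, int vcpuid)
{
	cpuset_t dest;

	/* Rendezvous with every currently active vcpu. */
	dest = vm_active_cpus(vm);
	vm_smp_rendezvous(vm, vcpuid, dest, example_cb, NULL);
}
#endif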
1527