/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/md_var.h>
#include <x86/psl.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	int		reqidle;	/* (i) request vcpu to idle */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int	exception_pending;	/* (i) exception pending */
	int	exc_vector;		/* (x) exception collateral */
	int	exc_errcode_valid;
	uint32_t exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	size_t	len;
	bool	sysmem;
	struct vm_object *object;
};
#define	VM_MAX_MEMSEGS	3

struct mem_map {
	vm_paddr_t	gpa;
	size_t		len;
	vm_ooffset_t	segoff;
	int		segid;
	int		prot;
	int		flags;
};
#define	VM_MAX_MEMMAPS	4

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	volatile cpuset_t debug_cpus;		/* (i) vcpus stopped for debug */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	struct mem_map	mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
	struct mem_seg	mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
	/* The following describe the vm cpu topology */
	uint16_t	sockets;		/* (o) num of sockets */
	uint16_t	cores;			/* (o) num of cores/socket */
	uint16_t	threads;		/* (o) num of threads/core */
	uint16_t	maxcpus;		/* (o) max pluggable cpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, evinfo) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

SDT_PROVIDER_DEFINE(vmm);

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);

#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
{

	switch (state) {
	case VCPU_IDLE:
		return ("idle");
	case VCPU_FROZEN:
		return ("frozen");
	case VCPU_RUNNING:
		return ("running");
	case VCPU_SLEEPING:
		return ("sleeping");
	default:
		return ("unknown");
	}
}
#endif

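/*
 * Release a vcpu's vlapic and, on final destroy, its FPU save area and
 * statistics buffer as well.
 */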
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

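/*
 * Initialize (or reinitialize) the state of a vcpu. The lock, FPU save
 * area and stats buffer are allocated only on first creation.
 */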
static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->reqidle = 0;
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= vm->maxcpus)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

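/*
 * One-time module initialization: set up host state, allocate the IPI
 * vector used for vcpu notifications and hand off to the Intel or AMD
 * backend.
 */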
static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
	    &IDTVEC(justreturn));
	if (vmm_ipinum < 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				lapic_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

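/*
 * Initialize (or reinitialize) the emulated devices and vcpus of a VM.
 */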
static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);
	CPU_ZERO(&vm->debug_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_init(vm, i, create);
}

/*
 * The default CPU topology is a single thread per package.
 */
u_int cores_per_package = 1;
u_int threads_per_core = 1;

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm->sockets = 1;
	vm->cores = cores_per_package;	/* XXX backwards compatibility */
	vm->threads = threads_per_core;	/* XXX backwards compatibility */
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
	*sockets = vm->sockets;
	*cores = vm->cores;
	*threads = vm->threads;
	*maxcpus = vm->maxcpus;
}

uint16_t
vm_get_maxcpus(struct vm *vm)
{
	return (vm->maxcpus);
}

int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus)
{
	if (maxcpus != 0)
		return (EINVAL);	/* XXX remove when supported */
	if ((sockets * cores * threads) > vm->maxcpus)
		return (EINVAL);
	/* XXX need to check sockets * cores * threads == vCPU, how? */
	vm->sockets = sockets;
	vm->cores = cores;
	vm->threads = threads;
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
	return (0);
}

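/*
 * Tear down VM state. With 'destroy' false this implements a VM reset;
 * with 'destroy' true all resources are released.
 */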
static void
vm_cleanup(struct vm *vm, bool destroy)
{
	struct mem_map *mm;
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	/*
	 * System memory is removed from the guest address space only when
	 * the VM is destroyed. This is because the mapping remains the same
	 * across VM reset.
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (destroy || !sysmem_mapping(vm, mm))
			vm_free_memmap(vm, i);
	}

	if (destroy) {
		for (i = 0; i < VM_MAX_MEMSEGS; i++)
			vm_free_memseg(vm, i);

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
{
	struct mem_map *mm;
	int i;

#ifdef INVARIANTS
	int hostcpu, state;
	state = vcpu_get_state(vm, vcpuid, &hostcpu);
	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);		/* 'gpa' is sysmem or devmem */
	}

	if (ppt_is_mmio(vm, gpa))
		return (true);			/* 'gpa' is pci passthru mmio */

	return (false);
}

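/*
 * Allocate a backing VM object for the memory segment identified by 'ident'.
 */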
int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
	struct mem_seg *seg;
	vm_object_t obj;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	if (len == 0 || (len & PAGE_MASK))
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);
		else
			return (EINVAL);
	}

	obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
	if (obj == NULL)
		return (ENOMEM);

	seg->len = len;
	seg->object = obj;
	seg->sysmem = sysmem;
	return (0);
}

int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
	struct mem_seg *seg;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;
	return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
	struct mem_seg *seg;

	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
	    ("%s: invalid memseg ident %d", __func__, ident));

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_deallocate(seg->object);
		bzero(seg, sizeof(struct mem_seg));
	}
}

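/*
 * Map a portion of a memory segment into the guest physical address space
 * starting at 'gpa', optionally wiring the backing pages.
 */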
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
	struct mem_seg *seg;
	struct mem_map *m, *map;
	vm_ooffset_t last;
	int i, error;

	if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
		return (EINVAL);

	if (flags & ~VM_MEMMAP_F_WIRED)
		return (EINVAL);

	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[segid];
	if (seg->object == NULL)
		return (EINVAL);

	last = first + len;
	if (first < 0 || first >= last || last > seg->len)
		return (EINVAL);

	if ((gpa | first | last) & PAGE_MASK)
		return (EINVAL);

	map = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &vm->mem_maps[i];
		if (m->len == 0) {
			map = m;
			break;
		}
	}

	if (map == NULL)
		return (ENOSPC);

	error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
	    len, 0, VMFS_NO_SPACE, prot, prot, 0);
	if (error != KERN_SUCCESS)
		return (EFAULT);

	vm_object_reference(seg->object);

	if (flags & VM_MEMMAP_F_WIRED) {
		error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (error != KERN_SUCCESS) {
			vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
			return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
			    EFAULT);
		}
	}

	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;
	return (0);
}

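/*
 * Find the memory map with the lowest guest physical address that is
 * greater than or equal to '*gpa'. Used to iterate over the memory maps.
 */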
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct mem_map *mm, *mmnext;
	int i;

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}

	if (mmnext != NULL) {
		*gpa = mmnext->gpa;
		if (segid)
			*segid = mmnext->segid;
		if (segoff)
			*segoff = mmnext->segoff;
		if (len)
			*len = mmnext->len;
		if (prot)
			*prot = mmnext->prot;
		if (flags)
			*flags = mmnext->flags;
		return (0);
	} else {
		return (ENOENT);
	}
}

static void
vm_free_memmap(struct vm *vm, int ident)
{
	struct mem_map *mm;
	int error;

	mm = &vm->mem_maps[ident];
	if (mm->len) {
		error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
		    mm->gpa + mm->len);
		KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
		    __func__, error));
		bzero(mm, sizeof(struct mem_map));
	}
}

static __inline bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{

	if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
		return (true);
	else
		return (false);
}

vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
	struct mem_map *mm;
	vm_paddr_t maxaddr;
	int i;

	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (sysmem_mapping(vm, mm)) {
			if (maxaddr < mm->gpa + mm->len)
				maxaddr = mm->gpa + mm->len;
		}
	}
	return (maxaddr);
}

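/*
 * Add or remove IOMMU mappings for all wired system memory so that an
 * assigned passthru device can DMA directly into guest memory.
 */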
static void
vm_iommu_modify(struct vm *vm, bool map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_map *mm;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (!sysmem_mapping(vm, mm))
			continue;

		if (map) {
			KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
			    ("iommu map found invalid memmap %#lx/%#lx/%#x",
			    mm->gpa, mm->len, mm->flags));
			if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
				continue;
			mm->flags |= VM_MEMMAP_F_IOMMU;
		} else {
			if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
				continue;
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
			KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
			    ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
			    mm->gpa, mm->len, mm->flags));
		}

		gpa = mm->gpa;
		while (gpa < mm->gpa + mm->len) {
			vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
					 &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), false)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), true)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0)
		vm_iommu_unmap(vm);

	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_sysmem_maxaddr(vm);
		vm->iommu = iommu_create_domain(maxaddr);
		if (vm->iommu == NULL)
			return (ENXIO);
		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

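/*
 * Wire the host page backing guest physical address 'gpa' and return a
 * pointer to it in the direct map. The caller releases the page via
 * vm_gpa_release() on the returned cookie.
 */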
void *
vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int i, count, pageoff;
	struct mem_map *mm;
	vm_page_t m;
#ifdef INVARIANTS
	/*
	 * All vcpus are frozen by ioctls that modify the memory map
	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
	 */
	int state;
	KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
	    __func__, vcpuid));
	for (i = 0; i < vm->maxcpus; i++) {
		if (vcpuid != -1 && vcpuid != i)
			continue;
		state = vcpu_get_state(vm, i, NULL);
		KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
		    __func__, state));
	}
#endif
	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
		    gpa < mm->gpa + mm->len) {
			count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
			    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
			break;
		}
	}

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_unwire(m, PQ_ACTIVE);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}

static bool
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (true);
	default:
		return (false);
	}
}

static bool
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (true);
	default:
		return (false);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

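/*
 * Swap the host FPU state for the guest's before entering the guest.
 */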
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

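/*
 * Effect a vcpu state transition with the vcpu lock held, enforcing the
 * transition diagram described in the comment below.
 */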
static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	struct vcpu *vcpu;
	int error;

	vcpu = &vm->vcpu[vcpuid];
	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			vcpu->reqidle = 1;
			vcpu_notify_event_locked(vcpu, false);
			VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
			    "idle requested", vcpu_state2str(vcpu->state));
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
	    vcpu_state2str(vcpu->state), vcpu_state2str(newstate));

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

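/*
 * Run the rendezvous handler for this vcpu (if it is a target) and wait
 * until all targeted vcpus have done the same.
 */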
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		if (vcpu_debugged(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

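/*
 * Resolve a nested page fault, either by emulating the accessed/dirty
 * bit updates or by faulting the page in through the VM map.
 */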
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	return (0);
}

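/*
 * Fetch, decode and emulate an instruction that faulted on an emulated
 * MMIO region. Accesses outside the in-kernel device models are bounced
 * to userspace by setting '*retu'.
 */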
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa, cs_base;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, fault;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_base = vme->u.inst_emul.cs_base;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
		    cs_base, VIE_INST_SIZE, vie, &fault);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = fault = 0;
	}
	if (error || fault)
		return (error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
		VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
		    vme->rip + cs_base);
		*retu = true;	    /* dump instruction bytes in userspace */
		return (0);
	}

	/*
	 * Update 'nextrip' based on the length of the emulated instruction.
	 */
	vme->inst_length = vie->num_processed;
	vcpu->nextrip += vie->num_processed;
	VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
	    "decoding", vcpu->nextrip);

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

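/*
 * Park this vcpu until all active vcpus have suspended themselves, then
 * return to userspace.
 */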
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
	vcpu->reqidle = 0;
	vcpu_unlock(vcpu);
	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_DEBUG;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_REQIDLE;
	vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

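/*
 * Main vcpu execution loop: enter the guest via VMRUN() and handle exits
 * in the kernel where possible, returning to userspace otherwise.
 */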
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	struct vm_eventinfo evinfo;
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	evinfo.rptr = &vm->rendezvous_func;
	evinfo.sptr = &vm->suspend;
	evinfo.iptr = &vcpu->reqidle;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		vcpu->nextrip = vme->rip + vme->inst_length;
		switch (vme->exitcode) {
		case VM_EXITCODE_REQIDLE:
			error = vm_handle_reqidle(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
		case VM_EXITCODE_VMINSN:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false)
		goto restart;

	VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;

	vm = arg;
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
		    "setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}

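/*
 * Record event injection state that was pending at the time of a VM exit
 * so that it can be replayed on the next entry.
 */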
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

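/*
 * Collapse two pending events into a double fault, a triple fault (VM
 * shutdown) or serial delivery, per the rules in the Intel SDM.
 */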
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exc_vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exc_errcode_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exc_errcode << 32;
		}
	}
	return (info);
}

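/*
 * Compute the event, if any, to be injected on the next VM entry by
 * merging exit-time interrupt info with any pending exception.
 */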
1947 int
1948 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
1949 {
1950 	struct vcpu *vcpu;
1951 	uint64_t info1, info2;
1952 	int valid;
1953 
1954 	KASSERT(vcpuid >= 0 &&
1955 	    vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
1956 
1957 	vcpu = &vm->vcpu[vcpuid];
1958 
1959 	info1 = vcpu->exitintinfo;
1960 	vcpu->exitintinfo = 0;
1961 
1962 	info2 = 0;
1963 	if (vcpu->exception_pending) {
1964 		info2 = vcpu_exception_intinfo(vcpu);
1965 		vcpu->exception_pending = 0;
1966 		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
1967 		    vcpu->exc_vector, info2);
1968 	}
1969 
1970 	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
1971 		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
1972 	} else if (info1 & VM_INTINFO_VALID) {
1973 		*retinfo = info1;
1974 		valid = 1;
1975 	} else if (info2 & VM_INTINFO_VALID) {
1976 		*retinfo = info2;
1977 		valid = 1;
1978 	} else {
1979 		valid = 0;
1980 	}
1981 
1982 	if (valid) {
1983 		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
1984 		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
1985 	}
1986 
1987 	return (valid);
1988 }
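
/*
 * A minimal sketch of the intended caller, assuming a hypothetical
 * backend entry path (the real VT-x and SVM consumers live in their own
 * source files):
 *
 *	uint64_t intinfo;
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &intinfo)) {
 *		<program the hardware event-injection field from intinfo>
 *	}
 */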
1989 
1990 int
1991 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
1992 {
1993 	struct vcpu *vcpu;
1994 
1995 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1996 		return (EINVAL);
1997 
1998 	vcpu = &vm->vcpu[vcpuid];
1999 	*info1 = vcpu->exitintinfo;
2000 	*info2 = vcpu_exception_intinfo(vcpu);
2001 	return (0);
2002 }
2003 
2004 int
2005 vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
2006     uint32_t errcode, int restart_instruction)
2007 {
2008 	struct vcpu *vcpu;
2009 	uint64_t regval;
2010 	int error;
2011 
2012 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2013 		return (EINVAL);
2014 
2015 	if (vector < 0 || vector >= 32)
2016 		return (EINVAL);
2017 
2018 	/*
2019 	 * A double fault exception should never be injected directly into
2020 	 * the guest. It is a derived exception that results from specific
2021 	 * combinations of nested faults.
2022 	 */
2023 	if (vector == IDT_DF)
2024 		return (EINVAL);
2025 
2026 	vcpu = &vm->vcpu[vcpuid];
2027 
2028 	if (vcpu->exception_pending) {
2029 		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
2030 		    "pending exception %d", vector, vcpu->exc_vector);
2031 		return (EBUSY);
2032 	}
2033 
2034 	if (errcode_valid) {
2035 		/*
2036 		 * Exceptions don't deliver an error code in real mode.
2037 		 */
2038 		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
2039 		KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
2040 		if (!(regval & CR0_PE))
2041 			errcode_valid = 0;
2042 	}
2043 
2044 	/*
2045 	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
2046 	 *
2047 	 * Event blocking by "STI" or "MOV SS" is cleared after the guest
2048 	 * executes one instruction or incurs an exception.
2049 	 */
2050 	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
2051 	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
2052 	    __func__, error));
2053 
2054 	if (restart_instruction)
2055 		vm_restart_instruction(vm, vcpuid);
2056 
2057 	vcpu->exception_pending = 1;
2058 	vcpu->exc_vector = vector;
2059 	vcpu->exc_errcode = errcode;
2060 	vcpu->exc_errcode_valid = errcode_valid;
2061 	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
2062 	return (0);
2063 }
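
/*
 * For instance, an emulation path that decides the guest executed an
 * invalid opcode might inject #UD with no error code and ask for the
 * faulting instruction to be restarted (a hypothetical caller):
 *
 *	error = vm_inject_exception(vm, vcpuid, IDT_UD, 0, 0, 1);
 */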
2064 
2065 void
2066 vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
2067     int errcode)
2068 {
2069 	struct vm *vm;
2070 	int error, restart_instruction;
2071 
2072 	vm = vmarg;
2073 	restart_instruction = 1;
2074 
2075 	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
2076 	    errcode, restart_instruction);
2077 	KASSERT(error == 0, ("vm_inject_exception error %d", error));
2078 }
2079 
2080 void
2081 vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
2082 {
2083 	struct vm *vm;
2084 	int error;
2085 
2086 	vm = vmarg;
2087 	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
2088 	    error_code, cr2);
2089 
2090 	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
2091 	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
2092 
2093 	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
2094 }
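
/*
 * The error code is typically assembled from the PGEX_* bits; a failed
 * user-mode write during emulation might (hypothetically) be reported as:
 *
 *	vm_inject_pf(vm, vcpuid, PGEX_W | PGEX_U, gla);
 *
 * with the faulting guest linear address landing in %cr2 as above.
 */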
2095 
2096 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2097 
2098 int
2099 vm_inject_nmi(struct vm *vm, int vcpuid)
2100 {
2101 	struct vcpu *vcpu;
2102 
2103 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2104 		return (EINVAL);
2105 
2106 	vcpu = &vm->vcpu[vcpuid];
2107 
2108 	vcpu->nmi_pending = 1;
2109 	vcpu_notify_event(vm, vcpuid, false);
2110 	return (0);
2111 }
2112 
2113 int
2114 vm_nmi_pending(struct vm *vm, int vcpuid)
2115 {
2116 	struct vcpu *vcpu;
2117 
2118 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2119 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2120 
2121 	vcpu = &vm->vcpu[vcpuid];
2122 
2123 	return (vcpu->nmi_pending);
2124 }
2125 
2126 void
2127 vm_nmi_clear(struct vm *vm, int vcpuid)
2128 {
2129 	struct vcpu *vcpu;
2130 
2131 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2132 		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);
2133 
2134 	vcpu = &vm->vcpu[vcpuid];
2135 
2136 	if (vcpu->nmi_pending == 0)
2137 		panic("vm_nmi_clear: inconsistent nmi_pending state");
2138 
2139 	vcpu->nmi_pending = 0;
2140 	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
2141 }
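
/*
 * vm_inject_nmi() and the pending/clear pair above form a simple
 * handshake with the backend: the injector latches the request and kicks
 * the vcpu, and the backend is expected to acknowledge it, roughly:
 *
 *	if (vm_nmi_pending(vm, vcpuid)) {
 *		<arrange for hardware NMI injection>
 *		vm_nmi_clear(vm, vcpuid);
 *	}
 */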
2142 
2143 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2144 
2145 int
2146 vm_inject_extint(struct vm *vm, int vcpuid)
2147 {
2148 	struct vcpu *vcpu;
2149 
2150 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2151 		return (EINVAL);
2152 
2153 	vcpu = &vm->vcpu[vcpuid];
2154 
2155 	vcpu->extint_pending = 1;
2156 	vcpu_notify_event(vm, vcpuid, false);
2157 	return (0);
2158 }
2159 
2160 int
2161 vm_extint_pending(struct vm *vm, int vcpuid)
2162 {
2163 	struct vcpu *vcpu;
2164 
2165 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2166 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2167 
2168 	vcpu = &vm->vcpu[vcpuid];
2169 
2170 	return (vcpu->extint_pending);
2171 }
2172 
2173 void
2174 vm_extint_clear(struct vm *vm, int vcpuid)
2175 {
2176 	struct vcpu *vcpu;
2177 
2178 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2179 		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);
2180 
2181 	vcpu = &vm->vcpu[vcpuid];
2182 
2183 	if (vcpu->extint_pending == 0)
2184 		panic("vm_extint_clear: inconsistent extint_pending state");
2185 
2186 	vcpu->extint_pending = 0;
2187 	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
2188 }
2189 
2190 int
2191 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
2192 {
2193 	if (vcpu < 0 || vcpu >= vm->maxcpus)
2194 		return (EINVAL);
2195 
2196 	if (type < 0 || type >= VM_CAP_MAX)
2197 		return (EINVAL);
2198 
2199 	return (VMGETCAP(vm->cookie, vcpu, type, retval));
2200 }
2201 
2202 int
2203 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
2204 {
2205 	if (vcpu < 0 || vcpu >= vm->maxcpus)
2206 		return (EINVAL);
2207 
2208 	if (type < 0 || type >= VM_CAP_MAX)
2209 		return (EINVAL);
2210 
2211 	return (VMSETCAP(vm->cookie, vcpu, type, val));
2212 }
2213 
2214 struct vlapic *
2215 vm_lapic(struct vm *vm, int cpu)
2216 {
2217 	return (vm->vcpu[cpu].vlapic);
2218 }
2219 
2220 struct vioapic *
2221 vm_ioapic(struct vm *vm)
2222 {
2223 
2224 	return (vm->vioapic);
2225 }
2226 
2227 struct vhpet *
2228 vm_hpet(struct vm *vm)
2229 {
2230 
2231 	return (vm->vhpet);
2232 }
2233 
2234 bool
2235 vmm_is_pptdev(int bus, int slot, int func)
2236 {
2237 	int b, f, i, n, s;
2238 	char *val, *cp, *cp2;
2239 	bool found;
2240 
2241 	/*
2242 	 * XXX
2243 	 * The length of an environment variable is limited to 128 bytes, which
2244 	 * puts an upper limit on the number of passthru devices that may be
2245 	 * specified using a single environment variable.
2246 	 *
2247 	 * Work around this by scanning multiple environment variable
2248 	 * names instead of a single one - yuck!
2249 	 */
2250 	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
2251 
2252 	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
2253 	found = false;
2254 	for (i = 0; names[i] != NULL && !found; i++) {
2255 		cp = val = kern_getenv(names[i]);
2256 		while (cp != NULL && *cp != '\0') {
2257 			if ((cp2 = strchr(cp, ' ')) != NULL)
2258 				*cp2 = '\0';
2259 
2260 			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
2261 			if (n == 3 && bus == b && slot == s && func == f) {
2262 				found = true;
2263 				break;
2264 			}
2265 
2266 			if (cp2 != NULL)
2267 				*cp2++ = ' ';
2268 
2269 			cp = cp2;
2270 		}
2271 		freeenv(val);
2272 	}
2273 	return (found);
2274 }
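
/*
 * An example (hypothetical) set of loader tunables matched by the scan
 * above, spilling past the 128-byte limit into the second variable:
 *
 *	pptdevs="2/0/0 2/0/1"
 *	pptdevs2="4/0/0"
 */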
2275 
2276 void *
2277 vm_iommu_domain(struct vm *vm)
2278 {
2279 
2280 	return (vm->iommu);
2281 }
2282 
2283 int
2284 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
2285     bool from_idle)
2286 {
2287 	int error;
2288 	struct vcpu *vcpu;
2289 
2290 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2291 		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);
2292 
2293 	vcpu = &vm->vcpu[vcpuid];
2294 
2295 	vcpu_lock(vcpu);
2296 	error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
2297 	vcpu_unlock(vcpu);
2298 
2299 	return (error);
2300 }
2301 
2302 enum vcpu_state
2303 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
2304 {
2305 	struct vcpu *vcpu;
2306 	enum vcpu_state state;
2307 
2308 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2309 		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);
2310 
2311 	vcpu = &vm->vcpu[vcpuid];
2312 
2313 	vcpu_lock(vcpu);
2314 	state = vcpu->state;
2315 	if (hostcpu != NULL)
2316 		*hostcpu = vcpu->hostcpu;
2317 	vcpu_unlock(vcpu);
2318 
2319 	return (state);
2320 }
2321 
2322 int
2323 vm_activate_cpu(struct vm *vm, int vcpuid)
2324 {
2325 
2326 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2327 		return (EINVAL);
2328 
2329 	if (CPU_ISSET(vcpuid, &vm->active_cpus))
2330 		return (EBUSY);
2331 
2332 	VCPU_CTR0(vm, vcpuid, "activated");
2333 	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
2334 	return (0);
2335 }
2336 
2337 int
2338 vm_suspend_cpu(struct vm *vm, int vcpuid)
2339 {
2340 	int i;
2341 
2342 	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2343 		return (EINVAL);
2344 
2345 	if (vcpuid == -1) {
2346 		vm->debug_cpus = vm->active_cpus;
2347 		for (i = 0; i < vm->maxcpus; i++) {
2348 			if (CPU_ISSET(i, &vm->active_cpus))
2349 				vcpu_notify_event(vm, i, false);
2350 		}
2351 	} else {
2352 		if (!CPU_ISSET(vcpuid, &vm->active_cpus))
2353 			return (EINVAL);
2354 
2355 		CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
2356 		vcpu_notify_event(vm, vcpuid, false);
2357 	}
2358 	return (0);
2359 }
2360 
2361 int
2362 vm_resume_cpu(struct vm *vm, int vcpuid)
2363 {
2364 
2365 	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2366 		return (EINVAL);
2367 
2368 	if (vcpuid == -1) {
2369 		CPU_ZERO(&vm->debug_cpus);
2370 	} else {
2371 		if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
2372 			return (EINVAL);
2373 
2374 		CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
2375 	}
2376 	return (0);
2377 }
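
/*
 * A vcpuid of -1 makes the two functions above operate on all active
 * vcpus, so a debugger stub might (hypothetically) freeze and later thaw
 * the whole guest with:
 *
 *	vm_suspend_cpu(vm, -1);
 *	...
 *	vm_resume_cpu(vm, -1);
 */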
2378 
2379 int
2380 vcpu_debugged(struct vm *vm, int vcpuid)
2381 {
2382 
2383 	return (CPU_ISSET(vcpuid, &vm->debug_cpus));
2384 }
2385 
2386 cpuset_t
2387 vm_active_cpus(struct vm *vm)
2388 {
2389 
2390 	return (vm->active_cpus);
2391 }
2392 
2393 cpuset_t
2394 vm_debug_cpus(struct vm *vm)
2395 {
2396 
2397 	return (vm->debug_cpus);
2398 }
2399 
2400 cpuset_t
2401 vm_suspended_cpus(struct vm *vm)
2402 {
2403 
2404 	return (vm->suspended_cpus);
2405 }
2406 
2407 void *
2408 vcpu_stats(struct vm *vm, int vcpuid)
2409 {
2410 
2411 	return (vm->vcpu[vcpuid].stats);
2412 }
2413 
2414 int
2415 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
2416 {
2417 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2418 		return (EINVAL);
2419 
2420 	*state = vm->vcpu[vcpuid].x2apic_state;
2421 
2422 	return (0);
2423 }
2424 
2425 int
2426 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
2427 {
2428 	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2429 		return (EINVAL);
2430 
2431 	if (state >= X2APIC_STATE_LAST)
2432 		return (EINVAL);
2433 
2434 	vm->vcpu[vcpuid].x2apic_state = state;
2435 
2436 	vlapic_set_x2apic_state(vm, vcpuid, state);
2437 
2438 	return (0);
2439 }
2440 
2441 /*
2442  * This function is called to ensure that a vcpu "sees" a pending event
2443  * as soon as possible:
2444  * - If the vcpu thread is sleeping then it is woken up.
2445  * - If the vcpu is running on a different host_cpu then an IPI will be directed
2446  *   to the host_cpu to cause the vcpu to trap into the hypervisor.
2447  */
2448 static void
2449 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2450 {
2451 	int hostcpu;
2452 
2453 	hostcpu = vcpu->hostcpu;
2454 	if (vcpu->state == VCPU_RUNNING) {
2455 		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2456 		if (hostcpu != curcpu) {
2457 			if (lapic_intr) {
2458 				vlapic_post_intr(vcpu->vlapic, hostcpu,
2459 				    vmm_ipinum);
2460 			} else {
2461 				ipi_cpu(hostcpu, vmm_ipinum);
2462 			}
2463 		} else {
2464 			/*
2465 			 * If the 'vcpu' is running on 'curcpu' then it must
2466 			 * be sending a notification to itself (e.g. SELF_IPI).
2467 			 * The pending event will be picked up when the vcpu
2468 			 * transitions back to guest context.
2469 			 */
2470 		}
2471 	} else {
2472 		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2473 		    "with hostcpu %d", vcpu->state, hostcpu));
2474 		if (vcpu->state == VCPU_SLEEPING)
2475 			wakeup_one(vcpu);
2476 	}
2477 }
2478 
2479 void
2480 vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
2481 {
2482 	struct vcpu *vcpu = &vm->vcpu[vcpuid];
2483 
2484 	vcpu_lock(vcpu);
2485 	vcpu_notify_event_locked(vcpu, lapic_intr);
2486 	vcpu_unlock(vcpu);
2487 }
2488 
2489 struct vmspace *
2490 vm_get_vmspace(struct vm *vm)
2491 {
2492 
2493 	return (vm->vmspace);
2494 }
2495 
2496 int
2497 vm_apicid2vcpuid(struct vm *vm, int apicid)
2498 {
2499 	/*
2500 	 * XXX apic id is assumed to be numerically identical to vcpu id
2501 	 */
2502 	return (apicid);
2503 }
2504 
2505 void
2506 vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
2507     vm_rendezvous_func_t func, void *arg)
2508 {
2509 	int i;
2510 
2511 	/*
2512 	 * Enforce that this function is called without any locks
2513 	 */
2514 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
2515 	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
2516 	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
2517 
2518 restart:
2519 	mtx_lock(&vm->rendezvous_mtx);
2520 	if (vm->rendezvous_func != NULL) {
2521 		/*
2522 		 * If a rendezvous is already in progress then we need to
2523 		 * call the rendezvous handler in case this 'vcpuid' is one
2524 		 * of the targets of the rendezvous.
2525 		 */
2526 		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
2527 		mtx_unlock(&vm->rendezvous_mtx);
2528 		vm_handle_rendezvous(vm, vcpuid);
2529 		goto restart;
2530 	}
2531 	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2532 	    "rendezvous is still in progress"));
2533 
2534 	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
2535 	vm->rendezvous_req_cpus = dest;
2536 	CPU_ZERO(&vm->rendezvous_done_cpus);
2537 	vm->rendezvous_arg = arg;
2538 	vm_set_rendezvous_func(vm, func);
2539 	mtx_unlock(&vm->rendezvous_mtx);
2540 
2541 	/*
2542 	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
2543 	 * vcpus so they handle the rendezvous as soon as possible.
2544 	 */
2545 	for (i = 0; i < vm->maxcpus; i++) {
2546 		if (CPU_ISSET(i, &dest))
2547 			vcpu_notify_event(vm, i, false);
2548 	}
2549 
2550 	vm_handle_rendezvous(vm, vcpuid);
2551 }
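
/*
 * A caller that wants 'func' executed on every active vcpu might
 * (hypothetically) combine this with vm_active_cpus():
 *
 *	vm_smp_rendezvous(vm, vcpuid, vm_active_cpus(vm), func, arg);
 *
 * The initiating vcpu takes part through the final call to
 * vm_handle_rendezvous() above.
 */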
2552 
2553 struct vatpic *
2554 vm_atpic(struct vm *vm)
2555 {
2556 	return (vm->vatpic);
2557 }
2558 
2559 struct vatpit *
2560 vm_atpit(struct vm *vm)
2561 {
2562 	return (vm->vatpit);
2563 }
2564 
2565 struct vpmtmr *
2566 vm_pmtmr(struct vm *vm)
2567 {
2568 
2569 	return (vm->vpmtmr);
2570 }
2571 
2572 struct vrtc *
2573 vm_rtc(struct vm *vm)
2574 {
2575 
2576 	return (vm->vrtc);
2577 }
2578 
2579 enum vm_reg_name
2580 vm_segment_name(int seg)
2581 {
2582 	static enum vm_reg_name seg_names[] = {
2583 		VM_REG_GUEST_ES,
2584 		VM_REG_GUEST_CS,
2585 		VM_REG_GUEST_SS,
2586 		VM_REG_GUEST_DS,
2587 		VM_REG_GUEST_FS,
2588 		VM_REG_GUEST_GS
2589 	};
2590 
2591 	KASSERT(seg >= 0 && seg < nitems(seg_names),
2592 	    ("%s: invalid segment encoding %d", __func__, seg));
2593 	return (seg_names[seg]);
2594 }
2595 
2596 void
2597 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
2598     int num_copyinfo)
2599 {
2600 	int idx;
2601 
2602 	for (idx = 0; idx < num_copyinfo; idx++) {
2603 		if (copyinfo[idx].cookie != NULL)
2604 			vm_gpa_release(copyinfo[idx].cookie);
2605 	}
2606 	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
2607 }
2608 
2609 int
2610 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
2611     uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
2612     int num_copyinfo, int *fault)
2613 {
2614 	int error, idx, nused;
2615 	size_t n, off, remaining;
2616 	void *hva, *cookie;
2617 	uint64_t gpa;
2618 
2619 	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
2620 
2621 	nused = 0;
2622 	remaining = len;
2623 	while (remaining > 0) {
2624 		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
2625 		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2626 		if (error || *fault)
2627 			return (error);
2628 		off = gpa & PAGE_MASK;
2629 		n = min(remaining, PAGE_SIZE - off);
2630 		copyinfo[nused].gpa = gpa;
2631 		copyinfo[nused].len = n;
2632 		remaining -= n;
2633 		gla += n;
2634 		nused++;
2635 	}
2636 
2637 	for (idx = 0; idx < nused; idx++) {
2638 		hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
2639 		    copyinfo[idx].len, prot, &cookie);
2640 		if (hva == NULL)
2641 			break;
2642 		copyinfo[idx].hva = hva;
2643 		copyinfo[idx].cookie = cookie;
2644 	}
2645 
2646 	if (idx != nused) {
2647 		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
2648 		return (EFAULT);
2649 	} else {
2650 		*fault = 0;
2651 		return (0);
2652 	}
2653 }
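
/*
 * The three copy routines are meant to be used together; a hypothetical
 * read of guest memory during emulation would look roughly like this,
 * with enough copyinfo slots for each page the span may touch:
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */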
2654 
2655 void
2656 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
2657     size_t len)
2658 {
2659 	char *dst;
2660 	int idx;
2661 
2662 	dst = kaddr;
2663 	idx = 0;
2664 	while (len > 0) {
2665 		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
2666 		len -= copyinfo[idx].len;
2667 		dst += copyinfo[idx].len;
2668 		idx++;
2669 	}
2670 }
2671 
2672 void
2673 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
2674     struct vm_copyinfo *copyinfo, size_t len)
2675 {
2676 	const char *src;
2677 	int idx;
2678 
2679 	src = kaddr;
2680 	idx = 0;
2681 	while (len > 0) {
2682 		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
2683 		len -= copyinfo[idx].len;
2684 		src += copyinfo[idx].len;
2685 		idx++;
2686 	}
2687 }
2688 
2689 /*
2690  * Return the amount of in-use and wired memory for the VM. Since
2691  * these are global stats, only return the values for vCPU 0.
2692  */
2693 VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
2694 VMM_STAT_DECLARE(VMM_MEM_WIRED);
2695 
2696 static void
2697 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2698 {
2699 
2700 	if (vcpu == 0) {
2701 		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
2702 		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
2703 	}
2704 }
2705 
2706 static void
2707 vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2708 {
2709 
2710 	if (vcpu == 0) {
2711 		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
2712 		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
2713 	}
2714 }
2715 
2716 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
2717 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);
2718