1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/capsicum.h>
34 #include <sys/sysctl.h>
35 #include <sys/ioctl.h>
36 #include <sys/linker.h>
37 #include <sys/mman.h>
38 #include <sys/module.h>
39 #include <sys/_iovec.h>
40 #include <sys/cpuset.h>
41 
42 #include <capsicum_helpers.h>
43 #include <errno.h>
44 #include <stdbool.h>
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <assert.h>
48 #include <string.h>
49 #include <fcntl.h>
50 #include <unistd.h>
51 
52 #include <libutil.h>
53 
54 #include <vm/vm.h>
55 #include <machine/vmm.h>
56 #include <machine/vmm_dev.h>
57 #include <machine/vmm_snapshot.h>
58 
59 #include "vmmapi.h"
60 #include "internal.h"
61 
62 #define	MB	(1024 * 1024UL)
63 #define	GB	(1024 * 1024 * 1024UL)
64 
65 /*
66  * Size of the guard region before and after the virtual address space
67  * mapping the guest physical memory. This must be a multiple of the
68  * superpage size for performance reasons.
69  */
70 #define	VM_MMAP_GUARD_SIZE	(4 * MB)
71 
72 #define	PROT_RW		(PROT_READ | PROT_WRITE)
73 #define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)
74 
75 struct vmctx {
76 	int	fd;
77 	uint32_t lowmem_limit;
78 	int	memflags;
79 	size_t	lowmem;
80 	size_t	highmem;
81 	char	*baseaddr;
82 	char	*name;
83 };
84 
85 #define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
86 #define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
87 
88 static int
89 vm_device_open(const char *name)
90 {
91 	int fd, len;
92 	char *vmfile;
93 
94 	len = strlen("/dev/vmm/") + strlen(name) + 1;
95 	vmfile = malloc(len);
96 	assert(vmfile != NULL);
97 	snprintf(vmfile, len, "/dev/vmm/%s", name);
98 
99 	/* Open the device file */
100 	fd = open(vmfile, O_RDWR, 0);
101 
102 	free(vmfile);
103 	return (fd);
104 }
105 
106 int
107 vm_create(const char *name)
108 {
109 	/* Try to load vmm(4) module before creating a guest. */
110 	if (modfind("vmm") < 0)
111 		kldload("vmm");
112 	return (CREATE(name));
113 }
114 
115 struct vmctx *
116 vm_open(const char *name)
117 {
118 	struct vmctx *vm;
119 	int saved_errno;
120 
121 	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
122 	assert(vm != NULL);
123 
124 	vm->fd = -1;
125 	vm->memflags = 0;
126 	vm->lowmem_limit = 3 * GB;
127 	vm->name = (char *)(vm + 1);
128 	strcpy(vm->name, name);
129 
130 	if ((vm->fd = vm_device_open(vm->name)) < 0)
131 		goto err;
132 
133 	return (vm);
134 err:
135 	saved_errno = errno;
136 	free(vm);
137 	errno = saved_errno;
138 	return (NULL);
139 }
140 
141 void
142 vm_close(struct vmctx *vm)
143 {
144 	assert(vm != NULL);
145 
146 	close(vm->fd);
147 	free(vm);
148 }
149 
150 void
151 vm_destroy(struct vmctx *vm)
152 {
153 	assert(vm != NULL);
154 
155 	if (vm->fd >= 0)
156 		close(vm->fd);
157 	DESTROY(vm->name);
158 
159 	free(vm);
160 }
161 
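/*
 * Usage sketch: the typical create/open/teardown sequence for a guest.
 * This is an illustrative fragment, not part of the library; the guest
 * name is arbitrary and errors are handled with err(3) for brevity.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	...
 *	vm_destroy(ctx);
 *
 * vm_close() only releases the local handle; vm_destroy() also asks the
 * kernel to tear the virtual machine down.
 */
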
162 struct vcpu *
163 vm_vcpu_open(struct vmctx *ctx, int vcpuid)
164 {
165 	struct vcpu *vcpu;
166 
167 	vcpu = malloc(sizeof(*vcpu));
168 	vcpu->ctx = ctx;
169 	vcpu->vcpuid = vcpuid;
170 	return (vcpu);
171 }
172 
173 void
174 vm_vcpu_close(struct vcpu *vcpu)
175 {
176 	free(vcpu);
177 }
178 
179 int
180 vcpu_id(struct vcpu *vcpu)
181 {
182 	return (vcpu->vcpuid);
183 }
184 
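/*
 * Usage sketch: per-vCPU handles.  vm_vcpu_open() merely allocates a small
 * (ctx, vcpuid) wrapper in this process; the vCPU is made runnable in
 * vmm(4) with vm_activate_cpu() below.  Illustrative only; assumes 'ctx'
 * was obtained from vm_open().
 *
 *	struct vcpu *vcpu;
 *
 *	vcpu = vm_vcpu_open(ctx, 0);
 *	if (vm_activate_cpu(vcpu) != 0)
 *		err(1, "vm_activate_cpu");
 *	...
 *	vm_vcpu_close(vcpu);
 */
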
185 int
186 vm_parse_memsize(const char *opt, size_t *ret_memsize)
187 {
188 	char *endptr;
189 	size_t optval;
190 	int error;
191 
192 	optval = strtoul(opt, &endptr, 0);
193 	if (*opt != '\0' && *endptr == '\0') {
194 		/*
195 		 * For the sake of backward compatibility, if the memory size
196 		 * specified on the command line is less than a megabyte, it is
197 		 * interpreted as being in units of MB.
198 		 */
199 		if (optval < MB)
200 			optval *= MB;
201 		*ret_memsize = optval;
202 		error = 0;
203 	} else
204 		error = expand_number(opt, ret_memsize);
205 
206 	return (error);
207 }
208 
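/*
 * Examples of vm_parse_memsize() behaviour (illustrative only):
 *
 *	size_t memsize;
 *
 *	vm_parse_memsize("512", &memsize);		512 MB (legacy MB units)
 *	vm_parse_memsize("2G", &memsize);		2 GB via expand_number(3)
 *	vm_parse_memsize("1073741824", &memsize);	1 GB, given in bytes
 */
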
209 uint32_t
210 vm_get_lowmem_limit(struct vmctx *ctx)
211 {
212 
213 	return (ctx->lowmem_limit);
214 }
215 
216 void
217 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
218 {
219 
220 	ctx->lowmem_limit = limit;
221 }
222 
223 void
224 vm_set_memflags(struct vmctx *ctx, int flags)
225 {
226 
227 	ctx->memflags = flags;
228 }
229 
230 int
231 vm_get_memflags(struct vmctx *ctx)
232 {
233 
234 	return (ctx->memflags);
235 }
236 
237 /*
238  * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
239  */
240 int
241 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
242     size_t len, int prot)
243 {
244 	struct vm_memmap memmap;
245 	int error, flags;
246 
247 	memmap.gpa = gpa;
248 	memmap.segid = segid;
249 	memmap.segoff = off;
250 	memmap.len = len;
251 	memmap.prot = prot;
252 	memmap.flags = 0;
253 
254 	if (ctx->memflags & VM_MEM_F_WIRED)
255 		memmap.flags |= VM_MEMMAP_F_WIRED;
256 
257 	/*
258 	 * If this mapping already exists then don't create it again. This
259 	 * is the common case for SYSMEM mappings created by bhyveload(8).
260 	 */
261 	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
262 	if (error == 0 && gpa == memmap.gpa) {
263 		if (segid != memmap.segid || off != memmap.segoff ||
264 		    prot != memmap.prot || flags != memmap.flags) {
265 			errno = EEXIST;
266 			return (-1);
267 		} else {
268 			return (0);
269 		}
270 	}
271 
272 	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
273 	return (error);
274 }
275 
276 int
277 vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
278     size_t *lowmem_size, size_t *highmem_size)
279 {
280 
281 	*guest_baseaddr = ctx->baseaddr;
282 	*lowmem_size = ctx->lowmem;
283 	*highmem_size = ctx->highmem;
284 	return (0);
285 }
286 
287 int
288 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
289 {
290 	struct vm_munmap munmap;
291 	int error;
292 
293 	munmap.gpa = gpa;
294 	munmap.len = len;
295 
296 	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
297 	return (error);
298 }
299 
300 int
301 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
302     vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
303 {
304 	struct vm_memmap memmap;
305 	int error;
306 
307 	bzero(&memmap, sizeof(struct vm_memmap));
308 	memmap.gpa = *gpa;
309 	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
310 	if (error == 0) {
311 		*gpa = memmap.gpa;
312 		*segid = memmap.segid;
313 		*segoff = memmap.segoff;
314 		*len = memmap.len;
315 		*prot = memmap.prot;
316 		*flags = memmap.flags;
317 	}
318 	return (error);
319 }
320 
321 /*
322  * Return 0 if the segments are identical and non-zero otherwise.
323  *
324  * This is slightly complicated by the fact that only device memory segments
325  * are named.
326  */
327 static int
328 cmpseg(size_t len, const char *str, size_t len2, const char *str2)
329 {
330 
331 	if (len == len2) {
332 		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
333 			return (0);
334 	}
335 	return (-1);
336 }
337 
338 static int
339 vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
340 {
341 	struct vm_memseg memseg;
342 	size_t n;
343 	int error;
344 
345 	/*
346 	 * If the memory segment has already been created then just return.
347 	 * This is the usual case for the SYSMEM segment created by userspace
348 	 * loaders like bhyveload(8).
349 	 */
350 	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
351 	    sizeof(memseg.name));
352 	if (error)
353 		return (error);
354 
355 	if (memseg.len != 0) {
356 		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
357 			errno = EINVAL;
358 			return (-1);
359 		} else {
360 			return (0);
361 		}
362 	}
363 
364 	bzero(&memseg, sizeof(struct vm_memseg));
365 	memseg.segid = segid;
366 	memseg.len = len;
367 	if (name != NULL) {
368 		n = strlcpy(memseg.name, name, sizeof(memseg.name));
369 		if (n >= sizeof(memseg.name)) {
370 			errno = ENAMETOOLONG;
371 			return (-1);
372 		}
373 	}
374 
375 	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
376 	return (error);
377 }
378 
379 int
380 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
381     size_t bufsize)
382 {
383 	struct vm_memseg memseg;
384 	size_t n;
385 	int error;
386 
387 	memseg.segid = segid;
388 	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
389 	if (error == 0) {
390 		*lenp = memseg.len;
391 		n = strlcpy(namebuf, memseg.name, bufsize);
392 		if (n >= bufsize) {
393 			errno = ENAMETOOLONG;
394 			error = -1;
395 		}
396 	}
397 	return (error);
398 }
399 
400 static int
401 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
402 {
403 	char *ptr;
404 	int error, flags;
405 
406 	/* Map 'len' bytes starting at 'gpa' in the guest address space */
407 	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
408 	if (error)
409 		return (error);
410 
411 	flags = MAP_SHARED | MAP_FIXED;
412 	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
413 		flags |= MAP_NOCORE;
414 
415 	/* mmap into the process address space on the host */
416 	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
417 	if (ptr == MAP_FAILED)
418 		return (-1);
419 
420 	return (0);
421 }
422 
423 int
424 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
425 {
426 	size_t objsize, len;
427 	vm_paddr_t gpa;
428 	char *baseaddr, *ptr;
429 	int error;
430 
431 	assert(vms == VM_MMAP_ALL);
432 
433 	/*
434 	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
435 	 * create another 'highmem' segment above 4GB for the remainder.
436 	 */
437 	if (memsize > ctx->lowmem_limit) {
438 		ctx->lowmem = ctx->lowmem_limit;
439 		ctx->highmem = memsize - ctx->lowmem_limit;
440 		objsize = 4*GB + ctx->highmem;
441 	} else {
442 		ctx->lowmem = memsize;
443 		ctx->highmem = 0;
444 		objsize = ctx->lowmem;
445 	}
446 
447 	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
448 	if (error)
449 		return (error);
450 
451 	/*
452 	 * Stake out a contiguous region covering the guest physical memory
453 	 * and the adjoining guard regions.
454 	 */
455 	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
456 	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
457 	if (ptr == MAP_FAILED)
458 		return (-1);
459 
460 	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
461 	if (ctx->highmem > 0) {
462 		gpa = 4*GB;
463 		len = ctx->highmem;
464 		error = setup_memory_segment(ctx, gpa, len, baseaddr);
465 		if (error)
466 			return (error);
467 	}
468 
469 	if (ctx->lowmem > 0) {
470 		gpa = 0;
471 		len = ctx->lowmem;
472 		error = setup_memory_segment(ctx, gpa, len, baseaddr);
473 		if (error)
474 			return (error);
475 	}
476 
477 	ctx->baseaddr = baseaddr;
478 
479 	return (0);
480 }
481 
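/*
 * Usage sketch: sizing and wiring up guest memory.  This is illustrative
 * only; it assumes 'ctx' came from vm_open() and uses err(3)/errx(3) for
 * error handling.
 *
 *	size_t memsize;
 *
 *	if (vm_parse_memsize("8G", &memsize) != 0)
 *		errx(1, "invalid memory size");
 *	vm_set_memflags(ctx, VM_MEM_F_WIRED);	(optional: wire guest RAM)
 *	if (vm_setup_memory(ctx, memsize, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 *
 * With the default 3 GB lowmem limit an 8 GB guest ends up with 3 GB of
 * lowmem at GPA 0 and 5 GB of highmem starting at GPA 4 GB, both mapped
 * contiguously into this process at ctx->baseaddr.
 */
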
482 /*
483  * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
484  * the lowmem or highmem regions.
485  *
486  * In particular, return NULL if [gaddr, gaddr+len) falls within the guest
487  * MMIO region; the instruction emulation code depends on this behavior.
488  */
489 void *
490 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
491 {
492 
493 	if (ctx->lowmem > 0) {
494 		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
495 		    gaddr + len <= ctx->lowmem)
496 			return (ctx->baseaddr + gaddr);
497 	}
498 
499 	if (ctx->highmem > 0) {
500 		if (gaddr >= 4*GB) {
501 			if (gaddr < 4*GB + ctx->highmem &&
502 			    len <= ctx->highmem &&
503 			    gaddr + len <= 4*GB + ctx->highmem)
504 				return (ctx->baseaddr + gaddr);
505 		}
506 	}
507 
508 	return (NULL);
509 }
510 
511 vm_paddr_t
512 vm_rev_map_gpa(struct vmctx *ctx, void *addr)
513 {
514 	vm_paddr_t offaddr;
515 
516 	offaddr = (char *)addr - ctx->baseaddr;
517 
518 	if (ctx->lowmem > 0)
519 		if (offaddr <= ctx->lowmem)
520 			return (offaddr);
521 
522 	if (ctx->highmem > 0)
523 		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
524 			return (offaddr);
525 
526 	return ((vm_paddr_t)-1);
527 }
528 
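/*
 * Usage sketch: translating between guest-physical addresses and host
 * pointers (illustrative only).
 *
 *	char *p;
 *	vm_paddr_t gpa;
 *
 *	p = vm_map_gpa(ctx, 0x100000, 4096);
 *	if (p == NULL)
 *		errx(1, "GPA range is not backed by lowmem/highmem");
 *	gpa = vm_rev_map_gpa(ctx, p);		(maps the pointer back)
 */
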
529 const char *
530 vm_get_name(struct vmctx *ctx)
531 {
532 
533 	return (ctx->name);
534 }
535 
536 size_t
537 vm_get_lowmem_size(struct vmctx *ctx)
538 {
539 
540 	return (ctx->lowmem);
541 }
542 
543 size_t
544 vm_get_highmem_size(struct vmctx *ctx)
545 {
546 
547 	return (ctx->highmem);
548 }
549 
550 void *
551 vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
552 {
553 	char pathname[MAXPATHLEN];
554 	size_t len2;
555 	char *base, *ptr;
556 	int fd, error, flags;
557 
558 	fd = -1;
559 	ptr = MAP_FAILED;
560 	if (name == NULL || strlen(name) == 0) {
561 		errno = EINVAL;
562 		goto done;
563 	}
564 
565 	error = vm_alloc_memseg(ctx, segid, len, name);
566 	if (error)
567 		goto done;
568 
569 	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
570 	strlcat(pathname, ctx->name, sizeof(pathname));
571 	strlcat(pathname, ".", sizeof(pathname));
572 	strlcat(pathname, name, sizeof(pathname));
573 
574 	fd = open(pathname, O_RDWR);
575 	if (fd < 0)
576 		goto done;
577 
578 	/*
579 	 * Stake out a contiguous region covering the device memory and the
580 	 * adjoining guard regions.
581 	 */
582 	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
583 	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
584 	    0);
585 	if (base == MAP_FAILED)
586 		goto done;
587 
588 	flags = MAP_SHARED | MAP_FIXED;
589 	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
590 		flags |= MAP_NOCORE;
591 
592 	/* mmap the devmem region in the host address space */
593 	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
594 done:
595 	if (fd >= 0)
596 		close(fd);
597 	return (ptr);
598 }
599 
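/*
 * Usage sketch: allocating a device memory segment and mapping it into the
 * host process.  Illustrative only; VM_BOOTROM stands in for a devmem
 * segment id from <machine/vmm.h>, and the segment only becomes visible to
 * the guest once it is mapped at a GPA with vm_mmap_memseg().
 *
 *	void *rom;
 *
 *	rom = vm_create_devmem(ctx, VM_BOOTROM, "bootrom", 1024 * 1024);
 *	if (rom == MAP_FAILED)
 *		err(1, "vm_create_devmem");
 */
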
600 static int
601 vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
602 {
603 	/*
604 	 * XXX: fragile, handle with care
605 	 * Assumes that the first field of the ioctl data
606 	 * is the vcpuid.
607 	 */
608 	*(int *)arg = vcpu->vcpuid;
609 	return (ioctl(vcpu->ctx->fd, cmd, arg));
610 }
611 
612 int
613 vm_set_desc(struct vcpu *vcpu, int reg,
614 	    uint64_t base, uint32_t limit, uint32_t access)
615 {
616 	int error;
617 	struct vm_seg_desc vmsegdesc;
618 
619 	bzero(&vmsegdesc, sizeof(vmsegdesc));
620 	vmsegdesc.regnum = reg;
621 	vmsegdesc.desc.base = base;
622 	vmsegdesc.desc.limit = limit;
623 	vmsegdesc.desc.access = access;
624 
625 	error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
626 	return (error);
627 }
628 
629 int
630 vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
631     uint32_t *access)
632 {
633 	int error;
634 	struct vm_seg_desc vmsegdesc;
635 
636 	bzero(&vmsegdesc, sizeof(vmsegdesc));
637 	vmsegdesc.regnum = reg;
638 
639 	error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
640 	if (error == 0) {
641 		*base = vmsegdesc.desc.base;
642 		*limit = vmsegdesc.desc.limit;
643 		*access = vmsegdesc.desc.access;
644 	}
645 	return (error);
646 }
647 
648 int
649 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
650 {
651 	int error;
652 
653 	error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
654 	    &seg_desc->access);
655 	return (error);
656 }
657 
658 int
659 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
660 {
661 	int error;
662 	struct vm_register vmreg;
663 
664 	bzero(&vmreg, sizeof(vmreg));
665 	vmreg.regnum = reg;
666 	vmreg.regval = val;
667 
668 	error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
669 	return (error);
670 }
671 
672 int
673 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
674 {
675 	int error;
676 	struct vm_register vmreg;
677 
678 	bzero(&vmreg, sizeof(vmreg));
679 	vmreg.regnum = reg;
680 
681 	error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
682 	*ret_val = vmreg.regval;
683 	return (error);
684 }
685 
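/*
 * Usage sketch: reading and writing guest state.  Register numbers are the
 * VM_REG_GUEST_* constants from <machine/vmm.h>.  Illustrative only; the
 * access value 0xc09b describes a flat 32-bit code segment and is just an
 * example.
 *
 *	uint64_t rip;
 *
 *	if (vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip) != 0)
 *		err(1, "vm_get_register");
 *	vm_set_register(vcpu, VM_REG_GUEST_RAX, 0);
 *	vm_set_desc(vcpu, VM_REG_GUEST_CS, 0, 0xffffffff, 0xc09b);
 *
 * The matching segment selector is loaded separately with
 * vm_set_register(), as vcpu_reset() below demonstrates.
 */
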
686 int
687 vm_set_register_set(struct vcpu *vcpu, unsigned int count,
688     const int *regnums, uint64_t *regvals)
689 {
690 	int error;
691 	struct vm_register_set vmregset;
692 
693 	bzero(&vmregset, sizeof(vmregset));
694 	vmregset.count = count;
695 	vmregset.regnums = regnums;
696 	vmregset.regvals = regvals;
697 
698 	error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
699 	return (error);
700 }
701 
702 int
703 vm_get_register_set(struct vcpu *vcpu, unsigned int count,
704     const int *regnums, uint64_t *regvals)
705 {
706 	int error;
707 	struct vm_register_set vmregset;
708 
709 	bzero(&vmregset, sizeof(vmregset));
710 	vmregset.count = count;
711 	vmregset.regnums = regnums;
712 	vmregset.regvals = regvals;
713 
714 	error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
715 	return (error);
716 }
717 
718 int
719 vm_run(struct vcpu *vcpu, struct vm_run *vmrun)
720 {
721 	return (vcpu_ioctl(vcpu, VM_RUN, vmrun));
722 }
723 
724 int
725 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
726 {
727 	struct vm_suspend vmsuspend;
728 
729 	bzero(&vmsuspend, sizeof(vmsuspend));
730 	vmsuspend.how = how;
731 	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
732 }
733 
734 int
735 vm_reinit(struct vmctx *ctx)
736 {
737 
738 	return (ioctl(ctx->fd, VM_REINIT, 0));
739 }
740 
741 int
742 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
743     uint32_t errcode, int restart_instruction)
744 {
745 	struct vm_exception exc;
746 
747 	exc.vector = vector;
748 	exc.error_code = errcode;
749 	exc.error_code_valid = errcode_valid;
750 	exc.restart_instruction = restart_instruction;
751 
752 	return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
753 }
754 
755 int
756 vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
757 {
758 	/*
759 	 * The apic id associated with the 'vcpu' has the same numerical value
760 	 * as the 'vcpu' itself.
761 	 */
762 	return (apicid);
763 }
764 
765 int
766 vm_lapic_irq(struct vcpu *vcpu, int vector)
767 {
768 	struct vm_lapic_irq vmirq;
769 
770 	bzero(&vmirq, sizeof(vmirq));
771 	vmirq.vector = vector;
772 
773 	return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
774 }
775 
776 int
777 vm_lapic_local_irq(struct vcpu *vcpu, int vector)
778 {
779 	struct vm_lapic_irq vmirq;
780 
781 	bzero(&vmirq, sizeof(vmirq));
782 	vmirq.vector = vector;
783 
784 	return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
785 }
786 
787 int
788 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
789 {
790 	struct vm_lapic_msi vmmsi;
791 
792 	bzero(&vmmsi, sizeof(vmmsi));
793 	vmmsi.addr = addr;
794 	vmmsi.msg = msg;
795 
796 	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
797 }
798 
799 int
800 vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
801 {
802 	struct vm_ioapic_irq ioapic_irq;
803 
804 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
805 	ioapic_irq.irq = irq;
806 
807 	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
808 }
809 
810 int
811 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
812 {
813 	struct vm_ioapic_irq ioapic_irq;
814 
815 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
816 	ioapic_irq.irq = irq;
817 
818 	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
819 }
820 
821 int
822 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
823 {
824 	struct vm_ioapic_irq ioapic_irq;
825 
826 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
827 	ioapic_irq.irq = irq;
828 
829 	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
830 }
831 
832 int
833 vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
834 {
835 
836 	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
837 }
838 
839 int
840 vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
841     bool write, int size, uint64_t *value)
842 {
843 	struct vm_readwrite_kernemu_device irp = {
844 		.access_width = fls(size) - 1,
845 		.gpa = gpa,
846 		.value = write ? *value : ~0ul,
847 	};
848 	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
849 	int rc;
850 
851 	rc = vcpu_ioctl(vcpu, cmd, &irp);
852 	if (rc == 0 && !write)
853 		*value = irp.value;
854 	return (rc);
855 }
856 
857 int
858 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
859 {
860 	struct vm_isa_irq isa_irq;
861 
862 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
863 	isa_irq.atpic_irq = atpic_irq;
864 	isa_irq.ioapic_irq = ioapic_irq;
865 
866 	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
867 }
868 
869 int
870 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
871 {
872 	struct vm_isa_irq isa_irq;
873 
874 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
875 	isa_irq.atpic_irq = atpic_irq;
876 	isa_irq.ioapic_irq = ioapic_irq;
877 
878 	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
879 }
880 
881 int
882 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
883 {
884 	struct vm_isa_irq isa_irq;
885 
886 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
887 	isa_irq.atpic_irq = atpic_irq;
888 	isa_irq.ioapic_irq = ioapic_irq;
889 
890 	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
891 }
892 
893 int
894 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
895     enum vm_intr_trigger trigger)
896 {
897 	struct vm_isa_irq_trigger isa_irq_trigger;
898 
899 	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
900 	isa_irq_trigger.atpic_irq = atpic_irq;
901 	isa_irq_trigger.trigger = trigger;
902 
903 	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
904 }
905 
906 int
907 vm_inject_nmi(struct vcpu *vcpu)
908 {
909 	struct vm_nmi vmnmi;
910 
911 	bzero(&vmnmi, sizeof(vmnmi));
912 
913 	return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
914 }
915 
916 static const char *capstrmap[] = {
917 	[VM_CAP_HALT_EXIT]  = "hlt_exit",
918 	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
919 	[VM_CAP_PAUSE_EXIT] = "pause_exit",
920 	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
921 	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
922 	[VM_CAP_BPT_EXIT] = "bpt_exit",
923 };
924 
925 int
926 vm_capability_name2type(const char *capname)
927 {
928 	int i;
929 
930 	for (i = 0; i < (int)nitems(capstrmap); i++) {
931 		if (strcmp(capstrmap[i], capname) == 0)
932 			return (i);
933 	}
934 
935 	return (-1);
936 }
937 
938 const char *
939 vm_capability_type2name(int type)
940 {
941 	if (type >= 0 && type < (int)nitems(capstrmap))
942 		return (capstrmap[type]);
943 
944 	return (NULL);
945 }
946 
947 int
948 vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
949 {
950 	int error;
951 	struct vm_capability vmcap;
952 
953 	bzero(&vmcap, sizeof(vmcap));
954 	vmcap.captype = cap;
955 
956 	error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
957 	*retval = vmcap.capval;
958 	return (error);
959 }
960 
961 int
962 vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
963 {
964 	struct vm_capability vmcap;
965 
966 	bzero(&vmcap, sizeof(vmcap));
967 	vmcap.captype = cap;
968 	vmcap.capval = val;
969 
970 	return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
971 }
972 
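/*
 * Usage sketch: looking up a capability by name and enabling it for a vCPU.
 * Illustrative only; not every capability is supported on every CPU, so
 * callers typically probe with vm_get_capability() first.
 *
 *	int cap, val;
 *
 *	cap = vm_capability_name2type("hlt_exit");
 *	if (cap >= 0 && vm_get_capability(vcpu, cap, &val) == 0)
 *		vm_set_capability(vcpu, cap, 1);
 */
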
973 int
974 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
975 {
976 	struct vm_pptdev pptdev;
977 
978 	bzero(&pptdev, sizeof(pptdev));
979 	pptdev.bus = bus;
980 	pptdev.slot = slot;
981 	pptdev.func = func;
982 
983 	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
984 }
985 
986 int
987 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
988 {
989 	struct vm_pptdev pptdev;
990 
991 	bzero(&pptdev, sizeof(pptdev));
992 	pptdev.bus = bus;
993 	pptdev.slot = slot;
994 	pptdev.func = func;
995 
996 	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
997 }
998 
999 int
1000 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1001 		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
1002 {
1003 	struct vm_pptdev_mmio pptmmio;
1004 
1005 	bzero(&pptmmio, sizeof(pptmmio));
1006 	pptmmio.bus = bus;
1007 	pptmmio.slot = slot;
1008 	pptmmio.func = func;
1009 	pptmmio.gpa = gpa;
1010 	pptmmio.len = len;
1011 	pptmmio.hpa = hpa;
1012 
1013 	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1014 }
1015 
1016 int
1017 vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1018 		     vm_paddr_t gpa, size_t len)
1019 {
1020 	struct vm_pptdev_mmio pptmmio;
1021 
1022 	bzero(&pptmmio, sizeof(pptmmio));
1023 	pptmmio.bus = bus;
1024 	pptmmio.slot = slot;
1025 	pptmmio.func = func;
1026 	pptmmio.gpa = gpa;
1027 	pptmmio.len = len;
1028 
1029 	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
1030 }
1031 
1032 int
1033 vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
1034     uint64_t addr, uint64_t msg, int numvec)
1035 {
1036 	struct vm_pptdev_msi pptmsi;
1037 
1038 	bzero(&pptmsi, sizeof(pptmsi));
1039 	pptmsi.bus = bus;
1040 	pptmsi.slot = slot;
1041 	pptmsi.func = func;
1042 	pptmsi.msg = msg;
1043 	pptmsi.addr = addr;
1044 	pptmsi.numvec = numvec;
1045 
1046 	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1047 }
1048 
1049 int
1050 vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
1051     int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
1052 {
1053 	struct vm_pptdev_msix pptmsix;
1054 
1055 	bzero(&pptmsix, sizeof(pptmsix));
1056 	pptmsix.bus = bus;
1057 	pptmsix.slot = slot;
1058 	pptmsix.func = func;
1059 	pptmsix.idx = idx;
1060 	pptmsix.msg = msg;
1061 	pptmsix.addr = addr;
1062 	pptmsix.vector_control = vector_control;
1063 
1064 	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
1065 }
1066 
1067 int
1068 vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
1069 {
1070 	struct vm_pptdev ppt;
1071 
1072 	bzero(&ppt, sizeof(ppt));
1073 	ppt.bus = bus;
1074 	ppt.slot = slot;
1075 	ppt.func = func;
1076 
1077 	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt));
1078 }
1079 
1080 uint64_t *
1081 vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
1082 	     int *ret_entries)
1083 {
1084 	static _Thread_local uint64_t *stats_buf;
1085 	static _Thread_local u_int stats_count;
1086 	uint64_t *new_stats;
1087 	struct vm_stats vmstats;
1088 	u_int count, index;
1089 	bool have_stats;
1090 
1091 	have_stats = false;
1092 	count = 0;
1093 	for (index = 0;; index += nitems(vmstats.statbuf)) {
1094 		vmstats.index = index;
1095 		if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
1096 			break;
1097 		if (stats_count < index + vmstats.num_entries) {
1098 			new_stats = realloc(stats_buf,
1099 			    (index + vmstats.num_entries) * sizeof(uint64_t));
1100 			if (new_stats == NULL) {
1101 				errno = ENOMEM;
1102 				return (NULL);
1103 			}
1104 			stats_count = index + vmstats.num_entries;
1105 			stats_buf = new_stats;
1106 		}
1107 		memcpy(stats_buf + index, vmstats.statbuf,
1108 		    vmstats.num_entries * sizeof(uint64_t));
1109 		count += vmstats.num_entries;
1110 		have_stats = true;
1111 
1112 		if (vmstats.num_entries != nitems(vmstats.statbuf))
1113 			break;
1114 	}
1115 	if (have_stats) {
1116 		if (ret_entries)
1117 			*ret_entries = count;
1118 		if (ret_tv)
1119 			*ret_tv = vmstats.tv;
1120 		return (stats_buf);
1121 	} else
1122 		return (NULL);
1123 }
1124 
1125 const char *
1126 vm_get_stat_desc(struct vmctx *ctx, int index)
1127 {
1128 	static struct vm_stat_desc statdesc;
1129 
1130 	statdesc.index = index;
1131 	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
1132 		return (statdesc.desc);
1133 	else
1134 		return (NULL);
1135 }
1136 
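/*
 * Usage sketch: dumping all statistics for a vCPU.  vm_get_stats() returns
 * a thread-local buffer that is reused by the next call and must not be
 * freed by the caller.  Illustrative only.
 *
 *	struct timeval tv;
 *	uint64_t *stats;
 *	int i, nstats;
 *
 *	stats = vm_get_stats(vcpu, &tv, &nstats);
 *	for (i = 0; stats != NULL && i < nstats; i++)
 *		printf("%s: %ju\n", vm_get_stat_desc(ctx, i),
 *		    (uintmax_t)stats[i]);
 */
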
1137 int
1138 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
1139 {
1140 	int error;
1141 	struct vm_x2apic x2apic;
1142 
1143 	bzero(&x2apic, sizeof(x2apic));
1144 
1145 	error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
1146 	*state = x2apic.state;
1147 	return (error);
1148 }
1149 
1150 int
1151 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
1152 {
1153 	int error;
1154 	struct vm_x2apic x2apic;
1155 
1156 	bzero(&x2apic, sizeof(x2apic));
1157 	x2apic.state = state;
1158 
1159 	error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);
1160 
1161 	return (error);
1162 }
1163 
1164 /*
1165  * From Intel Vol 3a:
1166  * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
1167  */
1168 int
1169 vcpu_reset(struct vcpu *vcpu)
1170 {
1171 	int error;
1172 	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
1173 	uint32_t desc_access, desc_limit;
1174 	uint16_t sel;
1175 
1176 	zero = 0;
1177 
1178 	rflags = 0x2;
1179 	error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
1180 	if (error)
1181 		goto done;
1182 
1183 	rip = 0xfff0;
1184 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
1185 		goto done;
1186 
1187 	/*
1188 	 * According to Intel's Software Developer's Manual, CR0 should be
1189 	 * initialized with CR0_ET | CR0_NW | CR0_CD, but doing so crashes
1190 	 * some guests, such as Windows.
1191 	 */
1192 	cr0 = CR0_NE;
1193 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
1194 		goto done;
1195 
1196 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
1197 		goto done;
1198 
1199 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
1200 		goto done;
1201 
1202 	cr4 = 0;
1203 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
1204 		goto done;
1205 
1206 	/*
1207 	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
1208 	 */
1209 	desc_base = 0xffff0000;
1210 	desc_limit = 0xffff;
1211 	desc_access = 0x0093;
1212 	error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
1213 			    desc_base, desc_limit, desc_access);
1214 	if (error)
1215 		goto done;
1216 
1217 	sel = 0xf000;
1218 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
1219 		goto done;
1220 
1221 	/*
1222 	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
1223 	 */
1224 	desc_base = 0;
1225 	desc_limit = 0xffff;
1226 	desc_access = 0x0093;
1227 	error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
1228 			    desc_base, desc_limit, desc_access);
1229 	if (error)
1230 		goto done;
1231 
1232 	error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
1233 			    desc_base, desc_limit, desc_access);
1234 	if (error)
1235 		goto done;
1236 
1237 	error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
1238 			    desc_base, desc_limit, desc_access);
1239 	if (error)
1240 		goto done;
1241 
1242 	error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
1243 			    desc_base, desc_limit, desc_access);
1244 	if (error)
1245 		goto done;
1246 
1247 	error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
1248 			    desc_base, desc_limit, desc_access);
1249 	if (error)
1250 		goto done;
1251 
1252 	sel = 0;
1253 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
1254 		goto done;
1255 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
1256 		goto done;
1257 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
1258 		goto done;
1259 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
1260 		goto done;
1261 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
1262 		goto done;
1263 
1264 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
1265 		goto done;
1266 
1267 	/* General purpose registers */
1268 	rdx = 0xf00;
1269 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
1270 		goto done;
1271 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
1272 		goto done;
1273 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
1274 		goto done;
1275 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
1276 		goto done;
1277 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
1278 		goto done;
1279 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
1280 		goto done;
1281 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
1282 		goto done;
1283 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
1284 		goto done;
1285 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
1286 		goto done;
1287 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
1288 		goto done;
1289 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
1290 		goto done;
1291 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
1292 		goto done;
1293 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
1294 		goto done;
1295 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
1296 		goto done;
1297 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
1298 		goto done;
1299 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
1300 		goto done;
1301 
1302 	/* GDTR, IDTR */
1303 	desc_base = 0;
1304 	desc_limit = 0xffff;
1305 	desc_access = 0;
1306 	error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
1307 			    desc_base, desc_limit, desc_access);
1308 	if (error != 0)
1309 		goto done;
1310 
1311 	error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
1312 			    desc_base, desc_limit, desc_access);
1313 	if (error != 0)
1314 		goto done;
1315 
1316 	/* TR */
1317 	desc_base = 0;
1318 	desc_limit = 0xffff;
1319 	desc_access = 0x0000008b;
1320 	error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
1321 	if (error)
1322 		goto done;
1323 
1324 	sel = 0;
1325 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
1326 		goto done;
1327 
1328 	/* LDTR */
1329 	desc_base = 0;
1330 	desc_limit = 0xffff;
1331 	desc_access = 0x00000082;
1332 	error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
1333 			    desc_limit, desc_access);
1334 	if (error)
1335 		goto done;
1336 
1337 	sel = 0;
1338 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
1339 		goto done;
1340 
1341 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
1342 		 0xffff0ff0)) != 0)
1343 		goto done;
1344 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) !=
1345 	    0)
1346 		goto done;
1347 
1348 	if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
1349 		 zero)) != 0)
1350 		goto done;
1351 
1352 	error = 0;
1353 done:
1354 	return (error);
1355 }
1356 
1357 int
1358 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
1359 {
1360 	int error, i;
1361 	struct vm_gpa_pte gpapte;
1362 
1363 	bzero(&gpapte, sizeof(gpapte));
1364 	gpapte.gpa = gpa;
1365 
1366 	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
1367 
1368 	if (error == 0) {
1369 		*num = gpapte.ptenum;
1370 		for (i = 0; i < gpapte.ptenum; i++)
1371 			pte[i] = gpapte.pte[i];
1372 	}
1373 
1374 	return (error);
1375 }
1376 
1377 int
1378 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
1379 {
1380 	int error;
1381 	struct vm_hpet_cap cap;
1382 
1383 	bzero(&cap, sizeof(struct vm_hpet_cap));
1384 	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
1385 	if (capabilities != NULL)
1386 		*capabilities = cap.capabilities;
1387 	return (error);
1388 }
1389 
1390 int
1391 vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
1392     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1393 {
1394 	struct vm_gla2gpa gg;
1395 	int error;
1396 
1397 	bzero(&gg, sizeof(struct vm_gla2gpa));
1398 	gg.prot = prot;
1399 	gg.gla = gla;
1400 	gg.paging = *paging;
1401 
1402 	error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
1403 	if (error == 0) {
1404 		*fault = gg.fault;
1405 		*gpa = gg.gpa;
1406 	}
1407 	return (error);
1408 }
1409 
1410 int
1411 vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
1412     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1413 {
1414 	struct vm_gla2gpa gg;
1415 	int error;
1416 
1417 	bzero(&gg, sizeof(struct vm_gla2gpa));
1418 	gg.prot = prot;
1419 	gg.gla = gla;
1420 	gg.paging = *paging;
1421 
1422 	error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
1423 	if (error == 0) {
1424 		*fault = gg.fault;
1425 		*gpa = gg.gpa;
1426 	}
1427 	return (error);
1428 }
1429 
1430 #ifndef min
1431 #define	min(a,b)	(((a) < (b)) ? (a) : (b))
1432 #endif
1433 
1434 int
1435 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
1436     uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
1437     int *fault)
1438 {
1439 	void *va;
1440 	uint64_t gpa, off;
1441 	int error, i, n;
1442 
1443 	for (i = 0; i < iovcnt; i++) {
1444 		iov[i].iov_base = 0;
1445 		iov[i].iov_len = 0;
1446 	}
1447 
1448 	while (len) {
1449 		assert(iovcnt > 0);
1450 		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
1451 		if (error || *fault)
1452 			return (error);
1453 
1454 		off = gpa & PAGE_MASK;
1455 		n = MIN(len, PAGE_SIZE - off);
1456 
1457 		va = vm_map_gpa(vcpu->ctx, gpa, n);
1458 		if (va == NULL)
1459 			return (EFAULT);
1460 
1461 		iov->iov_base = va;
1462 		iov->iov_len = n;
1463 		iov++;
1464 		iovcnt--;
1465 
1466 		gla += n;
1467 		len -= n;
1468 	}
1469 	return (0);
1470 }
1471 
1472 void
1473 vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
1474 {
1475 	/*
1476 	 * Intentionally empty.  This is used by the instruction
1477 	 * emulation code shared with the kernel.  The in-kernel
1478 	 * version of this is non-empty.
1479 	 */
1480 }
1481 
1482 void
1483 vm_copyin(struct iovec *iov, void *vp, size_t len)
1484 {
1485 	const char *src;
1486 	char *dst;
1487 	size_t n;
1488 
1489 	dst = vp;
1490 	while (len) {
1491 		assert(iov->iov_len);
1492 		n = min(len, iov->iov_len);
1493 		src = iov->iov_base;
1494 		bcopy(src, dst, n);
1495 
1496 		iov++;
1497 		dst += n;
1498 		len -= n;
1499 	}
1500 }
1501 
1502 void
1503 vm_copyout(const void *vp, struct iovec *iov, size_t len)
1504 {
1505 	const char *src;
1506 	char *dst;
1507 	size_t n;
1508 
1509 	src = vp;
1510 	while (len) {
1511 		assert(iov->iov_len);
1512 		n = min(len, iov->iov_len);
1513 		dst = iov->iov_base;
1514 		bcopy(src, dst, n);
1515 
1516 		iov++;
1517 		src += n;
1518 		len -= n;
1519 	}
1520 }
1521 
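/*
 * Usage sketch: copying data out of the guest given a guest-linear address.
 * Illustrative only; 'paging' describes the vCPU's current paging mode
 * (normally taken from the vmm(4) exit information) and fault handling is
 * abbreviated.
 *
 *	struct vm_guest_paging paging;	(filled in by the caller)
 *	struct iovec iov[8];
 *	char buf[128];
 *	uint64_t gla;			(guest linear address of interest)
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, nitems(iov), &fault);
 *	if (error == 0 && fault == 0)
 *		vm_copyin(iov, buf, sizeof(buf));
 *	vm_copy_teardown(iov, nitems(iov));
 */
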
1522 static int
1523 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1524 {
1525 	struct vm_cpuset vm_cpuset;
1526 	int error;
1527 
1528 	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1529 	vm_cpuset.which = which;
1530 	vm_cpuset.cpusetsize = sizeof(cpuset_t);
1531 	vm_cpuset.cpus = cpus;
1532 
1533 	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1534 	return (error);
1535 }
1536 
1537 int
1538 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1539 {
1540 
1541 	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1542 }
1543 
1544 int
1545 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1546 {
1547 
1548 	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1549 }
1550 
1551 int
1552 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1553 {
1554 
1555 	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1556 }
1557 
1558 int
1559 vm_activate_cpu(struct vcpu *vcpu)
1560 {
1561 	struct vm_activate_cpu ac;
1562 	int error;
1563 
1564 	bzero(&ac, sizeof(struct vm_activate_cpu));
1565 	error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
1566 	return (error);
1567 }
1568 
1569 int
1570 vm_suspend_all_cpus(struct vmctx *ctx)
1571 {
1572 	struct vm_activate_cpu ac;
1573 	int error;
1574 
1575 	bzero(&ac, sizeof(struct vm_activate_cpu));
1576 	ac.vcpuid = -1;
1577 	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
1578 	return (error);
1579 }
1580 
1581 int
1582 vm_suspend_cpu(struct vcpu *vcpu)
1583 {
1584 	struct vm_activate_cpu ac;
1585 	int error;
1586 
1587 	bzero(&ac, sizeof(struct vm_activate_cpu));
1588 	error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
1589 	return (error);
1590 }
1591 
1592 int
1593 vm_resume_cpu(struct vcpu *vcpu)
1594 {
1595 	struct vm_activate_cpu ac;
1596 	int error;
1597 
1598 	bzero(&ac, sizeof(struct vm_activate_cpu));
1599 	error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
1600 	return (error);
1601 }
1602 
1603 int
1604 vm_resume_all_cpus(struct vmctx *ctx)
1605 {
1606 	struct vm_activate_cpu ac;
1607 	int error;
1608 
1609 	bzero(&ac, sizeof(struct vm_activate_cpu));
1610 	ac.vcpuid = -1;
1611 	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
1612 	return (error);
1613 }
1614 
1615 int
1616 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
1617 {
1618 	struct vm_intinfo vmii;
1619 	int error;
1620 
1621 	bzero(&vmii, sizeof(struct vm_intinfo));
1622 	error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
1623 	if (error == 0) {
1624 		*info1 = vmii.info1;
1625 		*info2 = vmii.info2;
1626 	}
1627 	return (error);
1628 }
1629 
1630 int
1631 vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
1632 {
1633 	struct vm_intinfo vmii;
1634 	int error;
1635 
1636 	bzero(&vmii, sizeof(struct vm_intinfo));
1637 	vmii.info1 = info1;
1638 	error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
1639 	return (error);
1640 }
1641 
1642 int
1643 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
1644 {
1645 	struct vm_rtc_data rtcdata;
1646 	int error;
1647 
1648 	bzero(&rtcdata, sizeof(struct vm_rtc_data));
1649 	rtcdata.offset = offset;
1650 	rtcdata.value = value;
1651 	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
1652 	return (error);
1653 }
1654 
1655 int
1656 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
1657 {
1658 	struct vm_rtc_data rtcdata;
1659 	int error;
1660 
1661 	bzero(&rtcdata, sizeof(struct vm_rtc_data));
1662 	rtcdata.offset = offset;
1663 	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
1664 	if (error == 0)
1665 		*retval = rtcdata.value;
1666 	return (error);
1667 }
1668 
1669 int
1670 vm_rtc_settime(struct vmctx *ctx, time_t secs)
1671 {
1672 	struct vm_rtc_time rtctime;
1673 	int error;
1674 
1675 	bzero(&rtctime, sizeof(struct vm_rtc_time));
1676 	rtctime.secs = secs;
1677 	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
1678 	return (error);
1679 }
1680 
1681 int
1682 vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
1683 {
1684 	struct vm_rtc_time rtctime;
1685 	int error;
1686 
1687 	bzero(&rtctime, sizeof(struct vm_rtc_time));
1688 	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
1689 	if (error == 0)
1690 		*secs = rtctime.secs;
1691 	return (error);
1692 }
1693 
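/*
 * Usage sketch: seeding the guest RTC from the host clock and reading one
 * of the CMOS registers back.  Illustrative only; offset 0 is the MC146818
 * "seconds" register.
 *
 *	uint8_t sec;
 *
 *	vm_rtc_settime(ctx, time(NULL));
 *	if (vm_rtc_read(ctx, 0, &sec) == 0)
 *		printf("RTC seconds: %u\n", sec);
 */
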
1694 int
1695 vm_restart_instruction(struct vcpu *vcpu)
1696 {
1697 	int arg;
1698 
1699 	return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
1700 }
1701 
1702 int
1703 vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta)
1704 {
1705 
1706 	if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
1707 #ifdef SNAPSHOT_DEBUG
1708 		fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
1709 		    __func__, meta->dev_name, errno);
1710 #endif
1711 		return (-1);
1712 	}
1713 	return (0);
1714 }
1715 
1716 int
1717 vm_restore_time(struct vmctx *ctx)
1718 {
1719 	int dummy;
1720 
1721 	dummy = 0;
1722 	return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
1723 }
1724 
1725 int
1726 vm_set_topology(struct vmctx *ctx,
1727     uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
1728 {
1729 	struct vm_cpu_topology topology;
1730 
1731 	bzero(&topology, sizeof (struct vm_cpu_topology));
1732 	topology.sockets = sockets;
1733 	topology.cores = cores;
1734 	topology.threads = threads;
1735 	topology.maxcpus = maxcpus;
1736 	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
1737 }
1738 
1739 int
1740 vm_get_topology(struct vmctx *ctx,
1741     uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
1742 {
1743 	struct vm_cpu_topology topology;
1744 	int error;
1745 
1746 	bzero(&topology, sizeof (struct vm_cpu_topology));
1747 	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
1748 	if (error == 0) {
1749 		*sockets = topology.sockets;
1750 		*cores = topology.cores;
1751 		*threads = topology.threads;
1752 		*maxcpus = topology.maxcpus;
1753 	}
1754 	return (error);
1755 }
1756 
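/*
 * Usage sketch: describing a four-vCPU guest as one socket with two cores
 * and two threads per core.  Illustrative only; a maxcpus value of 0 lets
 * vmm(4) keep its default vCPU limit.
 *
 *	if (vm_set_topology(ctx, 1, 2, 2, 0) != 0)
 *		err(1, "vm_set_topology");
 */
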
1757 /* Keep in sync with machine/vmm_dev.h. */
1758 static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
1759     VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG,
1760     VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
1761     VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
1762     VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
1763     VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
1764     VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
1765     VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
1766     VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
1767     VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
1768     VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
1769     VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
1770     VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
1771     VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
1772     VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
1773     VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
1774     VM_GLA2GPA_NOFAULT,
1775     VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
1776     VM_SET_INTINFO, VM_GET_INTINFO,
1777     VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
1778     VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY,
1779     VM_SNAPSHOT_REQ, VM_RESTORE_TIME
1780 };
1781 
1782 int
1783 vm_limit_rights(struct vmctx *ctx)
1784 {
1785 	cap_rights_t rights;
1786 	size_t ncmds;
1787 
1788 	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
1789 	if (caph_rights_limit(ctx->fd, &rights) != 0)
1790 		return (-1);
1791 	ncmds = nitems(vm_ioctl_cmds);
1792 	if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0)
1793 		return (-1);
1794 	return (0);
1795 }
1796 
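/*
 * Usage sketch: restricting the VM device descriptor before entering
 * capability mode.  Illustrative only; caph_enter() comes from
 * <capsicum_helpers.h>, which is already included above.
 *
 *	if (vm_limit_rights(ctx) != 0)
 *		err(1, "vm_limit_rights");
 *	if (caph_enter() != 0)
 *		err(1, "cap_enter");
 */
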
1797 /*
1798  * Avoid using in new code.  Operations on the fd should be wrapped here so that
1799  * capability rights can be kept in sync.
1800  */
1801 int
1802 vm_get_device_fd(struct vmctx *ctx)
1803 {
1804 
1805 	return (ctx->fd);
1806 }
1807 
1808 /* Legacy interface, do not use. */
1809 const cap_ioctl_t *
1810 vm_get_ioctls(size_t *len)
1811 {
1812 	cap_ioctl_t *cmds;
1813 
1814 	if (len == NULL) {
1815 		cmds = malloc(sizeof(vm_ioctl_cmds));
1816 		if (cmds == NULL)
1817 			return (NULL);
1818 		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
1819 		return (cmds);
1820 	}
1821 
1822 	*len = nitems(vm_ioctl_cmds);
1823 	return (NULL);
1824 }
1825