xref: /illumos-gate/usr/src/lib/libvmmapi/common/vmmapi.c (revision 1b09309c5ebed5c0bf14e2b396bf626c5aa30034)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#ifdef	__FreeBSD__
#include <sys/linker.h>
#endif
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

#ifndef __FreeBSD__
/* shim to no-op for now */
#define	MAP_NOCORE		0
#define	MAP_ALIGNED_SUPER	0

/* Rely on PROT_NONE for guard purposes */
#define	MAP_GUARD		(MAP_PRIVATE | MAP_ANON | MAP_NORESERVE)
#endif

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};

#ifdef	__FreeBSD__
#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

int
vm_create(const char *name)
{
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
	return (CREATE((char *)name));
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

#else
static int
vm_do_ctl(int cmd, void *req)
{
	int ctl_fd;

	ctl_fd = open(VMM_CTL_DEV, O_EXCL | O_RDWR);
	if (ctl_fd < 0) {
		return (-1);
	}

	if (ioctl(ctl_fd, cmd, req) == -1) {
		int err = errno;

		/* Do not lose ioctl errno through the close(2) */
		(void) close(ctl_fd);
		errno = err;
		return (-1);
	}
	(void) close(ctl_fd);

	return (0);
}

int
vm_create(const char *name, uint64_t flags)
{
	struct vm_create_req req;

	(void) strncpy(req.name, name, VM_MAX_NAMELEN);
	req.flags = flags;

	return (vm_do_ctl(VMM_CREATE_VM, &req));
}

void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);
	assert(vm->fd >= 0);

	(void) close(vm->fd);

	free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
	struct vm_destroy_req req;

	assert(vm != NULL);

	if (vm->fd >= 0) {
		(void) close(vm->fd);
		vm->fd = -1;
	}

	(void) strncpy(req.name, vm->name, VM_MAX_NAMELEN);
	(void) vm_do_ctl(VMM_DESTROY_VM, &req);

	free(vm);
}
#endif

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	free(vm);
	return (NULL);
}
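
/*
 * Example (illustrative sketch, not part of the library): creating and
 * opening a VM with the illumos variants of these interfaces. The name
 * "testvm" and the zero flags value are arbitrary; error reporting via
 * err(3C) is assumed and its include is elided.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm", 0) != 0)
 *		err(EXIT_FAILURE, "vm_create failed");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(EXIT_FAILURE, "vm_open failed");
 */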

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
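
/*
 * Example (sketch): how vm_parse_memsize() interprets its input. A bare
 * number below one megabyte is scaled up to MB units; anything else goes
 * through expand_number() suffix parsing.
 *
 *	size_t sz;
 *
 *	(void) vm_parse_memsize("512", &sz);	-> sz == 512 * MB
 *	(void) vm_parse_memsize("2g", &sz);	-> sz == 2 * GB
 */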

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}

int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
	struct vm_munmap munmap;
	int error;

	munmap.gpa = gpa;
	munmap.len = len;

	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}
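
/*
 * Example (sketch): walking every established mapping with
 * vm_mmap_getnext(). Starting the scan at GPA 0 and advancing past each
 * returned range is assumed to visit all mappings in ascending order;
 * 'ctx' is an open vmctx handle.
 *
 *	vm_paddr_t gpa = 0;
 *	vm_ooffset_t segoff;
 *	size_t maplen;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &maplen, &prot,
 *	    &flags) == 0) {
 *		(void) printf("segid %d at gpa 0x%lx len 0x%lx\n",
 *		    segid, (u_long)gpa, (u_long)maplen);
 *		gpa += maplen;
 *	}
 */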

/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
#ifdef __FreeBSD__
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
#else
setup_memory_segment(struct vmctx *ctx, int segid, vm_paddr_t gpa, size_t len,
    char *base)
#endif
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
#ifdef __FreeBSD__
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
#else
	/*
	 * As we use two segments for lowmem/highmem, the offset within each
	 * segment is 0 on illumos.
	 */
	error = vm_mmap_memseg(ctx, gpa, segid, 0, len, PROT_ALL);
#endif
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

#ifdef __FreeBSD__
	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);
#endif

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;

#ifdef __FreeBSD__
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}
#else
	if (ctx->highmem > 0) {
		error = vm_alloc_memseg(ctx, VM_HIGHMEM, ctx->highmem, NULL);
		if (error)
			return (error);
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, VM_HIGHMEM, gpa, len,
		    baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		error = vm_alloc_memseg(ctx, VM_LOWMEM, ctx->lowmem, NULL);
		if (error)
			return (error);
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, VM_LOWMEM, gpa, len,
		    baseaddr);
		if (error)
			return (error);
	}
#endif

	ctx->baseaddr = baseaddr;

	return (0);
}
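
/*
 * Example (sketch): allocating and mapping guest memory for a new VM.
 * With the default 3 GB lowmem limit, asking for 8 GB yields a 3 GB
 * lowmem segment at GPA 0 and a 5 GB highmem segment starting at 4 GB.
 *
 *	vm_set_memflags(ctx, 0);
 *	if (vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL) != 0)
 *		err(EXIT_FAILURE, "vm_setup_memory failed");
 */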

/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular, return NULL if [gaddr, gaddr+len) falls within the guest
 * MMIO region. The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}
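
/*
 * Example (sketch): translating a guest-physical address into a host
 * virtual address and storing a byte through it. The address 0x1000 is
 * arbitrary and assumed to lie within lowmem.
 *
 *	uint8_t *hva;
 *
 *	hva = vm_map_gpa(ctx, 0x1000, sizeof (uint8_t));
 *	if (hva != NULL)
 *		*hva = 0x90;
 */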

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

#ifndef __FreeBSD__
int
vm_get_devmem_offset(struct vmctx *ctx, int segid, off_t *mapoff)
{
	struct vm_devmem_offset vdo;
	int error;

	vdo.segid = segid;
	error = ioctl(ctx->fd, VM_DEVMEM_GETOFFSET, &vdo);
	if (error == 0)
		*mapoff = vdo.offset;

	return (error);
}
#endif

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
#ifdef	__FreeBSD__
	char pathname[MAXPATHLEN];
#endif
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;
	off_t mapoff;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

#ifdef	__FreeBSD__
	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;
#else
	if (vm_get_devmem_offset(ctx, segid, &mapoff) != 0)
		goto done;
#endif

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

#ifdef	__FreeBSD__
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
#else
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, ctx->fd,
	    mapoff);
#endif
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, const struct vm_entry *vm_entry,
    struct vm_exit *vm_exit)
{
	struct vm_entry entry;

	bcopy(vm_entry, &entry, sizeof (entry));
	entry.cpuid = vcpu;
	entry.exit_data = vm_exit;

	return (ioctl(ctx->fd, VM_RUN, &entry));
}
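
/*
 * Example (sketch): a minimal single-vCPU run loop using the illumos
 * entry/exit structures. A real loop would populate 'entry' based on the
 * previous exit (e.g. completed in/out data) and dispatch on many more
 * exit codes than shown here.
 *
 *	struct vm_entry entry = { 0 };
 *	struct vm_exit exit;
 *
 *	for (;;) {
 *		if (vm_run(ctx, 0, &entry, &exit) != 0)
 *			break;
 *		if (exit.exitcode == VM_EXITCODE_HLT)
 *			break;
 *	}
 */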

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

#ifndef __FreeBSD__
void
vm_inject_fault(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    int errcode)
{
	int error;
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = 1;
	error = ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc);

	assert(error == 0);
}
#endif /* __FreeBSD__ */

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
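
/*
 * Example (sketch): injecting a level-triggered interrupt on IOAPIC pin 9
 * by asserting and later deasserting it, after checking that the pin
 * number is in range. The pin number is arbitrary.
 *
 *	int pins;
 *
 *	if (vm_ioapic_pincount(ctx, &pins) == 0 && 9 < pins) {
 *		(void) vm_ioapic_assert_irq(ctx, 9);
 *		... service the device ...
 *		(void) vm_ioapic_deassert_irq(ctx, 9);
 *	}
 */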

int
vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.vcpuid = vcpu,
		.access_width = fls(size) - 1,
		.gpa = gpa,
		.value = write ? *value : ~0ul,
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = ioctl(ctx->fd, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static const char *capstrmap[] = {
	[VM_CAP_HALT_EXIT]  = "hlt_exit",
	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
	[VM_CAP_PAUSE_EXIT] = "pause_exit",
#ifdef __FreeBSD__
	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
#endif
	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
	[VM_CAP_BPT_EXIT] = "bpt_exit",
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; i < nitems(capstrmap); i++) {
		/* Skip gaps left in the map by capabilities absent here */
		if (capstrmap[i] != NULL && strcmp(capstrmap[i], capname) == 0)
			return (i);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	if (type >= 0 && type < nitems(capstrmap))
		return (capstrmap[type]);

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
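
/*
 * Example (sketch): looking up a capability by its string name and
 * enabling it on vCPU 0. "hlt_exit" maps to VM_CAP_HALT_EXIT in
 * capstrmap above; warn(3C) is used illustratively.
 *
 *	int cap = vm_capability_name2type("hlt_exit");
 *
 *	if (cap >= 0 &&
 *	    vm_set_capability(ctx, 0, (enum vm_cap_type)cap, 1) != 0)
 *		warn("unable to enable %s", vm_capability_type2name(cap));
 */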

#ifdef __FreeBSD__
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		     vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

int
vm_get_pptdev_limits(struct vmctx *ctx, int bus, int slot, int func,
    int *msi_limit, int *msix_limit)
{
	struct vm_pptdev_limits pptlimits;
	int error;

	bzero(&pptlimits, sizeof (pptlimits));
	pptlimits.bus = bus;
	pptlimits.slot = slot;
	pptlimits.func = func;

	error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);

	*msi_limit = pptlimits.msi_limit;
	*msix_limit = pptlimits.msix_limit;

	return (error);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev ppt;

	bzero(&ppt, sizeof(ppt));
	ppt.bus = bus;
	ppt.slot = slot;
	ppt.func = func;

	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt));
}

#else /* __FreeBSD__ */

int
vm_assign_pptdev(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len,
    vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	pptmmio.pptfd = pptfd;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;
	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.pptfd = pptfd;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int pptfd, uint64_t addr,
    uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	pptmsi.vcpu = vcpu;
	pptmsi.pptfd = pptfd;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;
	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int pptfd, int idx,
    uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	pptmsix.vcpu = vcpu;
	pptmsix.pptfd = pptfd;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;
	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

int
vm_get_pptdev_limits(struct vmctx *ctx, int pptfd, int *msi_limit,
    int *msix_limit)
{
	struct vm_pptdev_limits pptlimits;
	int error;

	bzero(&pptlimits, sizeof (pptlimits));
	pptlimits.pptfd = pptfd;
	error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);

	*msi_limit = pptlimits.msi_limit;
	*msix_limit = pptlimits.msix_limit;
	return (error);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &pptdev));
}
#endif /* __FreeBSD__ */

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS_IOC, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
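
/*
 * Example (sketch): dumping every statistic the kernel reports for vCPU 0,
 * pairing each value with its description (assumed non-NULL for indices
 * below the reported entry count).
 *
 *	struct timeval tv;
 *	int i, nstats;
 *	uint64_t *stats = vm_get_stats(ctx, 0, &tv, &nstats);
 *
 *	for (i = 0; stats != NULL && i < nstats; i++) {
 *		(void) printf("%s: %lu\n", vm_get_stat_desc(ctx, i),
 *		    (u_long)stats[i]);
 *	}
 */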

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

#ifndef __FreeBSD__
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	struct vm_vcpu_reset vvr;

	vvr.vcpuid = vcpu;
	vvr.kind = VRK_RESET;

	return (ioctl(vmctx->fd, VM_RESET_CPU, &vvr));
}
#else /* __FreeBSD__ */
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR,
	    sel)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}
#endif /* __FreeBSD__ */

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

int
vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		va = vm_map_gpa(ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{

	return;
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}
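
/*
 * Example (sketch): reading a guest structure that may span a page
 * boundary. vm_copy_setup() translates the guest-linear range into host
 * iovecs and vm_copyin() gathers the bytes. 'paging' must describe the
 * vCPU's current paging mode, e.g. as captured at exit time; 'struct foo'
 * and the source address are hypothetical.
 *
 *	struct iovec iov[2];
 *	uint64_t gla = ...;
 *	struct foo buf;
 *	int fault;
 *
 *	if (vm_copy_setup(ctx, 0, &paging, gla, sizeof (buf), PROT_READ,
 *	    iov, nitems(iov), &fault) == 0 && fault == 0)
 *		vm_copyin(ctx, 0, iov, &buf, sizeof (buf));
 */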

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_suspend_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_resume_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
	return (error);
}

int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	vmii.info1 = info1;
	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
	return (error);
}

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}
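
/*
 * Example (sketch): synchronizing the guest RTC with the host clock and
 * reading it back; time(2) is assumed to be available to the caller.
 *
 *	time_t now = time(NULL);
 *	time_t guest;
 *
 *	if (vm_rtc_settime(ctx, now) == 0 &&
 *	    vm_rtc_gettime(ctx, &guest) == 0)
 *		assert(guest >= now);
 */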

int
vm_restart_instruction(void *arg, int vcpu)
{
	struct vmctx *ctx = arg;

	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}

int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}
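
/*
 * Example (sketch): advertising a 2-socket, 2-cores-per-socket,
 * single-threaded topology (4 vCPUs) to the guest before it starts
 * running. Passing 0 for maxcpus is assumed to leave that limit at its
 * default, as bhyve itself does.
 *
 *	if (vm_set_topology(ctx, 2, 2, 1, 0) != 0)
 *		warn("vm_set_topology failed");
 */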

int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}

#ifndef __FreeBSD__
int
vm_pmtmr_set_location(struct vmctx *ctx, uint16_t ioport)
{
	return (ioctl(ctx->fd, VM_PMTMR_LOCATE, ioport));
}

int
vm_wrlock_cycle(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, VM_WRLOCK_CYCLE, 0) != 0) {
		return (errno);
	}
	return (0);
}

int
vm_get_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state *state,
    uint8_t *sipi_vector)
{
	struct vm_run_state data;

	data.vcpuid = vcpu;
	if (ioctl(ctx->fd, VM_GET_RUN_STATE, &data) != 0) {
		return (errno);
	}

	*state = data.state;
	*sipi_vector = data.sipi_vector;
	return (0);
}

int
vm_set_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state state,
    uint8_t sipi_vector)
{
	struct vm_run_state data;

	data.vcpuid = vcpu;
	data.state = state;
	data.sipi_vector = sipi_vector;
	if (ioctl(ctx->fd, VM_SET_RUN_STATE, &data) != 0) {
		return (errno);
	}

	return (0);
}

#endif /* __FreeBSD__ */

#ifdef __FreeBSD__
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;
	/* keep in sync with machine/vmm_dev.h */
	static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND,
	    VM_REINIT, VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG,
	    VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
	    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
	    VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
	    VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
	    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
	    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
	    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
	    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
	    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
	    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
	    VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
	    VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
	    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
	    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
	    VM_GLA2GPA_NOFAULT,
	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
	    VM_SET_INTINFO, VM_GET_INTINFO,
	    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
	    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };

	if (len == NULL) {
		cmds = malloc(sizeof(vm_ioctl_cmds));
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
		return (cmds);
	}

	*len = nitems(vm_ioctl_cmds);
	return (NULL);
}
#endif /* __FreeBSD__ */
2011