/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <machine/specialreg.h>
#include <machine/param.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;
	char	*lowmem_addr;
	size_t	highmem;
	char	*highmem_addr;
	char	*name;
};
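
/*
 * A vmctx handle represents an open /dev/vmm/<name> device and caches the
 * guest memory layout.  The sketch below shows one plausible way a caller
 * might drive this API; it is illustrative only and assumes a single vCPU
 * and a caller-supplied handle_exit() routine that is not part of this
 * library.
 *
 *	struct vmctx *ctx;
 *	struct vm_exit vmexit;
 *	uint64_t rip = 0xfff0;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	if (vm_setup_memory(ctx, 512 * MB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 *	if (vcpu_reset(ctx, 0) != 0)
 *		err(1, "vcpu_reset");
 *	for (;;) {
 *		if (vm_run(ctx, 0, rip, &vmexit) != 0)
 *			break;
 *		rip = handle_exit(ctx, &vmexit);   (hypothetical helper)
 *	}
 *	vm_destroy(ctx);
 */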

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
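
/*
 * Illustrative behavior of vm_parse_memsize() (worked examples, not a
 * normative specification):
 *
 *	"256"       -> plain number below 1MB, scaled to 256MB
 *	"268435456" -> plain number >= 1MB, taken literally (256MB in bytes)
 *	"2g"        -> not a plain number, handed to expand_number(3) -> 2GB
 */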

int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
	}
	return (error);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}
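
/*
 * Worked example of the split performed above, assuming the default
 * lowmem_limit of 3GB set in vm_open():
 *
 *	vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL);
 *
 * yields a 3GB 'lowmem' segment at guest physical address 0 and a 5GB
 * 'highmem' segment starting at 4GB, leaving the 3GB-4GB range unbacked by
 * guest RAM (typically used for device memory such as the PCI hole).
 */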

void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}
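
/*
 * Illustrative example: with the 8GB layout sketched above,
 * vm_map_gpa(ctx, 5 * GB, 4096) returns highmem_addr + 1GB, while
 * vm_map_gpa(ctx, 3 * GB + 0x1000, 4096) falls in the unbacked hole and
 * returns NULL.
 */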

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
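
/*
 * A minimal sketch of the register accessors above (illustrative; the
 * register identifiers come from <machine/vmm.h>):
 *
 *	uint64_t rax;
 *
 *	if (vm_get_register(ctx, 0, VM_REG_GUEST_RAX, &rax) == 0) {
 *		rax |= 0x1;
 *		(void) vm_set_register(ctx, 0, VM_REG_GUEST_RAX, rax);
 *	}
 */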

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
    int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}

int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}
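
/*
 * Sketch of how the capability name mapping is typically combined with
 * vm_set_capability() below (illustrative only):
 *
 *	int type = vm_capability_name2type("hlt_exit");
 *
 *	if (type >= 0)
 *		(void) vm_set_capability(ctx, 0, type, 1);
 */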

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}

int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
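
/*
 * Sketch showing how the two stats routines above are meant to be used
 * together (illustrative only):
 *
 *	struct timeval tv;
 *	uint64_t *stats;
 *	int i, num;
 *
 *	stats = vm_get_stats(ctx, 0, &tv, &num);
 *	if (stats != NULL) {
 *		for (i = 0; i < num; i++)
 *			printf("%s: %ju\n", vm_get_stat_desc(ctx, i),
 *			    (uintmax_t)stats[i]);
 *	}
 */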

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

static int
gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, int *fault, uint64_t *gpa)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
{
	uint64_t gpa;
	int error, fault, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
		if (error)
			return (-1);
		if (fault)
			return (1);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		iov->iov_base = (void *)gpa;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		src = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		dst = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}
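
/*
 * The three routines above are designed to be used together: vm_gla2gpa()
 * scatters a guest-linear range into page-sized iovec entries whose
 * iov_base holds the guest physical address, and vm_copyin()/vm_copyout()
 * then walk that iovec through vm_map_gpa().  A minimal sketch, assuming a
 * 'paging' state captured from a vm_exit (not constructed here):
 *
 *	struct iovec iov[8];
 *	char buf[256];
 *
 *	if (vm_gla2gpa(ctx, 0, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, 8) == 0)
 *		vm_copyin(ctx, 0, iov, buf, sizeof(buf));
 */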

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}