xref: /freebsd/lib/libvmmapi/vmmapi.c (revision f764fa47f09197795f9623aee01fca745799f191)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/sysctl.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 
37 #include <machine/specialreg.h>
38 #include <machine/param.h>
39 
40 #include <stdio.h>
41 #include <stdlib.h>
42 #include <assert.h>
43 #include <string.h>
44 #include <fcntl.h>
45 #include <unistd.h>
46 
47 #include <libutil.h>
48 
49 #include <machine/vmm.h>
50 #include <machine/vmm_dev.h>
51 
52 #include "vmmapi.h"
53 
54 #define	MB	(1024 * 1024UL)
55 #define	GB	(1024 * 1024 * 1024UL)
56 
57 struct vmctx {
58 	int	fd;
59 	uint32_t lowmem_limit;
60 	enum vm_mmap_style vms;
61 	int	memflags;
62 	size_t	lowmem;
63 	char	*lowmem_addr;
64 	size_t	highmem;
65 	char	*highmem_addr;
66 	char	*name;
67 };
68 
69 #define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
70 #define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
71 
72 static int
73 vm_device_open(const char *name)
74 {
75 	int fd, len;
76 	char *vmfile;
77 
78 	len = strlen("/dev/vmm/") + strlen(name) + 1;
79 	vmfile = malloc(len);
80 	assert(vmfile != NULL);
81 	snprintf(vmfile, len, "/dev/vmm/%s", name);
82 
83 	/* Open the device file */
84 	fd = open(vmfile, O_RDWR, 0);
85 
86 	free(vmfile);
87 	return (fd);
88 }
89 
90 int
91 vm_create(const char *name)
92 {
93 
94 	return (CREATE((char *)name));
95 }
96 
97 struct vmctx *
98 vm_open(const char *name)
99 {
100 	struct vmctx *vm;
101 
102 	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
103 	assert(vm != NULL);
104 
105 	vm->fd = -1;
106 	vm->memflags = 0;
107 	vm->lowmem_limit = 3 * GB;
108 	vm->name = (char *)(vm + 1);
109 	strcpy(vm->name, name);
110 
111 	if ((vm->fd = vm_device_open(vm->name)) < 0)
112 		goto err;
113 
114 	return (vm);
115 err:
116 	vm_destroy(vm);
117 	return (NULL);
118 }
119 
120 void
121 vm_destroy(struct vmctx *vm)
122 {
123 	assert(vm != NULL);
124 
125 	if (vm->fd >= 0)
126 		close(vm->fd);
127 	DESTROY(vm->name);
128 
129 	free(vm);
130 }
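
/*
 * A minimal sketch of the usual lifecycle around vm_create(), vm_open()
 * and vm_destroy(), assuming a bhyve-like consumer of this library; the
 * VM name and the error handling are illustrative only:
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		errx(1, "vm_open");
 *	... set up memory, reset and run the vcpus ...
 *	vm_destroy(ctx);
 */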
131 
132 int
133 vm_parse_memsize(const char *optarg, size_t *ret_memsize)
134 {
135 	char *endptr;
136 	size_t optval;
137 	int error;
138 
139 	optval = strtoul(optarg, &endptr, 0);
140 	if (*optarg != '\0' && *endptr == '\0') {
141 		/*
142 		 * For the sake of backward compatibility, if the memory size
143 		 * specified on the command line is less than a megabyte then
144 		 * it is interpreted as being in units of MB.
145 		 */
146 		if (optval < MB)
147 			optval *= MB;
148 		*ret_memsize = optval;
149 		error = 0;
150 	} else
151 		error = expand_number(optarg, ret_memsize);
152 
153 	return (error);
154 }
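
/*
 * Worked examples of the parsing rules above; the values are
 * illustrative:
 *
 *	"256"        -> 256 MB (a bare value below 1 MB is taken as MB)
 *	"268435456"  -> 256 MB (values of 1 MB or more are taken as bytes)
 *	"2G"         -> 2 GB   (suffixed values are parsed by expand_number(3))
 */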
155 
156 int
157 vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
158 		  int *wired)
159 {
160 	int error;
161 	struct vm_memory_segment seg;
162 
163 	bzero(&seg, sizeof(seg));
164 	seg.gpa = gpa;
165 	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
166 	*ret_len = seg.len;
167 	if (wired != NULL)
168 		*wired = seg.wired;
169 	return (error);
170 }
171 
172 uint32_t
173 vm_get_lowmem_limit(struct vmctx *ctx)
174 {
175 
176 	return (ctx->lowmem_limit);
177 }
178 
179 void
180 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
181 {
182 
183 	ctx->lowmem_limit = limit;
184 }
185 
186 void
187 vm_set_memflags(struct vmctx *ctx, int flags)
188 {
189 
190 	ctx->memflags = flags;
191 }
192 
193 static int
194 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
195 {
196 	int error, mmap_flags;
197 	struct vm_memory_segment seg;
198 
199 	/*
200 	 * Create and optionally map 'len' bytes of memory at guest
201 	 * physical address 'gpa'.
202 	 */
203 	bzero(&seg, sizeof(seg));
204 	seg.gpa = gpa;
205 	seg.len = len;
206 	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
207 	if (error == 0 && addr != NULL) {
208 		mmap_flags = MAP_SHARED;
209 		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
210 			mmap_flags |= MAP_NOCORE;
211 		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
212 		    ctx->fd, gpa);
213 	}
214 	return (error);
215 }
216 
217 int
218 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
219 {
220 	char **addr;
221 	int error;
222 
223 	/* XXX VM_MMAP_SPARSE not implemented yet */
224 	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
225 	ctx->vms = vms;
226 
227 	/*
228 	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
229 	 * create another 'highmem' segment above 4GB for the remainder.
230 	 */
231 	if (memsize > ctx->lowmem_limit) {
232 		ctx->lowmem = ctx->lowmem_limit;
233 		ctx->highmem = memsize - ctx->lowmem;
234 	} else {
235 		ctx->lowmem = memsize;
236 		ctx->highmem = 0;
237 	}
238 
239 	if (ctx->lowmem > 0) {
240 		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
241 		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
242 		if (error)
243 			return (error);
244 	}
245 
246 	if (ctx->highmem > 0) {
247 		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
248 		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
249 		if (error)
250 			return (error);
251 	}
252 
253 	return (0);
254 }
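
/*
 * A worked example of the split above, assuming the default 3 GB
 * lowmem_limit: vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL) creates a
 * 3 GB segment at guest physical address 0 and a 5 GB segment starting
 * at 4 GB, leaving [3 GB, 4 GB) unbacked for device memory.
 */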
255 
256 void *
257 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
258 {
259 
260 	/* XXX VM_MMAP_SPARSE not implemented yet */
261 	assert(ctx->vms == VM_MMAP_ALL);
262 
263 	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
264 		return ((void *)(ctx->lowmem_addr + gaddr));
265 
266 	if (gaddr >= 4*GB) {
267 		gaddr -= 4*GB;
268 		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
269 			return ((void *)(ctx->highmem_addr + gaddr));
270 	}
271 
272 	return (NULL);
273 }
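
/*
 * A small sketch of using vm_map_gpa() to access guest memory from the
 * host, assuming the context was set up with VM_MMAP_ALL; the address
 * and length are illustrative:
 *
 *	char *p;
 *
 *	if ((p = vm_map_gpa(ctx, 0x1000, 512)) != NULL)
 *		memset(p, 0, 512);
 */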
274 
275 int
276 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
277 	    uint64_t base, uint32_t limit, uint32_t access)
278 {
279 	int error;
280 	struct vm_seg_desc vmsegdesc;
281 
282 	bzero(&vmsegdesc, sizeof(vmsegdesc));
283 	vmsegdesc.cpuid = vcpu;
284 	vmsegdesc.regnum = reg;
285 	vmsegdesc.desc.base = base;
286 	vmsegdesc.desc.limit = limit;
287 	vmsegdesc.desc.access = access;
288 
289 	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
290 	return (error);
291 }
292 
293 int
294 vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
295 	    uint64_t *base, uint32_t *limit, uint32_t *access)
296 {
297 	int error;
298 	struct vm_seg_desc vmsegdesc;
299 
300 	bzero(&vmsegdesc, sizeof(vmsegdesc));
301 	vmsegdesc.cpuid = vcpu;
302 	vmsegdesc.regnum = reg;
303 
304 	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
305 	if (error == 0) {
306 		*base = vmsegdesc.desc.base;
307 		*limit = vmsegdesc.desc.limit;
308 		*access = vmsegdesc.desc.access;
309 	}
310 	return (error);
311 }
312 
313 int
314 vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
315 {
316 	int error;
317 	struct vm_register vmreg;
318 
319 	bzero(&vmreg, sizeof(vmreg));
320 	vmreg.cpuid = vcpu;
321 	vmreg.regnum = reg;
322 	vmreg.regval = val;
323 
324 	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
325 	return (error);
326 }
327 
328 int
329 vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
330 {
331 	int error;
332 	struct vm_register vmreg;
333 
334 	bzero(&vmreg, sizeof(vmreg));
335 	vmreg.cpuid = vcpu;
336 	vmreg.regnum = reg;
337 
338 	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
339 	*ret_val = vmreg.regval;
340 	return (error);
341 }
342 
343 int
344 vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
345 {
346 	int error;
347 	struct vm_run vmrun;
348 
349 	bzero(&vmrun, sizeof(vmrun));
350 	vmrun.cpuid = vcpu;
351 	vmrun.rip = rip;
352 
353 	error = ioctl(ctx->fd, VM_RUN, &vmrun);
354 	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
355 	return (error);
356 }
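
/*
 * A minimal sketch of entering the guest and dispatching on the exit
 * reason, assuming vcpu 0 starting at the reset vector and a caller that
 * handles the remaining exit codes elsewhere:
 *
 *	struct vm_exit vmexit;
 *
 *	if (vm_run(ctx, 0, 0xfff0, &vmexit) == 0) {
 *		switch (vmexit.exitcode) {
 *		case VM_EXITCODE_HLT:
 *			...
 *		default:
 *			...
 *		}
 *	}
 */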
357 
358 int
359 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
360 {
361 	struct vm_suspend vmsuspend;
362 
363 	bzero(&vmsuspend, sizeof(vmsuspend));
364 	vmsuspend.how = how;
365 	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
366 }
367 
368 static int
369 vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
370     int error_code, int error_code_valid)
371 {
372 	struct vm_exception exc;
373 
374 	bzero(&exc, sizeof(exc));
375 	exc.cpuid = vcpu;
376 	exc.vector = vector;
377 	exc.error_code = error_code;
378 	exc.error_code_valid = error_code_valid;
379 
380 	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
381 }
382 
383 int
384 vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
385 {
386 
387 	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
388 }
389 
390 int
391 vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
392 {
393 
394 	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
395 }
396 
397 int
398 vm_apicid2vcpu(struct vmctx *ctx, int apicid)
399 {
400 	/*
401 	 * The apic id associated with the 'vcpu' has the same numerical value
402 	 * as the 'vcpu' itself.
403 	 */
404 	return (apicid);
405 }
406 
407 int
408 vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
409 {
410 	struct vm_lapic_irq vmirq;
411 
412 	bzero(&vmirq, sizeof(vmirq));
413 	vmirq.cpuid = vcpu;
414 	vmirq.vector = vector;
415 
416 	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
417 }
418 
419 int
420 vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
421 {
422 	struct vm_lapic_irq vmirq;
423 
424 	bzero(&vmirq, sizeof(vmirq));
425 	vmirq.cpuid = vcpu;
426 	vmirq.vector = vector;
427 
428 	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
429 }
430 
431 int
432 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
433 {
434 	struct vm_lapic_msi vmmsi;
435 
436 	bzero(&vmmsi, sizeof(vmmsi));
437 	vmmsi.addr = addr;
438 	vmmsi.msg = msg;
439 
440 	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
441 }
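
/*
 * A short sketch of injecting an MSI on behalf of an emulated device,
 * assuming 'addr' and 'msg' hold the address/data pair programmed into
 * the device's MSI capability registers:
 *
 *	error = vm_lapic_msi(ctx, addr, msg);
 */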
442 
443 int
444 vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
445 {
446 	struct vm_ioapic_irq ioapic_irq;
447 
448 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
449 	ioapic_irq.irq = irq;
450 
451 	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
452 }
453 
454 int
455 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
456 {
457 	struct vm_ioapic_irq ioapic_irq;
458 
459 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
460 	ioapic_irq.irq = irq;
461 
462 	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
463 }
464 
465 int
466 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
467 {
468 	struct vm_ioapic_irq ioapic_irq;
469 
470 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
471 	ioapic_irq.irq = irq;
472 
473 	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
474 }
475 
476 int
477 vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
478 {
479 
480 	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
481 }
482 
483 int
484 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
485 {
486 	struct vm_isa_irq isa_irq;
487 
488 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
489 	isa_irq.atpic_irq = atpic_irq;
490 	isa_irq.ioapic_irq = ioapic_irq;
491 
492 	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
493 }
494 
495 int
496 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
497 {
498 	struct vm_isa_irq isa_irq;
499 
500 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
501 	isa_irq.atpic_irq = atpic_irq;
502 	isa_irq.ioapic_irq = ioapic_irq;
503 
504 	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
505 }
506 
507 int
508 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
509 {
510 	struct vm_isa_irq isa_irq;
511 
512 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
513 	isa_irq.atpic_irq = atpic_irq;
514 	isa_irq.ioapic_irq = ioapic_irq;
515 
516 	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
517 }
518 
519 int
520 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
521     enum vm_intr_trigger trigger)
522 {
523 	struct vm_isa_irq_trigger isa_irq_trigger;
524 
525 	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
526 	isa_irq_trigger.atpic_irq = atpic_irq;
527 	isa_irq_trigger.trigger = trigger;
528 
529 	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
530 }
531 
532 int
533 vm_inject_nmi(struct vmctx *ctx, int vcpu)
534 {
535 	struct vm_nmi vmnmi;
536 
537 	bzero(&vmnmi, sizeof(vmnmi));
538 	vmnmi.cpuid = vcpu;
539 
540 	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
541 }
542 
543 static struct {
544 	const char	*name;
545 	int		type;
546 } capstrmap[] = {
547 	{ "hlt_exit",		VM_CAP_HALT_EXIT },
548 	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
549 	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
550 	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
551 	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
552 	{ 0 }
553 };
554 
555 int
556 vm_capability_name2type(const char *capname)
557 {
558 	int i;
559 
560 	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
561 		if (strcmp(capstrmap[i].name, capname) == 0)
562 			return (capstrmap[i].type);
563 	}
564 
565 	return (-1);
566 }
567 
568 const char *
569 vm_capability_type2name(int type)
570 {
571 	int i;
572 
573 	for (i = 0; capstrmap[i].name != NULL; i++) {
574 		if (capstrmap[i].type == type)
575 			return (capstrmap[i].name);
576 	}
577 
578 	return (NULL);
579 }
580 
581 int
582 vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
583 		  int *retval)
584 {
585 	int error;
586 	struct vm_capability vmcap;
587 
588 	bzero(&vmcap, sizeof(vmcap));
589 	vmcap.cpuid = vcpu;
590 	vmcap.captype = cap;
591 
592 	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
593 	*retval = vmcap.capval;
594 	return (error);
595 }
596 
597 int
598 vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
599 {
600 	struct vm_capability vmcap;
601 
602 	bzero(&vmcap, sizeof(vmcap));
603 	vmcap.cpuid = vcpu;
604 	vmcap.captype = cap;
605 	vmcap.capval = val;
606 
607 	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
608 }
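
/*
 * A short sketch of enabling a capability by name, assuming vcpu 0; the
 * capability chosen ("hlt_exit") is illustrative:
 *
 *	int type;
 *
 *	if ((type = vm_capability_name2type("hlt_exit")) != -1)
 *		error = vm_set_capability(ctx, 0, type, 1);
 */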
609 
610 int
611 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
612 {
613 	struct vm_pptdev pptdev;
614 
615 	bzero(&pptdev, sizeof(pptdev));
616 	pptdev.bus = bus;
617 	pptdev.slot = slot;
618 	pptdev.func = func;
619 
620 	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
621 }
622 
623 int
624 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
625 {
626 	struct vm_pptdev pptdev;
627 
628 	bzero(&pptdev, sizeof(pptdev));
629 	pptdev.bus = bus;
630 	pptdev.slot = slot;
631 	pptdev.func = func;
632 
633 	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
634 }
635 
636 int
637 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
638 		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
639 {
640 	struct vm_pptdev_mmio pptmmio;
641 
642 	bzero(&pptmmio, sizeof(pptmmio));
643 	pptmmio.bus = bus;
644 	pptmmio.slot = slot;
645 	pptmmio.func = func;
646 	pptmmio.gpa = gpa;
647 	pptmmio.len = len;
648 	pptmmio.hpa = hpa;
649 
650 	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
651 }
652 
653 int
654 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
655     uint64_t addr, uint64_t msg, int numvec)
656 {
657 	struct vm_pptdev_msi pptmsi;
658 
659 	bzero(&pptmsi, sizeof(pptmsi));
660 	pptmsi.vcpu = vcpu;
661 	pptmsi.bus = bus;
662 	pptmsi.slot = slot;
663 	pptmsi.func = func;
664 	pptmsi.msg = msg;
665 	pptmsi.addr = addr;
666 	pptmsi.numvec = numvec;
667 
668 	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
669 }
670 
671 int
672 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
673     int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
674 {
675 	struct vm_pptdev_msix pptmsix;
676 
677 	bzero(&pptmsix, sizeof(pptmsix));
678 	pptmsix.vcpu = vcpu;
679 	pptmsix.bus = bus;
680 	pptmsix.slot = slot;
681 	pptmsix.func = func;
682 	pptmsix.idx = idx;
683 	pptmsix.msg = msg;
684 	pptmsix.addr = addr;
685 	pptmsix.vector_control = vector_control;
686 
687 	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
688 }
689 
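/*
 * The statistics are returned in a static buffer that is overwritten by
 * the next call, so callers that need to keep the values must copy them
 * out before calling vm_get_stats() again.
 */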
690 uint64_t *
691 vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
692 	     int *ret_entries)
693 {
694 	int error;
695 
696 	static struct vm_stats vmstats;
697 
698 	vmstats.cpuid = vcpu;
699 
700 	error = ioctl(ctx->fd, VM_STATS, &vmstats);
701 	if (error == 0) {
702 		if (ret_entries)
703 			*ret_entries = vmstats.num_entries;
704 		if (ret_tv)
705 			*ret_tv = vmstats.tv;
706 		return (vmstats.statbuf);
707 	} else
708 		return (NULL);
709 }
710 
711 const char *
712 vm_get_stat_desc(struct vmctx *ctx, int index)
713 {
714 	static struct vm_stat_desc statdesc;
715 
716 	statdesc.index = index;
717 	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
718 		return (statdesc.desc);
719 	else
720 		return (NULL);
721 }
722 
723 int
724 vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
725 {
726 	int error;
727 	struct vm_x2apic x2apic;
728 
729 	bzero(&x2apic, sizeof(x2apic));
730 	x2apic.cpuid = vcpu;
731 
732 	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
733 	*state = x2apic.state;
734 	return (error);
735 }
736 
737 int
738 vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
739 {
740 	int error;
741 	struct vm_x2apic x2apic;
742 
743 	bzero(&x2apic, sizeof(x2apic));
744 	x2apic.cpuid = vcpu;
745 	x2apic.state = state;
746 
747 	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
748 
749 	return (error);
750 }
751 
752 /*
753  * From Intel Vol 3a:
754  * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
755  */
756 int
757 vcpu_reset(struct vmctx *vmctx, int vcpu)
758 {
759 	int error;
760 	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
761 	uint32_t desc_access, desc_limit;
762 	uint16_t sel;
763 
764 	zero = 0;
765 
766 	rflags = 0x2;
767 	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
768 	if (error)
769 		goto done;
770 
771 	rip = 0xfff0;
772 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
773 		goto done;
774 
775 	cr0 = CR0_NE;
776 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
777 		goto done;
778 
779 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
780 		goto done;
781 
782 	cr4 = 0;
783 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
784 		goto done;
785 
786 	/*
787 	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
788 	 */
789 	desc_base = 0xffff0000;
790 	desc_limit = 0xffff;
791 	desc_access = 0x0093;
792 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
793 			    desc_base, desc_limit, desc_access);
794 	if (error)
795 		goto done;
796 
797 	sel = 0xf000;
798 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
799 		goto done;
800 
801 	/*
802 	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
803 	 */
804 	desc_base = 0;
805 	desc_limit = 0xffff;
806 	desc_access = 0x0093;
807 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
808 			    desc_base, desc_limit, desc_access);
809 	if (error)
810 		goto done;
811 
812 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
813 			    desc_base, desc_limit, desc_access);
814 	if (error)
815 		goto done;
816 
817 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
818 			    desc_base, desc_limit, desc_access);
819 	if (error)
820 		goto done;
821 
822 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
823 			    desc_base, desc_limit, desc_access);
824 	if (error)
825 		goto done;
826 
827 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
828 			    desc_base, desc_limit, desc_access);
829 	if (error)
830 		goto done;
831 
832 	sel = 0;
833 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
834 		goto done;
835 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
836 		goto done;
837 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
838 		goto done;
839 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
840 		goto done;
841 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
842 		goto done;
843 
844 	/* General purpose registers */
845 	rdx = 0xf00;
846 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
847 		goto done;
848 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
849 		goto done;
850 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
851 		goto done;
852 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
853 		goto done;
854 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
855 		goto done;
856 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
857 		goto done;
858 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
859 		goto done;
860 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
861 		goto done;
862 
863 	/* GDTR, IDTR */
864 	desc_base = 0;
865 	desc_limit = 0xffff;
866 	desc_access = 0;
867 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
868 			    desc_base, desc_limit, desc_access);
869 	if (error != 0)
870 		goto done;
871 
872 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
873 			    desc_base, desc_limit, desc_access);
874 	if (error != 0)
875 		goto done;
876 
877 	/* TR */
878 	desc_base = 0;
879 	desc_limit = 0xffff;
880 	desc_access = 0x0000008b;
881 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base, desc_limit, desc_access);
882 	if (error)
883 		goto done;
884 
885 	sel = 0;
886 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
887 		goto done;
888 
889 	/* LDTR */
890 	desc_base = 0;
891 	desc_limit = 0xffff;
892 	desc_access = 0x00000082;
893 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
894 			    desc_limit, desc_access);
895 	if (error)
896 		goto done;
897 
898 	sel = 0;
899 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
900 		goto done;
901 
902 	/* XXX cr2, debug registers */
903 
904 	error = 0;
905 done:
906 	return (error);
907 }
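
/*
 * A minimal sketch of using vcpu_reset() to place a freshly created vcpu
 * in the power-up state described above before its first vm_run(); vcpu 0
 * is illustrative:
 *
 *	if (vcpu_reset(ctx, 0) != 0)
 *		... report the error and bail out ...
 */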
908 
909 int
910 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
911 {
912 	int error, i;
913 	struct vm_gpa_pte gpapte;
914 
915 	bzero(&gpapte, sizeof(gpapte));
916 	gpapte.gpa = gpa;
917 
918 	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
919 
920 	if (error == 0) {
921 		*num = gpapte.ptenum;
922 		for (i = 0; i < gpapte.ptenum; i++)
923 			pte[i] = gpapte.pte[i];
924 	}
925 
926 	return (error);
927 }
928 
929 int
930 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
931 {
932 	int error;
933 	struct vm_hpet_cap cap;
934 
935 	bzero(&cap, sizeof(struct vm_hpet_cap));
936 	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
937 	if (capabilities != NULL)
938 		*capabilities = cap.capabilities;
939 	return (error);
940 }
941 
942 static int
943 vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
944     uint64_t gla, int prot, int *fault, uint64_t *gpa)
945 {
946 	struct vm_gla2gpa gg;
947 	int error;
948 
949 	bzero(&gg, sizeof(struct vm_gla2gpa));
950 	gg.vcpuid = vcpu;
951 	gg.prot = prot;
952 	gg.gla = gla;
953 	gg.paging = *paging;
954 
955 	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
956 	if (error == 0) {
957 		*fault = gg.fault;
958 		*gpa = gg.gpa;
959 	}
960 	return (error);
961 }
962 
963 #ifndef min
964 #define	min(a,b)	(((a) < (b)) ? (a) : (b))
965 #endif
966 
967 int
968 vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
969     uint64_t gla, void *vp, size_t len)
970 {
971 	char *dst;
972 	const char *src;
973 	uint64_t gpa;
974 	int error, fault, n, off;
975 
976 	dst = vp;
977 	while (len) {
978 		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_READ,
979 		    &fault, &gpa);
980 		if (error)
981 			return (-1);
982 		if (fault)
983 			return (1);
984 
985 		off = gpa & PAGE_MASK;
986 		n = min(len, PAGE_SIZE - off);
987 		src = vm_map_gpa(ctx, gpa, n);
988 		bcopy(src, dst, n);
989 
990 		gla += n;
991 		dst += n;
992 		len -= n;
993 	}
994 	return (0);
995 }
996 
997 int
998 vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
999     const void *vp, uint64_t gla, size_t len)
1000 {
1001 	uint64_t gpa;
1002 	char *dst;
1003 	const char *src;
1004 	int error, fault, n, off;
1005 
1006 	src = vp;
1007 	while (len) {
1008 		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_WRITE,
1009 		    &fault, &gpa);
1010 		if (error)
1011 			return (-1);
1012 		if (fault)
1013 			return (1);
1014 
1015 		off = gpa & PAGE_MASK;
1016 		n = min(len, PAGE_SIZE - off);
1017 		dst = vm_map_gpa(ctx, gpa, n);
1018 		bcopy(src, dst, n);
1019 
1020 		gla += n;
1021 		src += n;
1022 		len -= n;
1023 	}
1024 	return (0);
1025 }
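
/*
 * A small sketch of copying guest memory by linear address, assuming a
 * struct vm_guest_paging captured from a previous exit; the buffer size
 * is illustrative.  A return of 0 means success, 1 means the translation
 * faulted in the guest, and -1 means the translation ioctl itself failed:
 *
 *	char buf[128];
 *
 *	if (vm_copyin(ctx, vcpu, &paging, gla, buf, sizeof(buf)) != 0)
 *		... handle the fault or error ...
 */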
1026