/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <machine/segments.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "legacy_irq.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_lpc.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
#define	VMEXIT_CONTINUE		1	/* continue from next instruction */
#define	VMEXIT_RESTART		2	/* restart current instruction */
#define	VMEXIT_ABORT		3	/* abort the vm run loop */
#define	VMEXIT_RESET		4	/* guest machine has reset */

#define MB		(1024UL * 1024)
#define GB		(1024UL * MB)

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

char *vmname;

int guest_ncpus;

static int pincpu = -1;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;
static int virtio_msix = 1;

static int foundcpus;

static int strictio;

static int acpi;

static char *progname;
static const int BSP = 0;

static int cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

struct vm_exit vmexit[VM_MAXCPU];

struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_bogus_switch;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
	int		io_reset;
} stats;

struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static void
usage(int code)
{

	fprintf(stderr,
		"Usage: %s [-abehAHIPW] [-g <gdb port>] [-s <pci>] [-S <pci>]\n"
		"       %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
		"       -a: local apic is in XAPIC mode (default is X2APIC)\n"
		"       -A: create an ACPI table\n"
		"       -b: enable bvmconsole\n"
		"       -g: gdb port\n"
		"       -c: # cpus (default 1)\n"
		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
		"       -H: vmexit from the guest on hlt\n"
		"       -P: vmexit from the guest on pause\n"
		"       -W: force virtio to use single-vector MSI\n"
		"       -e: exit on unhandled I/O access\n"
		"       -h: help\n"
		"       -s: <slot,driver,configinfo> PCI slot config\n"
		"       -S: <slot,driver,configinfo> legacy PCI slot config\n"
		"       -l: LPC device configuration\n"
		"       -m: memory size in MB\n",
		progname, (int)strlen(progname), "");

	exit(code);
}

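/*
 * Translate a guest physical address into a pointer within the guest
 * memory that vm_setup_memory() mapped into this process (see
 * vm_map_gpa() in libvmmapi).
 */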
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

int
fbsdrun_disable_x2apic(void)
{

	return (disable_x2apic);
}

int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

int
fbsdrun_virtio_msix(void)
{

	return (virtio_msix);
}

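/*
 * Per-vcpu thread entry point: name the thread after its vcpu and then
 * enter the vcpu run loop.  vm_loop() is not expected to return.
 */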
static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

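/*
 * Mark a vcpu as active in 'cpumask' and create the thread that will
 * run it, with execution starting at the given %rip.
 */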
void
fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	int error;

	if (cpumask & (1 << vcpu)) {
		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
		    vcpu);
		exit(1);
	}

	cpumask |= 1 << vcpu;
	foundcpus++;

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[vcpu].rip = rip;
	vmexit[vcpu].inst_length = 0;

	mt_vmm_info[vcpu].mt_ctx = ctx;
	mt_vmm_info[vcpu].mt_vcpu = vcpu;

	error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
	    fbsdrun_start_thread, &mt_vmm_info[vcpu]);
	assert(error == 0);
}

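/*
 * The guest asked for a reset (e.g. by writing 0xfe to the keyboard
 * controller command port); tell the run loop to exit cleanly.
 */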
static int
vmexit_catch_reset(void)
{
	stats.io_reset++;
	return (VMEXIT_RESET);
}

static int
vmexit_catch_inout(void)
{
	return (VMEXIT_ABORT);
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
		     uint32_t eax)
{
#if BHYVE_DEBUG
	/*
	 * put guest-driven debug here
	 */
#endif
	return (VMEXIT_CONTINUE);
}

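/*
 * Handle an in/out exit: intercept the keyboard-controller reset and
 * the guest notification port, and hand everything else to the
 * registered I/O port handlers via emulate_inout().
 */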
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	uint32_t eax;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	eax = vme->u.inout.eax;
	in = vme->u.inout.in;
	out = !in;

	/* We don't deal with these */
	if (vme->u.inout.string || vme->u.inout.rep)
		return (VMEXIT_ABORT);

	/* Special case of guest reset */
	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
		return (vmexit_catch_reset());

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT)
		return (vmexit_handle_notify(ctx, vme, pvcpu, eax));

	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
	if (error == 0 && in)
		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);

	if (error == 0)
		return (VMEXIT_CONTINUE);
	else {
		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
			in ? "in" : "out",
			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
		return (vmexit_catch_inout());
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	fprintf(stderr, "vm exit rdmsr 0x%x, cpu %d\n", vme->u.msr.code,
	    *pvcpu);
	return (VMEXIT_ABORT);
}

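/*
 * WRMSR exits are passed to emulate_wrmsr(); its return value is
 * ignored and the vcpu always continues at the next instruction.
 */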
static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);

	return (retval);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = spinup_ap(ctx, *pvcpu,
			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	return (retval);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);

	return (VMEXIT_ABORT);
}

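/*
 * A "bogus" exit does not correspond to anything the guest did, so the
 * only action needed is to restart the current instruction.
 */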
static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_bogus++;

	return (VMEXIT_RESTART);
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_hlt++;

	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_pause++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_mtrap++;

	return (VMEXIT_RESTART);
}

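/*
 * The guest accessed a physical address that is not plain memory, so
 * emulate the faulting instruction against the MMIO ranges registered
 * by the device models (see mem.c).
 */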
static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err;

	stats.vmexit_inst_emul++;

	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
			  &vmexit->u.inst_emul.vie);

	if (err) {
		if (err == EINVAL) {
			fprintf(stderr,
			    "Failed to emulate instruction at 0x%lx\n",
			    vmexit->rip);
		} else if (err == ESRCH) {
			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
			    vmexit->u.inst_emul.gpa);
		}

		return (VMEXIT_ABORT);
	}

	return (VMEXIT_CONTINUE);
}

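/*
 * Dispatch table indexed by VM exit code.  The HLT and PAUSE handlers
 * are installed only when those exits are enabled in
 * fbsdrun_set_capabilities().
 */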
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
};

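/*
 * Per-vcpu run loop: optionally pin the thread to a host cpu, then run
 * the vcpu and dispatch each VM exit to its handler until a handler
 * aborts or resets, or vm_run() fails.
 */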
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	cpuset_t mask;
	int error, rc, prevcpu;
	enum vm_exitcode exitcode;

	if (pincpu >= 0) {
		CPU_ZERO(&mask);
		CPU_SET(pincpu + vcpu, &mask);
		error = pthread_setaffinity_np(pthread_self(),
					       sizeof(mask), &mask);
		assert(error == 0);
	}

	while (1) {
		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
		if (error != 0) {
			/*
			 * It is possible that 'vmmctl' or some other process
			 * has transitioned the vcpu to CANNOT_RUN state right
			 * before we tried to transition it to RUNNING.
			 *
			 * This is expected to be temporary so just retry.
			 */
			if (errno == EBUSY)
				continue;
			else
				break;
		}

		prevcpu = vcpu;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(1);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
			break;
		case VMEXIT_RESTART:
			rip = vmexit[vcpu].rip;
			break;
		case VMEXIT_RESET:
			exit(0);
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

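/*
 * Enable the optional VM exits requested on the command line and set
 * the initial x2APIC mode for a vcpu.  The corresponding exit handlers
 * are installed once, when the BSP is configured; INVPCID use by the
 * guest is enabled unconditionally.
 */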
void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(1);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(1);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (fbsdrun_disable_x2apic())
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(1);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

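/*
 * Startup: parse options, open the VM and set up its memory, initialize
 * the device models and guest tables, start the BSP's vcpu thread and
 * then run the mevent dispatch loop on the main thread.
 */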
int
main(int argc, char *argv[])
{
	int c, error, gdb_port, err, bvmcons;
	int max_vcpus;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;

	bvmcons = 0;
	progname = basename(argv[0]);
	gdb_port = 0;
	guest_ncpus = 1;
	memsize = 256 * MB;

	while ((c = getopt(argc, argv, "abehAHIPWp:g:c:s:S:m:l:")) != -1) {
		switch (c) {
		case 'a':
			disable_x2apic = 1;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'p':
			pincpu = atoi(optarg);
			break;
		case 'c':
			guest_ncpus = atoi(optarg);
			break;
		case 'g':
			gdb_port = atoi(optarg);
			break;
		case 'l':
			if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
		case 's':
			if (pci_parse_slot(optarg, 0) != 0)
				exit(1);
			else
				break;
		case 'S':
			if (pci_parse_slot(optarg, 1) != 0)
				exit(1);
			else
				break;
		case 'm':
			error = vm_parse_memsize(optarg, &memsize);
			if (error)
				errx(EX_USAGE, "invalid memsize '%s'", optarg);
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'W':
			virtio_msix = 0;
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 1)
		usage(1);

	vmname = argv[0];

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(1);
	}

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
			guest_ncpus, max_vcpus);
		exit(1);
	}

	fbsdrun_set_capabilities(ctx, BSP);

	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to setup memory (%d)\n", err);
		exit(1);
	}

	init_mem();
	init_inout();
	legacy_irq_init();

	rtc_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0)
		exit(1);

	if (gdb_port != 0)
		init_dbgport(gdb_port);

	if (bvmcons)
		init_bvmcons();

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	mptable_build(ctx, guest_ncpus);

	if (acpi) {
		error = acpi_build(ctx, guest_ncpus);
		assert(error == 0);
	}

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, rip);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(1);
}
701