xref: /freebsd/usr.sbin/bhyve/bhyverun.c (revision a9e8641da961bcf3d24afc85fd657f2083a872a2)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/time.h>
35 
36 #include <machine/atomic.h>
37 #include <machine/segments.h>
38 
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <err.h>
43 #include <libgen.h>
44 #include <unistd.h>
45 #include <assert.h>
46 #include <errno.h>
47 #include <pthread.h>
48 #include <pthread_np.h>
49 #include <sysexits.h>
50 
51 #include <machine/vmm.h>
52 #include <vmmapi.h>
53 
54 #include "bhyverun.h"
55 #include "acpi.h"
56 #include "inout.h"
57 #include "dbgport.h"
58 #include "ioapic.h"
59 #include "mem.h"
60 #include "mevent.h"
61 #include "mptbl.h"
62 #include "pci_emul.h"
63 #include "pci_lpc.h"
64 #include "xmsr.h"
65 #include "spinup_ap.h"
66 #include "rtc.h"
67 
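/*
 * An OUT to GUEST_NIO_PORT (e.g. an "outl" to port 0x488 from guest code) is
 * routed to vmexit_handle_notify() with the value written in %eax, giving the
 * guest a simple way to signal the host.
 */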
68 #define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */
69 
70 #define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
71 #define	VMEXIT_CONTINUE		1	/* continue from next instruction */
72 #define	VMEXIT_RESTART		2	/* restart current instruction */
73 #define	VMEXIT_ABORT		3	/* abort the vm run loop */
74 #define	VMEXIT_RESET		4	/* guest machine has reset */
75 #define	VMEXIT_POWEROFF		5	/* guest machine has powered off */
76 
77 #define MB		(1024UL * 1024)
78 #define GB		(1024UL * MB)
79 
80 typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
81 
82 char *vmname;
83 
84 int guest_ncpus;
85 
86 static int pincpu = -1;
87 static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
88 static int virtio_msix = 1;
89 static int x2apic_mode = 0;	/* default is xAPIC */
90 
91 static int strictio;
92 static int strictmsr = 1;
93 
94 static int acpi;
95 
96 static char *progname;
97 static const int BSP = 0;
98 
99 static int cpumask;
100 
101 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
102 
103 struct vm_exit vmexit[VM_MAXCPU];
104 
105 struct bhyvestats {
106         uint64_t        vmexit_bogus;
107         uint64_t        vmexit_bogus_switch;
108         uint64_t        vmexit_hlt;
109         uint64_t        vmexit_pause;
110         uint64_t        vmexit_mtrap;
111         uint64_t        vmexit_inst_emul;
112         uint64_t        cpu_switch_rotate;
113         uint64_t        cpu_switch_direct;
114         int             io_reset;
115 } stats;
116 
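/*
 * Per-vcpu thread state: the pthread running vm_loop() for a given vcpu and
 * the VM context it operates on.
 */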
117 struct mt_vmm_info {
118 	pthread_t	mt_thr;
119 	struct vmctx	*mt_ctx;
120 	int		mt_vcpu;
121 } mt_vmm_info[VM_MAXCPU];
122 
123 static void
124 usage(int code)
125 {
126 
127         fprintf(stderr,
128                 "Usage: %s [-abehwxAHIPW] [-g <gdb port>] [-s <pci>]\n"
129 		"       %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
130 		"       -a: local apic is in xAPIC mode (deprecated)\n"
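		"       -b: enable the bvmcons console device\n"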
131 		"       -A: create an ACPI table\n"
132 		"       -g: gdb port\n"
133 		"       -c: # cpus (default 1)\n"
134 		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
135 		"       -H: vmexit from the guest on hlt\n"
136 		"       -P: vmexit from the guest on pause\n"
137 		"       -W: force virtio to use single-vector MSI\n"
138 		"       -e: exit on unhandled I/O access\n"
139 		"       -h: help\n"
140 		"       -s: <slot,driver,configinfo> PCI slot config\n"
141 		"       -l: LPC device configuration\n"
142 		"       -m: memory size in MB\n"
143 		"       -w: ignore unimplemented MSRs\n"
144 		"       -x: local apic is in x2APIC mode\n",
145 		progname, (int)strlen(progname), "");
146 
147 	exit(code);
148 }
149 
150 void *
151 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
152 {
153 
154 	return (vm_map_gpa(ctx, gaddr, len));
155 }
156 
157 int
158 fbsdrun_vmexit_on_pause(void)
159 {
160 
161 	return (guest_vmexit_on_pause);
162 }
163 
164 int
165 fbsdrun_vmexit_on_hlt(void)
166 {
167 
168 	return (guest_vmexit_on_hlt);
169 }
170 
171 int
172 fbsdrun_virtio_msix(void)
173 {
174 
175 	return (virtio_msix);
176 }
177 
178 static void *
179 fbsdrun_start_thread(void *param)
180 {
181 	char tname[MAXCOMLEN + 1];
182 	struct mt_vmm_info *mtp;
183 	int vcpu;
184 
185 	mtp = param;
186 	vcpu = mtp->mt_vcpu;
187 
188 	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
189 	pthread_set_name_np(mtp->mt_thr, tname);
190 
191 	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);
192 
193 	/* not reached */
194 	exit(1);
195 	return (NULL);
196 }
197 
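/*
 * Mark the vcpu as active and start a dedicated thread that enters vm_loop()
 * at the supplied %rip.
 */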
198 void
199 fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
200 {
201 	int error;
202 
203 	if (cpumask & (1 << vcpu)) {
204 		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
205 		    vcpu);
206 		exit(1);
207 	}
208 
209 	atomic_set_int(&cpumask, 1 << vcpu);
210 
211 	/*
212 	 * Set up the vmexit struct to allow execution to start
213 	 * at the given RIP
214 	 */
215 	vmexit[vcpu].rip = rip;
216 	vmexit[vcpu].inst_length = 0;
217 
218 	mt_vmm_info[vcpu].mt_ctx = ctx;
219 	mt_vmm_info[vcpu].mt_vcpu = vcpu;
220 
221 	error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
222 	    fbsdrun_start_thread, &mt_vmm_info[vcpu]);
223 	assert(error == 0);
224 }
225 
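/*
 * Remove the vcpu from the active set; returns non-zero if it was the last
 * active vcpu.
 */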
226 static int
227 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
228 {
229 
230 	if ((cpumask & (1 << vcpu)) == 0) {
231 		fprintf(stderr, "deletecpu: attempting to delete unknown cpu %d\n",
232 		    vcpu);
233 		exit(1);
234 	}
235 
236 	atomic_clear_int(&cpumask, 1 << vcpu);
237 	return (cpumask == 0);
238 }
239 
240 static int
241 vmexit_catch_reset(void)
242 {
243         stats.io_reset++;
244         return (VMEXIT_RESET);
245 }
246 
247 static int
248 vmexit_catch_inout(void)
249 {
250 	return (VMEXIT_ABORT);
251 }
252 
253 static int
254 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
255 		     uint32_t eax)
256 {
257 #if BHYVE_DEBUG
258 	/*
259 	 * put guest-driven debug here
260 	 */
261 #endif
262         return (VMEXIT_CONTINUE);
263 }
264 
265 static int
266 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
267 {
268 	int error;
269 	int bytes, port, in, out;
270 	uint32_t eax;
271 	int vcpu;
272 
273 	vcpu = *pvcpu;
274 
275 	port = vme->u.inout.port;
276 	bytes = vme->u.inout.bytes;
277 	eax = vme->u.inout.eax;
278 	in = vme->u.inout.in;
279 	out = !in;
280 
281 	/* We don't deal with these */
282 	if (vme->u.inout.string || vme->u.inout.rep)
283 		return (VMEXIT_ABORT);
284 
285 	/* Special case of guest reset */
286 	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
287 		return (vmexit_catch_reset());
288 
289         /* Extra-special case of host notifications */
290         if (out && port == GUEST_NIO_PORT)
291                 return (vmexit_handle_notify(ctx, vme, pvcpu, eax));
292 
293 	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
294 	if (error == INOUT_OK && in)
295 		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);
296 
297 	switch (error) {
298 	case INOUT_OK:
299 		return (VMEXIT_CONTINUE);
300 	case INOUT_RESET:
301 		return (VMEXIT_RESET);
302 	case INOUT_POWEROFF:
303 		return (VMEXIT_POWEROFF);
304 	default:
305 		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
306 			in ? "in" : "out",
307 			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
308 		return (vmexit_catch_inout());
309 	}
310 }
311 
312 static int
313 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
314 {
315 	uint64_t val;
316 	uint32_t eax, edx;
317 	int error;
318 
319 	val = 0;
320 	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
321 	if (error != 0) {
322 		fprintf(stderr, "rdmsr from register %#x on vcpu %d\n",
323 		    vme->u.msr.code, *pvcpu);
324 		if (strictmsr)
325 			return (VMEXIT_ABORT);
326 	}
327 
328 	eax = val;
329 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
330 	assert(error == 0);
331 
332 	edx = val >> 32;
333 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
334 	assert(error == 0);
335 
336 	return (VMEXIT_CONTINUE);
337 }
338 
339 static int
340 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
341 {
342 	int error;
343 
344 	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
345 	if (error != 0) {
346 		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
347 		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
348 		if (strictmsr)
349 			return (VMEXIT_ABORT);
350 	}
351 	return (VMEXIT_CONTINUE);
352 }
353 
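/*
 * Bring up an AP at the requested %rip via spinup_ap(); the value returned by
 * spinup_ap() is not used and the exit always continues.
 */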
354 static int
355 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
356 {
357 	int newcpu;
358 	int retval = VMEXIT_CONTINUE;
359 
360 	newcpu = spinup_ap(ctx, *pvcpu,
361 			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
362 
363 	return (retval);
364 }
365 
366 static int
367 vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
368 {
369 	int lastcpu;
370 
371 	lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
372 	if (!lastcpu)
373 		pthread_exit(NULL);
374 	return (vmexit_catch_reset());
375 }
376 
377 static int
378 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
379 {
380 
381 	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
382 	fprintf(stderr, "\treason\t\tVMX\n");
383 	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
384 	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
385 	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
386 	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
387 	fprintf(stderr, "\tqualification\t0x%016lx\n",
388 	    vmexit->u.vmx.exit_qualification);
389 	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
390 	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
391 
392 	return (VMEXIT_ABORT);
393 }
394 
395 static int
396 vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
397 {
398 
399 	stats.vmexit_bogus++;
400 
401 	return (VMEXIT_RESTART);
402 }
403 
404 static int
405 vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
406 {
407 
408 	stats.vmexit_hlt++;
409 
410 	/*
411 	 * Just continue execution with the next instruction. We use
412 	 * the HLT VM exit as a way to be friendly with the host
413 	 * scheduler.
414 	 */
415 	return (VMEXIT_CONTINUE);
416 }
417 
418 static int
419 vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
420 {
421 
422 	stats.vmexit_pause++;
423 
424 	return (VMEXIT_CONTINUE);
425 }
426 
427 static int
428 vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
429 {
430 
431 	stats.vmexit_mtrap++;
432 
433 	return (VMEXIT_RESTART);
434 }
435 
436 static int
437 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
438 {
439 	int err;
440 	stats.vmexit_inst_emul++;
441 
442 	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
443 			  &vmexit->u.inst_emul.vie);
444 
445 	if (err) {
446 		if (err == EINVAL) {
447 			fprintf(stderr,
448 			    "Failed to emulate instruction at 0x%lx\n",
449 			    vmexit->rip);
450 		} else if (err == ESRCH) {
451 			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
452 			    vmexit->u.inst_emul.gpa);
453 		}
454 
455 		return (VMEXIT_ABORT);
456 	}
457 
458 	return (VMEXIT_CONTINUE);
459 }
460 
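/*
 * Dispatch table of vm exit handlers, indexed by exit code.  The HLT and
 * PAUSE entries are filled in at runtime by fbsdrun_set_capabilities() when
 * the corresponding capabilities are enabled.
 */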
461 static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
462 	[VM_EXITCODE_INOUT]  = vmexit_inout,
463 	[VM_EXITCODE_VMX]    = vmexit_vmx,
464 	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
465 	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
466 	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
467 	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
468 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
469 	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
470 	[VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
471 };
472 
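/*
 * Per-vcpu run loop: optionally pin the thread to a host cpu, then call
 * vm_run() repeatedly and dispatch each exit to its handler.  CONTINUE
 * resumes at the next instruction, RESTART re-executes the current one,
 * and any other result terminates the process.
 */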
473 static void
474 vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
475 {
476 	cpuset_t mask;
477 	int error, rc, prevcpu;
478 	enum vm_exitcode exitcode;
479 
480 	if (pincpu >= 0) {
481 		CPU_ZERO(&mask);
482 		CPU_SET(pincpu + vcpu, &mask);
483 		error = pthread_setaffinity_np(pthread_self(),
484 					       sizeof(mask), &mask);
485 		assert(error == 0);
486 	}
487 
488 	while (1) {
489 		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
490 		if (error != 0)
491 			break;
492 
493 		prevcpu = vcpu;
494 
495 		exitcode = vmexit[vcpu].exitcode;
496 		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
497 			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
498 			    exitcode);
499 			exit(1);
500 		}
501 
502                 rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
503 
504 		switch (rc) {
505 		case VMEXIT_CONTINUE:
506                         rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
507 			break;
508 		case VMEXIT_RESTART:
509                         rip = vmexit[vcpu].rip;
510 			break;
511 		case VMEXIT_RESET:
512 			exit(0);
513 		default:
514 			exit(1);
515 		}
516 	}
517 	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
518 }
519 
520 static int
521 num_vcpus_allowed(struct vmctx *ctx)
522 {
523 	int tmp, error;
524 
525 	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
526 
527 	/*
528 	 * The guest is allowed to spinup more than one processor only if the
529 	 * UNRESTRICTED_GUEST capability is available.
530 	 */
531 	if (error == 0)
532 		return (VM_MAXCPU);
533 	else
534 		return (1);
535 }
536 
537 void
538 fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
539 {
540 	int err, tmp;
541 
542 	if (fbsdrun_vmexit_on_hlt()) {
543 		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
544 		if (err < 0) {
545 			fprintf(stderr, "VM exit on HLT not supported\n");
546 			exit(1);
547 		}
548 		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
549 		if (cpu == BSP)
550 			handler[VM_EXITCODE_HLT] = vmexit_hlt;
551 	}
552 
553         if (fbsdrun_vmexit_on_pause()) {
554 		/*
555 		 * pause exit support required for this mode
556 		 */
557 		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
558 		if (err < 0) {
559 			fprintf(stderr,
560 			    "VM exit on PAUSE not supported\n");
561 			exit(1);
562 		}
563 		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
564 		if (cpu == BSP)
565 			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
566         }
567 
568 	if (x2apic_mode)
569 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
570 	else
571 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
572 
573 	if (err) {
574 		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
575 		exit(1);
576 	}
577 
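	/*
	 * Let the guest use the INVPCID instruction if the host supports it;
	 * failure to set this optional capability is ignored.
	 */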
578 	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
579 }
580 
581 int
582 main(int argc, char *argv[])
583 {
584 	int c, error, gdb_port, err, bvmcons;
585 	int max_vcpus;
586 	struct vmctx *ctx;
587 	uint64_t rip;
588 	size_t memsize;
589 
590 	bvmcons = 0;
591 	progname = basename(argv[0]);
592 	gdb_port = 0;
593 	guest_ncpus = 1;
594 	memsize = 256 * MB;
595 
596 	while ((c = getopt(argc, argv, "abehwxAHIPWp:g:c:s:m:l:")) != -1) {
597 		switch (c) {
598 		case 'a':
599 			x2apic_mode = 0;
600 			break;
601 		case 'A':
602 			acpi = 1;
603 			break;
604 		case 'b':
605 			bvmcons = 1;
606 			break;
607 		case 'p':
608 			pincpu = atoi(optarg);
609 			break;
610                 case 'c':
611 			guest_ncpus = atoi(optarg);
612 			break;
613 		case 'g':
614 			gdb_port = atoi(optarg);
615 			break;
616 		case 'l':
617 			if (lpc_device_parse(optarg) != 0) {
618 				errx(EX_USAGE, "invalid lpc device "
619 				    "configuration '%s'", optarg);
620 			}
621 			break;
622 		case 's':
623 			if (pci_parse_slot(optarg) != 0)
624 				exit(1);
625 			else
626 				break;
627                 case 'm':
628 			error = vm_parse_memsize(optarg, &memsize);
629 			if (error)
630 				errx(EX_USAGE, "invalid memsize '%s'", optarg);
631 			break;
632 		case 'H':
633 			guest_vmexit_on_hlt = 1;
634 			break;
635 		case 'I':
636 			/*
637 			 * The "-I" option was used to add an ioapic to the
638 			 * virtual machine.
639 			 *
640 			 * An ioapic is now provided unconditionally for each
641 			 * virtual machine and this option is now deprecated.
642 			 */
643 			break;
644 		case 'P':
645 			guest_vmexit_on_pause = 1;
646 			break;
647 		case 'e':
648 			strictio = 1;
649 			break;
650 		case 'w':
651 			strictmsr = 0;
652 			break;
653 		case 'W':
654 			virtio_msix = 0;
655 			break;
656 		case 'x':
657 			x2apic_mode = 1;
658 			break;
659 		case 'h':
660 			usage(0);
661 		default:
662 			usage(1);
663 		}
664 	}
665 	argc -= optind;
666 	argv += optind;
667 
668 	if (argc != 1)
669 		usage(1);
670 
671 	vmname = argv[0];
672 
673 	ctx = vm_open(vmname);
674 	if (ctx == NULL) {
675 		perror("vm_open");
676 		exit(1);
677 	}
678 
679 	max_vcpus = num_vcpus_allowed(ctx);
680 	if (guest_ncpus > max_vcpus) {
681 		fprintf(stderr, "%d vCPUs requested but only %d available\n",
682 			guest_ncpus, max_vcpus);
683 		exit(1);
684 	}
685 
686 	fbsdrun_set_capabilities(ctx, BSP);
687 
688 	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
689 	if (err) {
690 		fprintf(stderr, "Unable to setup memory (%d)\n", err);
691 		exit(1);
692 	}
693 
694 	init_mem();
695 	init_inout();
696 	ioapic_init(ctx);
697 
698 	rtc_init(ctx);
699 
700 	/*
701 	 * Exit if a device emulation finds an error in its initialization
702 	 */
703 	if (init_pci(ctx) != 0)
704 		exit(1);
705 
706 	if (gdb_port != 0)
707 		init_dbgport(gdb_port);
708 
709 	if (bvmcons)
710 		init_bvmcons();
711 
712 	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
713 	assert(error == 0);
714 
715 	/*
716 	 * build the guest tables, MP etc.
717 	 */
718 	mptable_build(ctx, guest_ncpus);
719 
720 	if (acpi) {
721 		error = acpi_build(ctx, guest_ncpus);
722 		assert(error == 0);
723 	}
724 
725 	/*
726 	 * Change the proc title to include the VM name.
727 	 */
728 	setproctitle("%s", vmname);
729 
730 	/*
731 	 * Add CPU 0
732 	 */
733 	fbsdrun_addcpu(ctx, BSP, rip);
734 
735 	/*
736 	 * Head off to the main event dispatch loop
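	 * (this call does not return during normal operation)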
737 	 */
738 	mevent_dispatch();
739 
740 	exit(1);
741 }
742