xref: /freebsd/usr.sbin/bhyve/bhyverun.c (revision 69cfb77152f7daa782bce28efd073439a04fc441)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/time.h>
35 
36 #include <machine/atomic.h>
37 #include <machine/segments.h>
38 
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <err.h>
43 #include <libgen.h>
44 #include <unistd.h>
45 #include <assert.h>
46 #include <errno.h>
47 #include <pthread.h>
48 #include <pthread_np.h>
49 #include <sysexits.h>
50 
51 #include <machine/vmm.h>
52 #include <vmmapi.h>
53 
54 #include "bhyverun.h"
55 #include "acpi.h"
56 #include "inout.h"
57 #include "dbgport.h"
58 #include "ioapic.h"
59 #include "mem.h"
60 #include "mevent.h"
61 #include "mptbl.h"
62 #include "pci_emul.h"
63 #include "pci_lpc.h"
64 #include "xmsr.h"
65 #include "spinup_ap.h"
66 #include "rtc.h"
67 
68 #define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */
69 
70 #define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
71 #define	VMEXIT_CONTINUE		1	/* continue from next instruction */
72 #define	VMEXIT_RESTART		2	/* restart current instruction */
73 #define	VMEXIT_ABORT		3	/* abort the vm run loop */
74 #define	VMEXIT_RESET		4	/* guest machine has reset */
75 #define	VMEXIT_POWEROFF		5	/* guest machine has powered off */
76 
77 #define MB		(1024UL * 1024)
78 #define GB		(1024UL * MB)
79 
80 typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
81 
82 char *vmname;
83 
84 int guest_ncpus;
85 
86 static int pincpu = -1;
87 static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
88 static int virtio_msix = 1;
89 static int x2apic_mode = 0;	/* default is xAPIC */
90 
91 static int strictio;
92 static int strictmsr = 1;
93 
94 static int acpi;
95 
96 static char *progname;
97 static const int BSP = 0;
98 
99 static int cpumask;
100 
101 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
102 
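/*
 * Per-vcpu VM-exit state; vm_run() fills in the entry for the vcpu it ran.
 */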
103 struct vm_exit vmexit[VM_MAXCPU];
104 
105 struct bhyvestats {
106         uint64_t        vmexit_bogus;
107         uint64_t        vmexit_bogus_switch;
108         uint64_t        vmexit_hlt;
109         uint64_t        vmexit_pause;
110         uint64_t        vmexit_mtrap;
111         uint64_t        vmexit_inst_emul;
112         uint64_t        cpu_switch_rotate;
113         uint64_t        cpu_switch_direct;
114         int             io_reset;
115 } stats;
116 
117 struct mt_vmm_info {
118 	pthread_t	mt_thr;
119 	struct vmctx	*mt_ctx;
120 	int		mt_vcpu;
121 } mt_vmm_info[VM_MAXCPU];
122 
123 static void
124 usage(int code)
125 {
126 
127         fprintf(stderr,
128                 "Usage: %s [-abehwxAHIPW] [-g <gdb port>] [-s <pci>]\n"
129 		"       %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
130 		"       -a: local apic is in xAPIC mode (deprecated)\n"
131 		"       -A: create an ACPI table\n"
		"       -b: enable bvmconsole\n"
132 		"       -g: gdb port\n"
133 		"       -c: # cpus (default 1)\n"
134 		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
135 		"       -H: vmexit from the guest on hlt\n"
136 		"       -P: vmexit from the guest on pause\n"
137 		"       -W: force virtio to use single-vector MSI\n"
138 		"       -e: exit on unhandled I/O access\n"
139 		"       -h: help\n"
140 		"       -s: <slot,driver,configinfo> PCI slot config\n"
141 		"       -l: LPC device configuration\n"
142 		"       -m: memory size in MB\n"
143 		"       -w: ignore unimplemented MSRs\n"
144 		"       -x: local apic is in x2APIC mode\n",
145 		progname, (int)strlen(progname), "");
146 
147 	exit(code);
148 }
149 
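/*
 * Translate a guest physical address into the corresponding host mapping.
 */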
150 void *
151 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
152 {
153 
154 	return (vm_map_gpa(ctx, gaddr, len));
155 }
156 
157 int
158 fbsdrun_vmexit_on_pause(void)
159 {
160 
161 	return (guest_vmexit_on_pause);
162 }
163 
164 int
165 fbsdrun_vmexit_on_hlt(void)
166 {
167 
168 	return (guest_vmexit_on_hlt);
169 }
170 
171 int
172 fbsdrun_virtio_msix(void)
173 {
174 
175 	return (virtio_msix);
176 }
177 
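/*
 * Entry point for each vcpu thread: name the thread after its vcpu and
 * enter the vcpu run loop.  vm_loop() does not return.
 */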
178 static void *
179 fbsdrun_start_thread(void *param)
180 {
181 	char tname[MAXCOMLEN + 1];
182 	struct mt_vmm_info *mtp;
183 	int vcpu;
184 
185 	mtp = param;
186 	vcpu = mtp->mt_vcpu;
187 
188 	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
189 	pthread_set_name_np(mtp->mt_thr, tname);
190 
191 	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);
192 
193 	/* not reached */
194 	exit(1);
195 	return (NULL);
196 }
197 
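/*
 * Mark a vcpu as active and create a dedicated thread that starts
 * running it at the given RIP.
 */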
198 void
199 fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
200 {
201 	int error;
202 
203 	if (cpumask & (1 << vcpu)) {
204 		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
205 		    vcpu);
206 		exit(1);
207 	}
208 
209 	atomic_set_int(&cpumask, 1 << vcpu);
210 
211 	/*
212 	 * Set up the vmexit struct to allow execution to start
213 	 * at the given RIP
214 	 */
215 	vmexit[vcpu].rip = rip;
216 	vmexit[vcpu].inst_length = 0;
217 
218 	mt_vmm_info[vcpu].mt_ctx = ctx;
219 	mt_vmm_info[vcpu].mt_vcpu = vcpu;
220 
221 	error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
222 	    fbsdrun_start_thread, &mt_vmm_info[vcpu]);
223 	assert(error == 0);
224 }
225 
226 static int
227 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
228 {
229 
230 	if ((cpumask & (1 << vcpu)) == 0) {
231 		fprintf(stderr, "deletecpu: attempting to delete unknown cpu %d\n",
232 		    vcpu);
233 		exit(1);
234 	}
235 
236 	atomic_clear_int(&cpumask, 1 << vcpu);
237 	return (cpumask == 0);
238 }
239 
240 static int
241 vmexit_catch_reset(void)
242 {
243         stats.io_reset++;
244         return (VMEXIT_RESET);
245 }
246 
247 static int
248 vmexit_catch_inout(void)
249 {
250 	return (VMEXIT_ABORT);
251 }
252 
253 static int
254 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
255 		     uint32_t eax)
256 {
257 #if BHYVE_DEBUG
258 	/*
259 	 * put guest-driven debug here
260 	 */
261 #endif
262         return (VMEXIT_CONTINUE);
263 }
264 
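/*
 * Handle an I/O port access exit: special-case guest-initiated reset
 * (a write of 0xFE to port 0x64) and host notifications on
 * GUEST_NIO_PORT, then defer to the registered port handlers.
 */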
265 static int
266 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
267 {
268 	int error;
269 	int bytes, port, in, out;
270 	uint32_t eax;
271 	int vcpu;
272 
273 	vcpu = *pvcpu;
274 
275 	port = vme->u.inout.port;
276 	bytes = vme->u.inout.bytes;
277 	eax = vme->u.inout.eax;
278 	in = vme->u.inout.in;
279 	out = !in;
280 
281 	/* We don't deal with these */
282 	if (vme->u.inout.string || vme->u.inout.rep)
283 		return (VMEXIT_ABORT);
284 
285 	/* Special case of guest reset */
286 	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
287 		return (vmexit_catch_reset());
288 
289         /* Extra-special case of host notifications */
290         if (out && port == GUEST_NIO_PORT)
291                 return (vmexit_handle_notify(ctx, vme, pvcpu, eax));
292 
293 	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
294 	if (error == INOUT_OK && in)
295 		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);
296 
297 	switch (error) {
298 	case INOUT_OK:
299 		return (VMEXIT_CONTINUE);
300 	case INOUT_RESET:
301 		return (VMEXIT_RESET);
302 	case INOUT_POWEROFF:
303 		return (VMEXIT_POWEROFF);
304 	default:
305 		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
306 			in ? "in" : "out",
307 			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
308 		return (vmexit_catch_inout());
309 	}
310 }
311 
312 static int
313 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
314 {
315 	uint64_t val;
316 	uint32_t eax, edx;
317 	int error;
318 
319 	val = 0;
320 	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
321 	if (error != 0) {
322 		fprintf(stderr, "rdmsr from register %#x on vcpu %d\n",
323 		    vme->u.msr.code, *pvcpu);
324 		if (strictmsr) {
325 			error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
326 			assert(error == 0);
327 			return (VMEXIT_RESTART);
328 		}
329 	}
330 
331 	eax = val;
332 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
333 	assert(error == 0);
334 
335 	edx = val >> 32;
336 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
337 	assert(error == 0);
338 
339 	return (VMEXIT_CONTINUE);
340 }
341 
342 static int
343 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
344 {
345 	int error;
346 
347 	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
348 	if (error != 0) {
349 		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
350 		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
351 		if (strictmsr) {
352 			error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
353 			assert(error == 0);
354 			return (VMEXIT_RESTART);
355 		}
356 	}
357 	return (VMEXIT_CONTINUE);
358 }
359 
360 static int
361 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
362 {
363 	int newcpu;
364 	int retval = VMEXIT_CONTINUE;
365 
366 	newcpu = spinup_ap(ctx, *pvcpu,
367 			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
368 
369 	return (retval);
370 }
371 
372 static int
373 vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
374 {
375 	int lastcpu;
376 
377 	lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
378 	if (!lastcpu)
379 		pthread_exit(NULL);
380 	return (vmexit_catch_reset());
381 }
382 
383 static int
384 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
385 {
386 
387 	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
388 	fprintf(stderr, "\treason\t\tVMX\n");
389 	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
390 	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
391 	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
392 	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
393 	fprintf(stderr, "\tqualification\t0x%016lx\n",
394 	    vmexit->u.vmx.exit_qualification);
395 	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
396 	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
397 
398 	return (VMEXIT_ABORT);
399 }
400 
401 static int
402 vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
403 {
404 
405 	stats.vmexit_bogus++;
406 
407 	return (VMEXIT_RESTART);
408 }
409 
410 static int
411 vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
412 {
413 
414 	stats.vmexit_hlt++;
415 
416 	/*
417 	 * Just continue execution with the next instruction. We use
418 	 * the HLT VM exit as a way to be friendly with the host
419 	 * scheduler.
420 	 */
421 	return (VMEXIT_CONTINUE);
422 }
423 
424 static int
425 vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
426 {
427 
428 	stats.vmexit_pause++;
429 
430 	return (VMEXIT_CONTINUE);
431 }
432 
433 static int
434 vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
435 {
436 
437 	stats.vmexit_mtrap++;
438 
439 	return (VMEXIT_RESTART);
440 }
441 
442 static int
443 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
444 {
445 	int err;
446 	stats.vmexit_inst_emul++;
447 
448 	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
449 			  &vmexit->u.inst_emul.vie);
450 
451 	if (err) {
452 		if (err == EINVAL) {
453 			fprintf(stderr,
454 			    "Failed to emulate instruction at 0x%lx\n",
455 			    vmexit->rip);
456 		} else if (err == ESRCH) {
457 			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
458 			    vmexit->u.inst_emul.gpa);
459 		}
460 
461 		return (VMEXIT_ABORT);
462 	}
463 
464 	return (VMEXIT_CONTINUE);
465 }
466 
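/*
 * VM-exit dispatch table, indexed by exit code.  The HLT and PAUSE
 * entries are installed at runtime by fbsdrun_set_capabilities() when
 * those exits are enabled.
 */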
467 static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
468 	[VM_EXITCODE_INOUT]  = vmexit_inout,
469 	[VM_EXITCODE_VMX]    = vmexit_vmx,
470 	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
471 	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
472 	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
473 	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
474 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
475 	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
476 	[VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
477 };
478 
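/*
 * Per-vcpu run loop: optionally pin the thread to a host cpu, then
 * repeatedly call vm_run() and dispatch each exit to its handler.  The
 * handler's return value selects whether to continue at the next
 * instruction, restart the current one, or terminate the process.
 */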
479 static void
480 vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
481 {
482 	cpuset_t mask;
483 	int error, rc, prevcpu;
484 	enum vm_exitcode exitcode;
485 
486 	if (pincpu >= 0) {
487 		CPU_ZERO(&mask);
488 		CPU_SET(pincpu + vcpu, &mask);
489 		error = pthread_setaffinity_np(pthread_self(),
490 					       sizeof(mask), &mask);
491 		assert(error == 0);
492 	}
493 
494 	while (1) {
495 		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
496 		if (error != 0)
497 			break;
498 
499 		prevcpu = vcpu;
500 
501 		exitcode = vmexit[vcpu].exitcode;
502 		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
503 			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
504 			    exitcode);
505 			exit(1);
506 		}
507 
508                 rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
509 
510 		switch (rc) {
511 		case VMEXIT_CONTINUE:
512                         rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
513 			break;
514 		case VMEXIT_RESTART:
515                         rip = vmexit[vcpu].rip;
516 			break;
517 		case VMEXIT_RESET:
518 			exit(0);
519 		default:
520 			exit(1);
521 		}
522 	}
523 	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
524 }
525 
526 static int
527 num_vcpus_allowed(struct vmctx *ctx)
528 {
529 	int tmp, error;
530 
531 	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
532 
533 	/*
534 	 * The guest is allowed to spin up more than one processor only if the
535 	 * UNRESTRICTED_GUEST capability is available.
536 	 */
537 	if (error == 0)
538 		return (VM_MAXCPU);
539 	else
540 		return (1);
541 }
542 
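/*
 * Enable the optional VM capabilities selected on the command line
 * (HLT and PAUSE exits), set the vcpu's x2APIC state, and enable
 * INVPCID for the guest.
 */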
543 void
544 fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
545 {
546 	int err, tmp;
547 
548 	if (fbsdrun_vmexit_on_hlt()) {
549 		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
550 		if (err < 0) {
551 			fprintf(stderr, "VM exit on HLT not supported\n");
552 			exit(1);
553 		}
554 		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
555 		if (cpu == BSP)
556 			handler[VM_EXITCODE_HLT] = vmexit_hlt;
557 	}
558 
559         if (fbsdrun_vmexit_on_pause()) {
560 		/*
561 		 * pause exit support required for this mode
562 		 */
563 		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
564 		if (err < 0) {
565 			fprintf(stderr,
566 			    "VM exit on PAUSE not supported\n");
567 			exit(1);
568 		}
569 		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
570 		if (cpu == BSP)
571 			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
572         }
573 
574 	if (x2apic_mode)
575 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
576 	else
577 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
578 
579 	if (err) {
580 		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
581 		exit(1);
582 	}
583 
584 	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
585 }
586 
587 int
588 main(int argc, char *argv[])
589 {
590 	int c, error, gdb_port, err, bvmcons;
591 	int max_vcpus;
592 	struct vmctx *ctx;
593 	uint64_t rip;
594 	size_t memsize;
595 
596 	bvmcons = 0;
597 	progname = basename(argv[0]);
598 	gdb_port = 0;
599 	guest_ncpus = 1;
600 	memsize = 256 * MB;
601 
602 	while ((c = getopt(argc, argv, "abehwxAHIPWp:g:c:s:m:l:")) != -1) {
603 		switch (c) {
604 		case 'a':
605 			x2apic_mode = 0;
606 			break;
607 		case 'A':
608 			acpi = 1;
609 			break;
610 		case 'b':
611 			bvmcons = 1;
612 			break;
613 		case 'p':
614 			pincpu = atoi(optarg);
615 			break;
616                 case 'c':
617 			guest_ncpus = atoi(optarg);
618 			break;
619 		case 'g':
620 			gdb_port = atoi(optarg);
621 			break;
622 		case 'l':
623 			if (lpc_device_parse(optarg) != 0) {
624 				errx(EX_USAGE, "invalid lpc device "
625 				    "configuration '%s'", optarg);
626 			}
627 			break;
628 		case 's':
629 			if (pci_parse_slot(optarg) != 0)
630 				exit(1);
631 			else
632 				break;
633                 case 'm':
634 			error = vm_parse_memsize(optarg, &memsize);
635 			if (error)
636 				errx(EX_USAGE, "invalid memsize '%s'", optarg);
637 			break;
638 		case 'H':
639 			guest_vmexit_on_hlt = 1;
640 			break;
641 		case 'I':
642 			/*
643 			 * The "-I" option was used to add an ioapic to the
644 			 * virtual machine.
645 			 *
646 			 * An ioapic is now provided unconditionally for each
647 			 * virtual machine and this option is now deprecated.
648 			 */
649 			break;
650 		case 'P':
651 			guest_vmexit_on_pause = 1;
652 			break;
653 		case 'e':
654 			strictio = 1;
655 			break;
656 		case 'w':
657 			strictmsr = 0;
658 			break;
659 		case 'W':
660 			virtio_msix = 0;
661 			break;
662 		case 'x':
663 			x2apic_mode = 1;
664 			break;
665 		case 'h':
666 			usage(0);
667 		default:
668 			usage(1);
669 		}
670 	}
671 	argc -= optind;
672 	argv += optind;
673 
674 	if (argc != 1)
675 		usage(1);
676 
677 	vmname = argv[0];
678 
679 	ctx = vm_open(vmname);
680 	if (ctx == NULL) {
681 		perror("vm_open");
682 		exit(1);
683 	}
684 
685 	max_vcpus = num_vcpus_allowed(ctx);
686 	if (guest_ncpus > max_vcpus) {
687 		fprintf(stderr, "%d vCPUs requested but only %d available\n",
688 			guest_ncpus, max_vcpus);
689 		exit(1);
690 	}
691 
692 	fbsdrun_set_capabilities(ctx, BSP);
693 
694 	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
695 	if (err) {
696 		fprintf(stderr, "Unable to set up memory (%d)\n", err);
697 		exit(1);
698 	}
699 
700 	init_mem();
701 	init_inout();
702 	ioapic_init(ctx);
703 
704 	rtc_init(ctx);
705 
706 	/*
707 	 * Exit if a device emulation finds an error in its initialization
708 	 */
709 	if (init_pci(ctx) != 0)
710 		exit(1);
711 
712 	if (gdb_port != 0)
713 		init_dbgport(gdb_port);
714 
715 	if (bvmcons)
716 		init_bvmcons();
717 
718 	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
719 	assert(error == 0);
720 
721 	/*
722 	 * Build the guest tables: the MP table always, and ACPI tables if enabled
723 	 */
724 	mptable_build(ctx, guest_ncpus);
725 
726 	if (acpi) {
727 		error = acpi_build(ctx, guest_ncpus);
728 		assert(error == 0);
729 	}
730 
731 	/*
732 	 * Change the proc title to include the VM name.
733 	 */
734 	setproctitle("%s", vmname);
735 
736 	/*
737 	 * Add CPU 0
738 	 */
739 	fbsdrun_addcpu(ctx, BSP, rip);
740 
741 	/*
742 	 * Head off to the main event dispatch loop
743 	 */
744 	mevent_dispatch();
745 
746 	exit(1);
747 }
748