xref: /freebsd/usr.sbin/bhyve/bhyverun.c (revision a0409676120c1e558d0ade943019934e0f15118d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/types.h>
35 #ifndef WITHOUT_CAPSICUM
36 #include <sys/capsicum.h>
37 #endif
38 #include <sys/mman.h>
39 #ifdef BHYVE_SNAPSHOT
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #endif
43 #include <sys/time.h>
44 #ifdef BHYVE_SNAPSHOT
45 #include <sys/un.h>
46 #endif
47 
48 #include <amd64/vmm/intel/vmcs.h>
49 
50 #include <machine/atomic.h>
51 #include <machine/segments.h>
52 
53 #ifndef WITHOUT_CAPSICUM
54 #include <capsicum_helpers.h>
55 #endif
56 #include <stdio.h>
57 #include <stdlib.h>
58 #include <string.h>
59 #include <err.h>
60 #include <errno.h>
61 #ifdef BHYVE_SNAPSHOT
62 #include <fcntl.h>
63 #endif
64 #include <libgen.h>
65 #include <unistd.h>
66 #include <assert.h>
67 #include <pthread.h>
68 #include <pthread_np.h>
69 #include <sysexits.h>
70 #include <stdbool.h>
71 #include <stdint.h>
72 #ifdef BHYVE_SNAPSHOT
73 #include <ucl.h>
74 #include <unistd.h>
75 
76 #include <libxo/xo.h>
77 #endif
78 
79 #include <machine/vmm.h>
80 #ifndef WITHOUT_CAPSICUM
81 #include <machine/vmm_dev.h>
82 #endif
83 #include <machine/vmm_instruction_emul.h>
84 #include <vmmapi.h>
85 
86 #include "bhyverun.h"
87 #include "acpi.h"
88 #include "atkbdc.h"
89 #include "bootrom.h"
90 #include "config.h"
91 #include "inout.h"
92 #include "debug.h"
93 #include "fwctl.h"
94 #include "gdb.h"
95 #include "ioapic.h"
96 #include "kernemu_dev.h"
97 #include "mem.h"
98 #include "mevent.h"
99 #include "mptbl.h"
100 #include "pci_emul.h"
101 #include "pci_irq.h"
102 #include "pci_lpc.h"
103 #include "smbiostbl.h"
104 #ifdef BHYVE_SNAPSHOT
105 #include "snapshot.h"
106 #endif
107 #include "xmsr.h"
108 #include "spinup_ap.h"
109 #include "rtc.h"
110 #include "vmgenc.h"
111 
112 #define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */
113 
114 #define MB		(1024UL * 1024)
115 #define GB		(1024UL * MB)
116 
117 static const char * const vmx_exit_reason_desc[] = {
118 	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
119 	[EXIT_REASON_EXT_INTR] = "External interrupt",
120 	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
121 	[EXIT_REASON_INIT] = "INIT signal",
122 	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
123 	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
124 	[EXIT_REASON_SMI] = "Other SMI",
125 	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
126 	[EXIT_REASON_NMI_WINDOW] = "NMI window",
127 	[EXIT_REASON_TASK_SWITCH] = "Task switch",
128 	[EXIT_REASON_CPUID] = "CPUID",
129 	[EXIT_REASON_GETSEC] = "GETSEC",
130 	[EXIT_REASON_HLT] = "HLT",
131 	[EXIT_REASON_INVD] = "INVD",
132 	[EXIT_REASON_INVLPG] = "INVLPG",
133 	[EXIT_REASON_RDPMC] = "RDPMC",
134 	[EXIT_REASON_RDTSC] = "RDTSC",
135 	[EXIT_REASON_RSM] = "RSM",
136 	[EXIT_REASON_VMCALL] = "VMCALL",
137 	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
138 	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
139 	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
140 	[EXIT_REASON_VMPTRST] = "VMPTRST",
141 	[EXIT_REASON_VMREAD] = "VMREAD",
142 	[EXIT_REASON_VMRESUME] = "VMRESUME",
143 	[EXIT_REASON_VMWRITE] = "VMWRITE",
144 	[EXIT_REASON_VMXOFF] = "VMXOFF",
145 	[EXIT_REASON_VMXON] = "VMXON",
146 	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
147 	[EXIT_REASON_DR_ACCESS] = "MOV DR",
148 	[EXIT_REASON_INOUT] = "I/O instruction",
149 	[EXIT_REASON_RDMSR] = "RDMSR",
150 	[EXIT_REASON_WRMSR] = "WRMSR",
151 	[EXIT_REASON_INVAL_VMCS] =
152 	    "VM-entry failure due to invalid guest state",
153 	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
154 	[EXIT_REASON_MWAIT] = "MWAIT",
155 	[EXIT_REASON_MTF] = "Monitor trap flag",
156 	[EXIT_REASON_MONITOR] = "MONITOR",
157 	[EXIT_REASON_PAUSE] = "PAUSE",
158 	[EXIT_REASON_MCE_DURING_ENTRY] =
159 	    "VM-entry failure due to machine-check event",
160 	[EXIT_REASON_TPR] = "TPR below threshold",
161 	[EXIT_REASON_APIC_ACCESS] = "APIC access",
162 	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
163 	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
164 	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
165 	[EXIT_REASON_EPT_FAULT] = "EPT violation",
166 	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
167 	[EXIT_REASON_INVEPT] = "INVEPT",
168 	[EXIT_REASON_RDTSCP] = "RDTSCP",
169 	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
170 	[EXIT_REASON_INVVPID] = "INVVPID",
171 	[EXIT_REASON_WBINVD] = "WBINVD",
172 	[EXIT_REASON_XSETBV] = "XSETBV",
173 	[EXIT_REASON_APIC_WRITE] = "APIC write",
174 	[EXIT_REASON_RDRAND] = "RDRAND",
175 	[EXIT_REASON_INVPCID] = "INVPCID",
176 	[EXIT_REASON_VMFUNC] = "VMFUNC",
177 	[EXIT_REASON_ENCLS] = "ENCLS",
178 	[EXIT_REASON_RDSEED] = "RDSEED",
179 	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
180 	[EXIT_REASON_XSAVES] = "XSAVES",
181 	[EXIT_REASON_XRSTORS] = "XRSTORS"
182 };
183 
184 typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
185 extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);
186 
187 int guest_ncpus;
188 uint16_t cores, maxcpus, sockets, threads;
189 
190 int raw_stdio = 0;
191 
192 static char *progname;
193 static const int BSP = 0;
194 
195 static cpuset_t cpumask;
196 
197 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
198 
199 static struct vm_exit vmexit[VM_MAXCPU];
200 
201 struct bhyvestats {
202 	uint64_t	vmexit_bogus;
203 	uint64_t	vmexit_reqidle;
204 	uint64_t	vmexit_hlt;
205 	uint64_t	vmexit_pause;
206 	uint64_t	vmexit_mtrap;
207 	uint64_t	vmexit_inst_emul;
208 	uint64_t	cpu_switch_rotate;
209 	uint64_t	cpu_switch_direct;
210 } stats;
211 
212 struct mt_vmm_info {
213 	pthread_t	mt_thr;
214 	struct vmctx	*mt_ctx;
215 	int		mt_vcpu;
216 } mt_vmm_info[VM_MAXCPU];
217 
218 static cpuset_t *vcpumap[VM_MAXCPU] = { NULL };
219 
220 static void
221 usage(int code)
222 {
223 
224 	fprintf(stderr,
225 		"Usage: %s [-aehuwxACDHPSWY]\n"
226 		"       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
227 		"       %*s [-k <file>] [-l <lpc>] [-m mem] [-o <var>=<value>]\n"
228 		"       %*s [-p vcpu:hostcpu] [-s <pci>] [-U uuid] [<vm>]\n"
229 		"       -a: local apic is in xAPIC mode (deprecated)\n"
230 		"       -A: create ACPI tables\n"
231 		"       -c: number of cpus and/or topology specification\n"
232 		"       -C: include guest memory in core file\n"
233 		"       -D: destroy on power-off\n"
234 		"       -e: exit on unhandled I/O access\n"
235 		"       -h: help\n"
236 		"       -H: vmexit from the guest on hlt\n"
237 		"       -k: key=value flat config file\n"
238 		"       -l: LPC device configuration\n"
239 		"       -m: memory size in MB\n"
240 		"       -o: set config 'var' to 'value'\n"
241 		"       -p: pin 'vcpu' to 'hostcpu'\n"
242 		"       -P: vmexit from the guest on pause\n"
243 #ifdef BHYVE_SNAPSHOT
244 		"       -r: path to checkpoint file\n"
245 #endif
246 		"       -s: <slot,driver,configinfo> PCI slot config\n"
247 		"       -S: guest memory cannot be swapped\n"
248 		"       -u: RTC keeps UTC time\n"
249 		"       -U: uuid\n"
250 		"       -w: ignore unimplemented MSRs\n"
251 		"       -W: force virtio to use single-vector MSI\n"
252 		"       -x: local apic is in x2APIC mode\n"
253 		"       -Y: disable MPtable generation\n",
254 		progname, (int)strlen(progname), "", (int)strlen(progname), "",
255 		(int)strlen(progname), "");
256 
257 	exit(code);
258 }
259 
260 /*
261  * XXX This parser is known to have the following issues:
262  * 1.  It accepts null key=value tokens ",," as setting "cpus" to an
263  *     empty string.
264  *
265  * The acceptance of a null specification ('-c ""') is by design to match the
266  * manual page syntax specification; it results in a topology of 1 vCPU.
267  */
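/*
 * For example, "-c 4" (or "-c cpus=4") sets the "cpus" config value to 4,
 * while "-c 2,sockets=2,cores=1,threads=1" sets "cpus" to 2 with an explicit
 * 2x1x1 topology.  Any other "key=" token is rejected.
 */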
268 static int
269 topology_parse(const char *opt)
270 {
271 	char *cp, *str, *tofree;
272 
273 	if (*opt == '\0') {
274 		set_config_value("sockets", "1");
275 		set_config_value("cores", "1");
276 		set_config_value("threads", "1");
277 		set_config_value("cpus", "1");
278 		return (0);
279 	}
280 
281 	tofree = str = strdup(opt);
282 	if (str == NULL)
283 		errx(4, "Failed to allocate memory");
284 
285 	while ((cp = strsep(&str, ",")) != NULL) {
286 		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
287 			set_config_value("cpus", cp + strlen("cpus="));
288 		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
289 			set_config_value("sockets", cp + strlen("sockets="));
290 		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
291 			set_config_value("cores", cp + strlen("cores="));
292 		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
293 			set_config_value("threads", cp + strlen("threads="));
294 #ifdef notyet  /* Do not expose this until vmm.ko implements it */
295 		else if (strncmp(cp, "maxcpus=", strlen("maxcpus=")) == 0)
296 			set_config_value("maxcpus", cp + strlen("maxcpus="));
297 #endif
298 		else if (strchr(cp, '=') != NULL)
299 			goto out;
300 		else
301 			set_config_value("cpus", cp);
302 	}
303 	free(tofree);
304 	return (0);
305 
306 out:
307 	free(tofree);
308 	return (-1);
309 }
310 
311 static int
312 parse_int_value(const char *key, const char *value, int minval, int maxval)
313 {
314 	char *cp;
315 	long lval;
316 
317 	errno = 0;
318 	lval = strtol(value, &cp, 0);
319 	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
320 	    lval > maxval)
321 		errx(4, "Invalid value for %s: '%s'", key, value);
322 	return (lval);
323 }
324 
325 /*
326  * Set the sockets, cores, threads, and guest_cpus variables based on
327  * the configured topology.
328  *
329  * The limits of UINT16_MAX are due to the types passed to
330  * vm_set_topology().  vmm.ko may enforce tighter limits.
331  */
332 static void
333 calc_topology(void)
334 {
335 	const char *value;
336 	bool explicit_cpus;
337 	uint64_t ncpus;
338 
339 	value = get_config_value("cpus");
340 	if (value != NULL) {
341 		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
342 		explicit_cpus = true;
343 	} else {
344 		guest_ncpus = 1;
345 		explicit_cpus = false;
346 	}
347 	value = get_config_value("cores");
348 	if (value != NULL)
349 		cores = parse_int_value("cores", value, 1, UINT16_MAX);
350 	else
351 		cores = 1;
352 	value = get_config_value("threads");
353 	if (value != NULL)
354 		threads = parse_int_value("threads", value, 1, UINT16_MAX);
355 	else
356 		threads = 1;
357 	value = get_config_value("sockets");
358 	if (value != NULL)
359 		sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
360 	else
361 		sockets = guest_ncpus;
362 
363 	/*
364 	 * Compute sockets * cores * threads avoiding overflow.  The
365 	 * range check above ensures these are 16-bit values.
366 	 */
367 	ncpus = (uint64_t)sockets * cores * threads;
368 	if (ncpus > UINT16_MAX)
369 		errx(4, "Computed number of vCPUs too high: %ju",
370 		    (uintmax_t)ncpus);
371 
372 	if (explicit_cpus) {
373 		if (guest_ncpus != ncpus)
374 			errx(4, "Topology (%d sockets, %d cores, %d threads) "
375 			    "does not match %d vCPUs", sockets, cores, threads,
376 			    guest_ncpus);
377 	} else
378 		guest_ncpus = ncpus;
379 }
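/*
 * For example, "-c cpus=4,sockets=2,cores=2,threads=1" is accepted because
 * 2 * 2 * 1 matches the explicit vCPU count of 4, whereas
 * "-c cpus=6,sockets=2,cores=2" fails the consistency check above and exits.
 * Without an explicit "cpus" value the product itself becomes guest_ncpus.
 */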
380 
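/*
 * Parse a "-p vcpu:hostcpu" pinning option.  Repeated options for the same
 * vCPU accumulate, e.g. "-p 1:4 -p 1:6" leaves the config value
 * "vcpu.1.cpuset" set to "4,6"; build_vcpumaps() later turns that string
 * into a cpuset_t.
 */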
381 static int
382 pincpu_parse(const char *opt)
383 {
384 	const char *value;
385 	char *newval;
386 	char key[16];
387 	int vcpu, pcpu;
388 
389 	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
390 		fprintf(stderr, "invalid format: %s\n", opt);
391 		return (-1);
392 	}
393 
394 	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
395 		fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
396 		    vcpu, VM_MAXCPU - 1);
397 		return (-1);
398 	}
399 
400 	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
401 		fprintf(stderr, "hostcpu '%d' outside valid range from "
402 		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
403 		return (-1);
404 	}
405 
406 	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
407 	value = get_config_value(key);
408 
409 	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
410 	    value != NULL ? "," : "", pcpu) == -1) {
411 		perror("failed to build new cpuset string");
412 		return (-1);
413 	}
414 
415 	set_config_value(key, newval);
416 	free(newval);
417 	return (0);
418 }
419 
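/*
 * Parse a host CPU list of the form "0,2,4-6" into *set.  Entries are
 * separated by commas and "a-b" denotes an inclusive range with a <= b;
 * anything else, or a CPU outside 0..CPU_SETSIZE-1, is a fatal error.
 */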
420 static void
421 parse_cpuset(int vcpu, const char *list, cpuset_t *set)
422 {
423 	char *cp, *token;
424 	int pcpu, start;
425 
426 	CPU_ZERO(set);
427 	start = -1;
428 	token = __DECONST(char *, list);
429 	for (;;) {
430 		pcpu = strtoul(token, &cp, 0);
431 		if (cp == token)
432 			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
433 		if (pcpu < 0 || pcpu >= CPU_SETSIZE)
434 			errx(4, "hostcpu '%d' outside valid range from 0 to %d",
435 			    pcpu, CPU_SETSIZE - 1);
436 		switch (*cp) {
437 		case ',':
438 		case '\0':
439 			if (start >= 0) {
440 				if (start > pcpu)
441 					errx(4, "Invalid hostcpu range %d-%d",
442 					    start, pcpu);
443 				while (start < pcpu) {
444 					CPU_SET(start, set);
445 					start++;
446 				}
447 				start = -1;
448 			}
449 			CPU_SET(pcpu, set);
450 			break;
451 		case '-':
452 			if (start >= 0)
453 				errx(4, "invalid cpuset for vcpu %d: '%s'",
454 				    vcpu, list);
455 			start = pcpu;
456 			break;
457 		default:
458 			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
459 		}
460 		if (*cp == '\0')
461 			break;
462 		token = cp + 1;
463 	}
464 }
465 
466 static void
467 build_vcpumaps(void)
468 {
469 	char key[16];
470 	const char *value;
471 	int vcpu;
472 
473 	for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
474 		snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
475 		value = get_config_value(key);
476 		if (value == NULL)
477 			continue;
478 		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
479 		if (vcpumap[vcpu] == NULL)
480 			err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
481 		parse_cpuset(vcpu, value, vcpumap[vcpu]);
482 	}
483 }
484 
485 void
486 vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
487     int errcode)
488 {
489 	struct vmctx *ctx;
490 	int error, restart_instruction;
491 
492 	ctx = arg;
493 	restart_instruction = 1;
494 
495 	error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
496 	    restart_instruction);
497 	assert(error == 0);
498 }
499 
500 void *
501 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
502 {
503 
504 	return (vm_map_gpa(ctx, gaddr, len));
505 }
506 
507 #ifdef BHYVE_SNAPSHOT
508 uintptr_t
509 paddr_host2guest(struct vmctx *ctx, void *addr)
510 {
511 	return (vm_rev_map_gpa(ctx, addr));
512 }
513 #endif
514 
515 int
516 fbsdrun_virtio_msix(void)
517 {
518 
519 	return (get_config_bool_default("virtio_msix", true));
520 }
521 
522 static void *
523 fbsdrun_start_thread(void *param)
524 {
525 	char tname[MAXCOMLEN + 1];
526 	struct mt_vmm_info *mtp;
527 	int vcpu;
528 
529 	mtp = param;
530 	vcpu = mtp->mt_vcpu;
531 
532 	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
533 	pthread_set_name_np(mtp->mt_thr, tname);
534 
535 #ifdef BHYVE_SNAPSHOT
536 	checkpoint_cpu_add(vcpu);
537 #endif
538 	gdb_cpu_add(vcpu);
539 
540 	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);
541 
542 	/* not reached */
543 	exit(1);
544 	return (NULL);
545 }
546 
547 void
548 fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
549 {
550 	int error;
551 
552 	assert(fromcpu == BSP);
553 
554 	/*
555 	 * The 'newcpu' must be activated in the context of 'fromcpu'. If
556 	 * vm_activate_cpu() is delayed until newcpu's pthread starts running
557 	 * then vmm.ko is out-of-sync with bhyve and this can create a race
558 	 * with vm_suspend().
559 	 */
560 	error = vm_activate_cpu(ctx, newcpu);
561 	if (error != 0)
562 		err(EX_OSERR, "could not activate CPU %d", newcpu);
563 
564 	CPU_SET_ATOMIC(newcpu, &cpumask);
565 
566 	/*
567 	 * Set up the vmexit struct to allow execution to start
568 	 * at the given RIP
569 	 */
570 	vmexit[newcpu].rip = rip;
571 	vmexit[newcpu].inst_length = 0;
572 
573 	mt_vmm_info[newcpu].mt_ctx = ctx;
574 	mt_vmm_info[newcpu].mt_vcpu = newcpu;
575 
576 	error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
577 	    fbsdrun_start_thread, &mt_vmm_info[newcpu]);
578 	assert(error == 0);
579 }
580 
581 static int
582 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
583 {
584 
585 	if (!CPU_ISSET(vcpu, &cpumask)) {
586 		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
587 		exit(4);
588 	}
589 
590 	CPU_CLR_ATOMIC(vcpu, &cpumask);
591 	return (CPU_EMPTY(&cpumask));
592 }
593 
594 static int
595 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
596 		     uint32_t eax)
597 {
598 #if BHYVE_DEBUG
599 	/*
600 	 * put guest-driven debug here
601 	 */
602 #endif
603 	return (VMEXIT_CONTINUE);
604 }
605 
606 static int
607 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
608 {
609 	int error;
610 	int bytes, port, in, out;
611 	int vcpu;
612 
613 	vcpu = *pvcpu;
614 
615 	port = vme->u.inout.port;
616 	bytes = vme->u.inout.bytes;
617 	in = vme->u.inout.in;
618 	out = !in;
619 
620 	/* Extra-special case of host notifications */
621 	if (out && port == GUEST_NIO_PORT) {
622 		error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
623 		return (error);
624 	}
625 
626 	error = emulate_inout(ctx, vcpu, vme);
627 	if (error) {
628 		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
629 		    in ? "in" : "out",
630 		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
631 		    port, vme->rip);
632 		return (VMEXIT_ABORT);
633 	} else {
634 		return (VMEXIT_CONTINUE);
635 	}
636 }
637 
638 static int
639 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
640 {
641 	uint64_t val;
642 	uint32_t eax, edx;
643 	int error;
644 
645 	val = 0;
646 	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
647 	if (error != 0) {
648 		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
649 		    vme->u.msr.code, *pvcpu);
650 		if (get_config_bool("x86.strictmsr")) {
651 			vm_inject_gp(ctx, *pvcpu);
652 			return (VMEXIT_CONTINUE);
653 		}
654 	}
655 
656 	eax = val;
657 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
658 	assert(error == 0);
659 
660 	edx = val >> 32;
661 	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
662 	assert(error == 0);
663 
664 	return (VMEXIT_CONTINUE);
665 }
666 
667 static int
668 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
669 {
670 	int error;
671 
672 	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
673 	if (error != 0) {
674 		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
675 		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
676 		if (get_config_bool("x86.strictmsr")) {
677 			vm_inject_gp(ctx, *pvcpu);
678 			return (VMEXIT_CONTINUE);
679 		}
680 	}
681 	return (VMEXIT_CONTINUE);
682 }
683 
684 static int
685 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
686 {
687 
688 	(void)spinup_ap(ctx, *pvcpu,
689 		    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
690 
691 	return (VMEXIT_CONTINUE);
692 }
693 
694 #define	DEBUG_EPT_MISCONFIG
695 #ifdef DEBUG_EPT_MISCONFIG
696 #define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400
697 
698 static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
699 static int ept_misconfig_ptenum;
700 #endif
701 
702 static const char *
703 vmexit_vmx_desc(uint32_t exit_reason)
704 {
705 
706 	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
707 	    vmx_exit_reason_desc[exit_reason] == NULL)
708 		return ("Unknown");
709 	return (vmx_exit_reason_desc[exit_reason]);
710 }
711 
712 static int
713 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
714 {
715 
716 	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
717 	fprintf(stderr, "\treason\t\tVMX\n");
718 	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
719 	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
720 	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
721 	fprintf(stderr, "\texit_reason\t%u (%s)\n", vmexit->u.vmx.exit_reason,
722 	    vmexit_vmx_desc(vmexit->u.vmx.exit_reason));
723 	fprintf(stderr, "\tqualification\t0x%016lx\n",
724 	    vmexit->u.vmx.exit_qualification);
725 	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
726 	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
727 #ifdef DEBUG_EPT_MISCONFIG
728 	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
729 		vm_get_register(ctx, *pvcpu,
730 		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
731 		    &ept_misconfig_gpa);
732 		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
733 		    &ept_misconfig_ptenum);
734 		fprintf(stderr, "\tEPT misconfiguration:\n");
735 		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
736 		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
737 		    ept_misconfig_ptenum, ept_misconfig_pte[0],
738 		    ept_misconfig_pte[1], ept_misconfig_pte[2],
739 		    ept_misconfig_pte[3]);
740 	}
741 #endif	/* DEBUG_EPT_MISCONFIG */
742 	return (VMEXIT_ABORT);
743 }
744 
745 static int
746 vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
747 {
748 
749 	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
750 	fprintf(stderr, "\treason\t\tSVM\n");
751 	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
752 	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
753 	fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode);
754 	fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1);
755 	fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2);
756 	return (VMEXIT_ABORT);
757 }
758 
759 static int
760 vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
761 {
762 
763 	assert(vmexit->inst_length == 0);
764 
765 	stats.vmexit_bogus++;
766 
767 	return (VMEXIT_CONTINUE);
768 }
769 
770 static int
771 vmexit_reqidle(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
772 {
773 
774 	assert(vmexit->inst_length == 0);
775 
776 	stats.vmexit_reqidle++;
777 
778 	return (VMEXIT_CONTINUE);
779 }
780 
781 static int
782 vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
783 {
784 
785 	stats.vmexit_hlt++;
786 
787 	/*
788 	 * Just continue execution with the next instruction. We use
789 	 * the HLT VM exit as a way to be friendly with the host
790 	 * scheduler.
791 	 */
792 	return (VMEXIT_CONTINUE);
793 }
794 
795 static int
796 vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
797 {
798 
799 	stats.vmexit_pause++;
800 
801 	return (VMEXIT_CONTINUE);
802 }
803 
804 static int
805 vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
806 {
807 
808 	assert(vmexit->inst_length == 0);
809 
810 	stats.vmexit_mtrap++;
811 
812 #ifdef BHYVE_SNAPSHOT
813 	checkpoint_cpu_suspend(*pvcpu);
814 #endif
815 	gdb_cpu_mtrap(*pvcpu);
816 #ifdef BHYVE_SNAPSHOT
817 	checkpoint_cpu_resume(*pvcpu);
818 #endif
819 
820 	return (VMEXIT_CONTINUE);
821 }
822 
823 static int
824 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
825 {
826 	int err, i, cs_d;
827 	struct vie *vie;
828 	enum vm_cpu_mode mode;
829 
830 	stats.vmexit_inst_emul++;
831 
832 	vie = &vmexit->u.inst_emul.vie;
833 	if (!vie->decoded) {
834 		/*
835 		 * Attempt to decode in userspace as a fallback.  This allows
836 		 * updating instruction decode in bhyve without rebooting the
837 		 * kernel (rapid prototyping), albeit with much slower
838 		 * emulation.
839 		 */
840 		vie_restart(vie);
841 		mode = vmexit->u.inst_emul.paging.cpu_mode;
842 		cs_d = vmexit->u.inst_emul.cs_d;
843 		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
844 			goto fail;
845 		if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
846 		    vmexit->rip + vie->num_processed) != 0)
847 			goto fail;
848 	}
849 
850 	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
851 	    vie, &vmexit->u.inst_emul.paging);
852 
853 	if (err) {
854 		if (err == ESRCH) {
855 			EPRINTLN("Unhandled memory access to 0x%lx",
856 			    vmexit->u.inst_emul.gpa);
857 		}
858 		goto fail;
859 	}
860 
861 	return (VMEXIT_CONTINUE);
862 
863 fail:
864 	fprintf(stderr, "Failed to emulate instruction sequence [ ");
865 	for (i = 0; i < vie->num_valid; i++)
866 		fprintf(stderr, "%02x", vie->inst[i]);
867 	FPRINTLN(stderr, " ] at 0x%lx", vmexit->rip);
868 	return (VMEXIT_ABORT);
869 }
870 
871 static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
872 static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;
873 
874 static int
875 vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
876 {
877 	enum vm_suspend_how how;
878 
879 	how = vmexit->u.suspended.how;
880 
881 	fbsdrun_deletecpu(ctx, *pvcpu);
882 
883 	if (*pvcpu != BSP) {
884 		pthread_mutex_lock(&resetcpu_mtx);
885 		pthread_cond_signal(&resetcpu_cond);
886 		pthread_mutex_unlock(&resetcpu_mtx);
887 		pthread_exit(NULL);
888 	}
889 
890 	pthread_mutex_lock(&resetcpu_mtx);
891 	while (!CPU_EMPTY(&cpumask)) {
892 		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
893 	}
894 	pthread_mutex_unlock(&resetcpu_mtx);
895 
896 	switch (how) {
897 	case VM_SUSPEND_RESET:
898 		exit(0);
899 	case VM_SUSPEND_POWEROFF:
900 		if (get_config_bool_default("destroy_on_poweroff", false))
901 			vm_destroy(ctx);
902 		exit(1);
903 	case VM_SUSPEND_HALT:
904 		exit(2);
905 	case VM_SUSPEND_TRIPLEFAULT:
906 		exit(3);
907 	default:
908 		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
909 		exit(100);
910 	}
911 	return (0);	/* NOTREACHED */
912 }
913 
914 static int
915 vmexit_debug(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
916 {
917 
918 #ifdef BHYVE_SNAPSHOT
919 	checkpoint_cpu_suspend(*pvcpu);
920 #endif
921 	gdb_cpu_suspend(*pvcpu);
922 #ifdef BHYVE_SNAPSHOT
923 	checkpoint_cpu_resume(*pvcpu);
924 #endif
925 	return (VMEXIT_CONTINUE);
926 }
927 
928 static int
929 vmexit_breakpoint(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
930 {
931 
932 	gdb_cpu_breakpoint(*pvcpu, vmexit);
933 	return (VMEXIT_CONTINUE);
934 }
935 
936 static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
937 	[VM_EXITCODE_INOUT]  = vmexit_inout,
938 	[VM_EXITCODE_INOUT_STR]  = vmexit_inout,
939 	[VM_EXITCODE_VMX]    = vmexit_vmx,
940 	[VM_EXITCODE_SVM]    = vmexit_svm,
941 	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
942 	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
943 	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
944 	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
945 	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
946 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
947 	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
948 	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
949 	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
950 	[VM_EXITCODE_DEBUG] = vmexit_debug,
951 	[VM_EXITCODE_BPT] = vmexit_breakpoint,
952 };
953 
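/*
 * Per-vCPU run loop: apply any host CPU affinity from vcpumap[], load the
 * starting %rip, then call vm_run() repeatedly and dispatch each exit code
 * through handler[].  A handler returning VMEXIT_ABORT aborts the process;
 * the loop only ends if vm_run() itself fails.
 */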
954 static void
955 vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
956 {
957 	int error, rc;
958 	enum vm_exitcode exitcode;
959 	cpuset_t active_cpus;
960 
961 	if (vcpumap[vcpu] != NULL) {
962 		error = pthread_setaffinity_np(pthread_self(),
963 		    sizeof(cpuset_t), vcpumap[vcpu]);
964 		assert(error == 0);
965 	}
966 
967 	error = vm_active_cpus(ctx, &active_cpus);
968 	assert(CPU_ISSET(vcpu, &active_cpus));
969 
970 	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
971 	assert(error == 0);
972 
973 	while (1) {
974 		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
975 		if (error != 0)
976 			break;
977 
978 		exitcode = vmexit[vcpu].exitcode;
979 		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
980 			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
981 			    exitcode);
982 			exit(4);
983 		}
984 
985 		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
986 
987 		switch (rc) {
988 		case VMEXIT_CONTINUE:
989 			break;
990 		case VMEXIT_ABORT:
991 			abort();
992 		default:
993 			exit(4);
994 		}
995 	}
996 	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
997 }
998 
999 static int
1000 num_vcpus_allowed(struct vmctx *ctx)
1001 {
1002 	int tmp, error;
1003 
1004 	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
1005 
1006 	/*
1007 	 * The guest is allowed to spin up more than one processor only if the
1008 	 * UNRESTRICTED_GUEST capability is available.
1009 	 */
1010 	if (error == 0)
1011 		return (VM_MAXCPU);
1012 	else
1013 		return (1);
1014 }
1015 
1016 void
1017 fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
1018 {
1019 	int err, tmp;
1020 
1021 	if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
1022 		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
1023 		if (err < 0) {
1024 			fprintf(stderr, "VM exit on HLT not supported\n");
1025 			exit(4);
1026 		}
1027 		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
1028 		if (cpu == BSP)
1029 			handler[VM_EXITCODE_HLT] = vmexit_hlt;
1030 	}
1031 
1032 	if (get_config_bool_default("x86.vmexit_on_pause", false)) {
1033 		/*
1034 		 * pause exit support required for this mode
1035 		 */
1036 		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
1037 		if (err < 0) {
1038 			fprintf(stderr,
1039 			    "SMP mux requested, no pause support\n");
1040 			exit(4);
1041 		}
1042 		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
1043 		if (cpu == BSP)
1044 			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
1045 	}
1046 
1047 	if (get_config_bool_default("x86.x2apic", false))
1048 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
1049 	else
1050 		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
1051 
1052 	if (err) {
1053 		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
1054 		exit(4);
1055 	}
1056 
1057 	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
1058 }
1059 
1060 static struct vmctx *
1061 do_open(const char *vmname)
1062 {
1063 	struct vmctx *ctx;
1064 	int error;
1065 	bool reinit, romboot;
1066 #ifndef WITHOUT_CAPSICUM
1067 	cap_rights_t rights;
1068 	const cap_ioctl_t *cmds;
1069 	size_t ncmds;
1070 #endif
1071 
1072 	reinit = romboot = false;
1073 
1074 	if (lpc_bootrom())
1075 		romboot = true;
1076 
1077 	error = vm_create(vmname);
1078 	if (error) {
1079 		if (errno == EEXIST) {
1080 			if (romboot) {
1081 				reinit = true;
1082 			} else {
1083 				/*
1084 				 * The virtual machine has been setup by the
1085 				 * The virtual machine has been set up by the
1086 				 */
1087 			}
1088 		} else {
1089 			perror("vm_create");
1090 			exit(4);
1091 		}
1092 	} else {
1093 		if (!romboot) {
1094 			/*
1095 			 * If the virtual machine was just created then a
1096 			 * bootrom must be configured to boot it.
1097 			 */
1098 			fprintf(stderr, "virtual machine cannot be booted\n");
1099 			exit(4);
1100 		}
1101 	}
1102 
1103 	ctx = vm_open(vmname);
1104 	if (ctx == NULL) {
1105 		perror("vm_open");
1106 		exit(4);
1107 	}
1108 
1109 #ifndef WITHOUT_CAPSICUM
1110 	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
1111 	if (caph_rights_limit(vm_get_device_fd(ctx), &rights) == -1)
1112 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1113 	vm_get_ioctls(&ncmds);
1114 	cmds = vm_get_ioctls(NULL);
1115 	if (cmds == NULL)
1116 		errx(EX_OSERR, "out of memory");
1117 	if (caph_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1)
1118 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1119 	free((cap_ioctl_t *)cmds);
1120 #endif
1121 
1122 	if (reinit) {
1123 		error = vm_reinit(ctx);
1124 		if (error) {
1125 			perror("vm_reinit");
1126 			exit(4);
1127 		}
1128 	}
1129 	error = vm_set_topology(ctx, sockets, cores, threads, maxcpus);
1130 	if (error)
1131 		errx(EX_OSERR, "vm_set_topology");
1132 	return (ctx);
1133 }
1134 
1135 void
1136 spinup_vcpu(struct vmctx *ctx, int vcpu)
1137 {
1138 	int error;
1139 	uint64_t rip;
1140 
1141 	error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
1142 	assert(error == 0);
1143 
1144 	fbsdrun_set_capabilities(ctx, vcpu);
1145 	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
1146 	assert(error == 0);
1147 
1148 	fbsdrun_addcpu(ctx, BSP, vcpu, rip);
1149 }
1150 
1151 static bool
1152 parse_config_option(const char *option)
1153 {
1154 	const char *value;
1155 	char *path;
1156 
1157 	value = strchr(option, '=');
1158 	if (value == NULL || value[1] == '\0')
1159 		return (false);
1160 	path = strndup(option, value - option);
1161 	if (path == NULL)
1162 		err(4, "Failed to allocate memory");
1163 	set_config_value(path, value + 1);
1164 	return (true);
1165 }
1166 
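/*
 * Read a flat "-k" configuration file: one "name=value" setting per line,
 * with empty lines and lines starting with '#' ignored.  An illustrative
 * example:
 *
 *	memory.size=1G
 *	x86.strictmsr=false
 *	acpi_tables=true
 */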
1167 static void
1168 parse_simple_config_file(const char *path)
1169 {
1170 	FILE *fp;
1171 	char *line, *cp;
1172 	size_t linecap;
1173 	unsigned int lineno;
1174 
1175 	fp = fopen(path, "r");
1176 	if (fp == NULL)
1177 		err(4, "Failed to open configuration file %s", path);
1178 	line = NULL;
1179 	linecap = 0;
1181 	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
1182 		if (*line == '#' || *line == '\n')
1183 			continue;
1184 		cp = strchr(line, '\n');
1185 		if (cp != NULL)
1186 			*cp = '\0';
1187 		if (!parse_config_option(line))
1188 			errx(4, "%s line %u: invalid config option '%s'", path,
1189 			    lineno, line);
1190 	}
1191 	free(line);
1192 	fclose(fp);
1193 }
1194 
1195 static void
1196 set_defaults(void)
1197 {
1198 
1199 	set_config_bool("acpi_tables", false);
1200 	set_config_value("memory.size", "256M");
1201 	set_config_bool("x86.strictmsr", true);
1202 }
1203 
1204 int
1205 main(int argc, char *argv[])
1206 {
1207 	int c, error, err;
1208 	int max_vcpus, memflags;
1209 	struct vmctx *ctx;
1210 	uint64_t rip;
1211 	size_t memsize;
1212 	const char *value, *vmname;
1213 	char *optstr;
1214 #ifdef BHYVE_SNAPSHOT
1215 	char *restore_file;
1216 	struct restore_state rstate;
1217 	int vcpu;
1218 
1219 	restore_file = NULL;
1220 #endif
1221 
1222 	init_config();
1223 	set_defaults();
1224 	progname = basename(argv[0]);
1225 
1226 #ifdef BHYVE_SNAPSHOT
1227 	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:U:r:";
1228 #else
1229 	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:U:";
1230 #endif
1231 	while ((c = getopt(argc, argv, optstr)) != -1) {
1232 		switch (c) {
1233 		case 'a':
1234 			set_config_bool("x86.x2apic", false);
1235 			break;
1236 		case 'A':
1237 			set_config_bool("acpi_tables", true);
1238 			break;
1239 		case 'D':
1240 			set_config_bool("destroy_on_poweroff", true);
1241 			break;
1242 		case 'p':
1243 			if (pincpu_parse(optarg) != 0) {
1244 				errx(EX_USAGE, "invalid vcpu pinning "
1245 				    "configuration '%s'", optarg);
1246 			}
1247 			break;
1248 		case 'c':
1249 			if (topology_parse(optarg) != 0) {
1250 				errx(EX_USAGE, "invalid cpu topology "
1251 				    "'%s'", optarg);
1252 			}
1253 			break;
1254 		case 'C':
1255 			set_config_bool("memory.guest_in_core", true);
1256 			break;
1257 		case 'G':
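			/*
			 * An optional leading 'w' (e.g. "-G w1234") makes
			 * bhyve wait for a debugger to attach before the
			 * guest starts executing.
			 */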
1258 			if (optarg[0] == 'w') {
1259 				set_config_bool("gdb.wait", true);
1260 				optarg++;
1261 			}
1262 			set_config_value("gdb.port", optarg);
1263 			break;
1264 		case 'k':
1265 			parse_simple_config_file(optarg);
1266 			break;
1267 		case 'l':
1268 			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
1269 				lpc_print_supported_devices();
1270 				exit(0);
1271 			} else if (lpc_device_parse(optarg) != 0) {
1272 				errx(EX_USAGE, "invalid lpc device "
1273 				    "configuration '%s'", optarg);
1274 			}
1275 			break;
1276 #ifdef BHYVE_SNAPSHOT
1277 		case 'r':
1278 			restore_file = optarg;
1279 			break;
1280 #endif
1281 		case 's':
1282 			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
1283 				pci_print_supported_devices();
1284 				exit(0);
1285 			} else if (pci_parse_slot(optarg) != 0)
1286 				exit(4);
1287 			else
1288 				break;
1289 		case 'S':
1290 			set_config_bool("memory.wired", true);
1291 			break;
1292 		case 'm':
1293 			set_config_value("memory.size", optarg);
1294 			break;
1295 		case 'o':
1296 			if (!parse_config_option(optarg))
1297 				errx(EX_USAGE, "invalid configuration option '%s'", optarg);
1298 			break;
1299 		case 'H':
1300 			set_config_bool("x86.vmexit_on_hlt", true);
1301 			break;
1302 		case 'I':
1303 			/*
1304 			 * The "-I" option was used to add an ioapic to the
1305 			 * virtual machine.
1306 			 *
1307 			 * An ioapic is now provided unconditionally for each
1308 			 * virtual machine and this option is now deprecated.
1309 			 */
1310 			break;
1311 		case 'P':
1312 			set_config_bool("x86.vmexit_on_pause", true);
1313 			break;
1314 		case 'e':
1315 			set_config_bool("x86.strictio", true);
1316 			break;
1317 		case 'u':
1318 			set_config_bool("rtc.use_localtime", false);
1319 			break;
1320 		case 'U':
1321 			set_config_value("uuid", optarg);
1322 			break;
1323 		case 'w':
1324 			set_config_bool("x86.strictmsr", false);
1325 			break;
1326 		case 'W':
1327 			set_config_bool("virtio_msix", false);
1328 			break;
1329 		case 'x':
1330 			set_config_bool("x86.x2apic", true);
1331 			break;
1332 		case 'Y':
1333 			set_config_bool("x86.mptable", false);
1334 			break;
1335 		case 'h':
1336 			usage(0);
1337 		default:
1338 			usage(1);
1339 		}
1340 	}
1341 	argc -= optind;
1342 	argv += optind;
1343 
1344 	if (argc > 1)
1345 		usage(1);
1346 
1347 #ifdef BHYVE_SNAPSHOT
1348 	if (restore_file != NULL) {
1349 		error = load_restore_file(restore_file, &rstate);
1350 		if (error) {
1351 			fprintf(stderr, "Failed to read checkpoint info from "
1352 					"file: '%s'.\n", restore_file);
1353 			exit(1);
1354 		}
1355 		vmname = lookup_vmname(&rstate);
1356 		if (vmname != NULL)
1357 			set_config_value("name", vmname);
1358 	}
1359 #endif
1360 
1361 	if (argc == 1)
1362 		set_config_value("name", argv[0]);
1363 
1364 	vmname = get_config_value("name");
1365 	if (vmname == NULL)
1366 		usage(1);
1367 
1368 	if (get_config_bool_default("config.dump", false)) {
1369 		dump_config();
1370 		exit(1);
1371 	}
1372 
1373 	calc_topology();
1374 	build_vcpumaps();
1375 
1376 	value = get_config_value("memory.size");
1377 	error = vm_parse_memsize(value, &memsize);
1378 	if (error)
1379 		errx(EX_USAGE, "invalid memsize '%s'", value);
1380 
1381 	ctx = do_open(vmname);
1382 
1383 #ifdef BHYVE_SNAPSHOT
1384 	if (restore_file != NULL) {
1385 		guest_ncpus = lookup_guest_ncpus(&rstate);
1386 		memflags = lookup_memflags(&rstate);
1387 		memsize = lookup_memsize(&rstate);
1388 	}
1389 
1390 	if (guest_ncpus < 1) {
1391 		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
1392 		exit(1);
1393 	}
1394 #endif
1395 
1396 	max_vcpus = num_vcpus_allowed(ctx);
1397 	if (guest_ncpus > max_vcpus) {
1398 		fprintf(stderr, "%d vCPUs requested but only %d available\n",
1399 			guest_ncpus, max_vcpus);
1400 		exit(4);
1401 	}
1402 
1403 	fbsdrun_set_capabilities(ctx, BSP);
1404 
1405 	memflags = 0;
1406 	if (get_config_bool_default("memory.wired", false))
1407 		memflags |= VM_MEM_F_WIRED;
1408 	if (get_config_bool_default("memory.guest_in_core", false))
1409 		memflags |= VM_MEM_F_INCORE;
1410 	vm_set_memflags(ctx, memflags);
1411 	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
1412 	if (err) {
1413 		fprintf(stderr, "Unable to setup memory (%d)\n", errno);
1414 		exit(4);
1415 	}
1416 
1417 	error = init_msr();
1418 	if (error) {
1419 		fprintf(stderr, "init_msr error %d\n", error);
1420 		exit(4);
1421 	}
1422 
1423 	init_mem();
1424 	init_inout();
1425 	kernemu_dev_init();
1426 	init_bootrom(ctx);
1427 	atkbdc_init(ctx);
1428 	pci_irq_init(ctx);
1429 	ioapic_init(ctx);
1430 
1431 	rtc_init(ctx);
1432 	sci_init(ctx);
1433 
1434 	/*
1435 	 * Exit if a device emulation finds an error in its initialization
1436 	 */
1437 	if (init_pci(ctx) != 0) {
1438 		perror("device emulation initialization error");
1439 		exit(4);
1440 	}
1441 
1442 	/*
1443 	 * Initialize after PCI, to allow a bootrom file to reserve the high
1444 	 * region.
1445 	 */
1446 	if (get_config_bool("acpi_tables"))
1447 		vmgenc_init(ctx);
1448 
1449 	value = get_config_value("gdb.port");
1450 	if (value != NULL)
1451 		init_gdb(ctx, atoi(value), get_config_bool_default("gdb.wait",
1452 		    false));
1453 
1454 	if (lpc_bootrom()) {
1455 		if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
1456 			fprintf(stderr, "ROM boot failed: unrestricted guest "
1457 			    "capability not available\n");
1458 			exit(4);
1459 		}
1460 		error = vcpu_reset(ctx, BSP);
1461 		assert(error == 0);
1462 	}
1463 
1464 #ifdef BHYVE_SNAPSHOT
1465 	if (restore_file != NULL) {
1466 		fprintf(stdout, "Pausing pci devs...\r\n");
1467 		if (vm_pause_user_devs(ctx) != 0) {
1468 			fprintf(stderr, "Failed to pause PCI device state.\n");
1469 			exit(1);
1470 		}
1471 
1472 		fprintf(stdout, "Restoring vm mem...\r\n");
1473 		if (restore_vm_mem(ctx, &rstate) != 0) {
1474 			fprintf(stderr, "Failed to restore VM memory.\n");
1475 			exit(1);
1476 		}
1477 
1478 		fprintf(stdout, "Restoring pci devs...\r\n");
1479 		if (vm_restore_user_devs(ctx, &rstate) != 0) {
1480 			fprintf(stderr, "Failed to restore PCI device state.\n");
1481 			exit(1);
1482 		}
1483 
1484 		fprintf(stdout, "Restoring kernel structs...\r\n");
1485 		if (vm_restore_kern_structs(ctx, &rstate) != 0) {
1486 			fprintf(stderr, "Failed to restore kernel structs.\n");
1487 			exit(1);
1488 		}
1489 
1490 		fprintf(stdout, "Resuming pci devs...\r\n");
1491 		if (vm_resume_user_devs(ctx) != 0) {
1492 			fprintf(stderr, "Failed to resume PCI device state.\n");
1493 			exit(1);
1494 		}
1495 	}
1496 #endif
1497 
1498 	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
1499 	assert(error == 0);
1500 
1501 	/*
1502 	 * Build the guest tables: MP table, SMBIOS and (optionally) ACPI.
1503 	 */
1504 	if (get_config_bool_default("x86.mptable", true)) {
1505 		error = mptable_build(ctx, guest_ncpus);
1506 		if (error) {
1507 			perror("failed to build the guest tables");
1508 			exit(4);
1509 		}
1510 	}
1511 
1512 	error = smbios_build(ctx);
1513 	assert(error == 0);
1514 
1515 	if (get_config_bool("acpi_tables")) {
1516 		error = acpi_build(ctx, guest_ncpus);
1517 		assert(error == 0);
1518 	}
1519 
1520 	if (lpc_bootrom())
1521 		fwctl_init();
1522 
1523 	/*
1524 	 * Change the proc title to include the VM name.
1525 	 */
1526 	setproctitle("%s", vmname);
1527 
1528 #ifndef WITHOUT_CAPSICUM
1529 	caph_cache_catpages();
1530 
1531 	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
1532 		errx(EX_OSERR, "Unable to apply rights for sandbox");
1533 
1534 	if (caph_enter() == -1)
1535 		errx(EX_OSERR, "cap_enter() failed");
1536 #endif
1537 
1538 #ifdef BHYVE_SNAPSHOT
1539 	if (restore_file != NULL)
1540 		destroy_restore_state(&rstate);
1541 
1542 	/*
1543 	 * checkpointing thread for communication with bhyvectl
1544 	 */
1545 	if (init_checkpoint_thread(ctx) < 0)
1546 		printf("Failed to start checkpoint thread!\r\n");
1547 
1548 	if (restore_file != NULL)
1549 		vm_restore_time(ctx);
1550 #endif
1551 
1552 	/*
1553 	 * Add CPU 0
1554 	 */
1555 	fbsdrun_addcpu(ctx, BSP, BSP, rip);
1556 
1557 #ifdef BHYVE_SNAPSHOT
1558 	/*
1559 	 * If we restore a VM, start all vCPUs now (including APs); otherwise,
1560 	 * let the guest OS spin them up later via vmexits.
1561 	 */
1562 	if (restore_file != NULL) {
1563 		for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
1564 			if (vcpu == BSP)
1565 				continue;
1566 
1567 			fprintf(stdout, "spinning up vcpu no %d...\r\n", vcpu);
1568 			spinup_vcpu(ctx, vcpu);
1569 		}
1570 	}
1571 #endif
1572 
1573 	/*
1574 	 * Head off to the main event dispatch loop
1575 	 */
1576 	mevent_dispatch();
1577 
1578 	exit(4);
1579 }
1580