1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <amd64/vmm/intel/vmcs.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "bootrom.h"
#include "inout.h"
#include "dbgport.h"
#include "debug.h"
#include "fwctl.h"
#include "gdb.h"
#include "ioapic.h"
#include "kernemu_dev.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#include "vmgenc.h"

#define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define MB		(1024UL * 1024)
#define GB		(1024UL * MB)

/*
 * Human-readable descriptions of the Intel VMX basic exit reasons,
 * indexed by the EXIT_REASON_* value reported in the VMCS.  Gaps in the
 * designated initializer list are NULL and reported as "Unknown".
 */
static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

/* A vmexit handler returns a VMEXIT_* disposition (continue or abort). */
typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);

const char *vmname;

int guest_ncpus;
uint16_t cores, maxcpus, sockets, threads;

char *guest_uuid_str;

int raw_stdio = 0;

static int gdb_port = 0;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
static int virtio_msix = 1;
static int x2apic_mode = 0;	/* default is xAPIC */
static int destroy_on_poweroff = 0;

static int strictio;
static int strictmsr = 1;

static int acpi;

static char *progname;
static const int BSP = 0;	/* the bootstrap processor is always vcpu 0 */

/* Set of vcpus that are currently active (running a vcpu thread). */
static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

/* Per-vcpu exit information filled in by vm_run() for each vmexit. */
static struct vm_exit vmexit[VM_MAXCPU];

/* Counters of interesting vmexit events, for post-mortem inspection. */
struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_reqidle;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
} stats;

/* Per-vcpu thread bookkeeping handed to fbsdrun_start_thread(). */
struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

/* Optional host-cpu affinity set per vcpu, built by the -p option. */
static cpuset_t
*vcpumap[VM_MAXCPU] = { NULL };

/*
 * Print the command-line usage summary to stderr and exit with 'code'.
 */
static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-abehuwxACDHPSWY]\n"
	    " %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    " %*s [-g <gdb port>] [-l <lpc>]\n"
	    " %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] <vm>\n"
	    " -a: local apic is in xAPIC mode (deprecated)\n"
	    " -A: create ACPI tables\n"
	    " -c: number of cpus and/or topology specification\n"
	    " -C: include guest memory in core file\n"
	    " -D: destroy on power-off\n"
	    " -e: exit on unhandled I/O access\n"
	    " -g: gdb port\n"
	    " -h: help\n"
	    " -H: vmexit from the guest on hlt\n"
	    " -l: LPC device configuration\n"
	    " -m: memory size in MB\n"
#ifdef BHYVE_SNAPSHOT
	    " -r: path to checkpoint file\n"
#endif
	    " -p: pin 'vcpu' to 'hostcpu'\n"
	    " -P: vmexit from the guest on pause\n"
	    " -s: <slot,driver,configinfo> PCI slot config\n"
	    " -S: guest memory cannot be swapped\n"
	    " -u: RTC keeps UTC time\n"
	    " -U: uuid\n"
	    " -w: ignore unimplemented MSRs\n"
	    " -W: force virtio to use single-vector MSI\n"
	    " -x: local apic is in x2APIC mode\n"
	    " -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * Parse the -c topology option and fill in guest_ncpus, sockets, cores
 * and threads.  Returns 0 on success, -1 on a malformed specification.
 *
 * XXX This parser is known to have the following issues:
 * 1. It accepts null key=value tokens ",,".
 * 2. It accepts whitespace after = and before value.
 * 3. Values out of range of INT are silently wrapped.
 * 4. It doesn't check non-final values.
 * 5. The apparently bogus limits of UINT16_MAX are for future expansion.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification, this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	uint64_t ncpus;
	int c, chk, n, s, t, tmp;
	char *cp, *str;
	bool ns, scts;	/* 'n' seen; any of sockets/cores/threads seen */

	c = 1, n = 1, s = 1, t = 1;
	ns = false, scts = false;
	str = strdup(opt);
	if (str == NULL)
		goto out;

	/* Each comma-separated token is a bare count or a key=value pair. */
	while ((cp = strsep(&str, ",")) != NULL) {
		if (sscanf(cp, "%i%n", &tmp, &chk) == 1) {
			n = tmp;
			ns = true;
		} else if (sscanf(cp, "cpus=%i%n", &tmp, &chk) == 1) {
			n = tmp;
			ns = true;
		} else if (sscanf(cp, "sockets=%i%n", &tmp, &chk) == 1) {
			s = tmp;
			scts = true;
		} else if (sscanf(cp, "cores=%i%n", &tmp, &chk) == 1) {
			c = tmp;
			scts = true;
		} else if (sscanf(cp, "threads=%i%n", &tmp, &chk) == 1) {
			t = tmp;
			scts = true;
#ifdef notyet  /* Do not expose this until vmm.ko implements it */
		} else if (sscanf(cp, "maxcpus=%i%n", &tmp, &chk) == 1) {
			m = tmp;
#endif
		/* Skip the empty argument case from -c "" */
		} else if (cp[0] == '\0')
			continue;
		else
			goto out;
		/* Any trailing garbage causes an error */
		if (cp[chk] != '\0')
			goto out;
	}
	free(str);
	str = NULL;

	/*
	 * Range check 1 <= n <= UINT16_MAX all values
	 */
	if (n < 1 || s < 1 || c < 1 || t < 1 ||
	    n > UINT16_MAX || s > UINT16_MAX || c > UINT16_MAX ||
	    t > UINT16_MAX)
		return (-1);

	/* If only the cpus was specified, use that as sockets */
	if (!scts)
		s = n;
	/*
	 * Compute sockets * cores * threads avoiding overflow
	 * The range check above insures these are 16 bit values
	 * If n was specified check it against computed ncpus
	 */
	ncpus = (uint64_t)s * c * t;
	if (ncpus > UINT16_MAX || (ns && n != ncpus))
		return (-1);

	guest_ncpus = ncpus;
	sockets = s;
	cores = c;
	threads = t;
	return(0);

out:
	free(str);
	return (-1);
}

/*
 * Parse a -p "vcpu:hostcpu" pinning specification and record it in
 * vcpumap[].  Returns 0 on success, -1 on malformed or out-of-range input.
 */
static int
pincpu_parse(const char *opt)
{
	int vcpu, pcpu;

	if (sscanf(opt,
"%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
		fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
		    vcpu, VM_MAXCPU - 1);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	/* Allocate this vcpu's affinity set lazily on first use. */
	if (vcpumap[vcpu] == NULL) {
		if ((vcpumap[vcpu] = malloc(sizeof(cpuset_t))) == NULL) {
			perror("malloc");
			return (-1);
		}
		CPU_ZERO(vcpumap[vcpu]);
	}
	CPU_SET(pcpu, vcpumap[vcpu]);
	return (0);
}

/*
 * Inject an exception with the given vector (and error code, when
 * 'errcode_valid' is set) into 'vcpu', restarting the faulting
 * instruction.  'arg' is the struct vmctx of the VM.
 */
void
vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
    int errcode)
{
	struct vmctx *ctx;
	int error, restart_instruction;

	ctx = arg;
	restart_instruction = 1;

	error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
	    restart_instruction);
	assert(error == 0);
}

/* Translate a guest physical address range to a host virtual address. */
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

#ifdef BHYVE_SNAPSHOT
/* Translate a host virtual address back to a guest physical address. */
uintptr_t
paddr_host2guest(struct vmctx *ctx, void *addr)
{
	return (vm_rev_map_gpa(ctx, addr));
}
#endif

/* Non-zero when the guest should vmexit on PAUSE (-P option). */
int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

/* Non-zero when the guest should vmexit on HLT (-H option). */
int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

/* Non-zero when virtio devices may use multi-vector MSI-X (cleared by -W). */
int
fbsdrun_virtio_msix(void)
{

	return (virtio_msix);
}

/*
 * Per-vcpu thread entry point: names the thread, registers it with the
 * snapshot and gdb machinery, then enters the vcpu run loop.  Never
 * returns normally.
 */
static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_add(vcpu);
#endif
	if (gdb_port != 0)
gdb_cpu_add(vcpu); 464 465 vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip); 466 467 /* not reached */ 468 exit(1); 469 return (NULL); 470 } 471 472 void 473 fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip) 474 { 475 int error; 476 477 assert(fromcpu == BSP); 478 479 /* 480 * The 'newcpu' must be activated in the context of 'fromcpu'. If 481 * vm_activate_cpu() is delayed until newcpu's pthread starts running 482 * then vmm.ko is out-of-sync with bhyve and this can create a race 483 * with vm_suspend(). 484 */ 485 error = vm_activate_cpu(ctx, newcpu); 486 if (error != 0) 487 err(EX_OSERR, "could not activate CPU %d", newcpu); 488 489 CPU_SET_ATOMIC(newcpu, &cpumask); 490 491 /* 492 * Set up the vmexit struct to allow execution to start 493 * at the given RIP 494 */ 495 vmexit[newcpu].rip = rip; 496 vmexit[newcpu].inst_length = 0; 497 498 mt_vmm_info[newcpu].mt_ctx = ctx; 499 mt_vmm_info[newcpu].mt_vcpu = newcpu; 500 501 error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL, 502 fbsdrun_start_thread, &mt_vmm_info[newcpu]); 503 assert(error == 0); 504 } 505 506 static int 507 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu) 508 { 509 510 if (!CPU_ISSET(vcpu, &cpumask)) { 511 fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu); 512 exit(4); 513 } 514 515 CPU_CLR_ATOMIC(vcpu, &cpumask); 516 return (CPU_EMPTY(&cpumask)); 517 } 518 519 static int 520 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu, 521 uint32_t eax) 522 { 523 #if BHYVE_DEBUG 524 /* 525 * put guest-driven debug here 526 */ 527 #endif 528 return (VMEXIT_CONTINUE); 529 } 530 531 static int 532 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 533 { 534 int error; 535 int bytes, port, in, out; 536 int vcpu; 537 538 vcpu = *pvcpu; 539 540 port = vme->u.inout.port; 541 bytes = vme->u.inout.bytes; 542 in = vme->u.inout.in; 543 out = !in; 544 545 /* Extra-special case of host notifications */ 546 if (out && port == GUEST_NIO_PORT) { 547 error 
= vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax); 548 return (error); 549 } 550 551 error = emulate_inout(ctx, vcpu, vme, strictio); 552 if (error) { 553 fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n", 554 in ? "in" : "out", 555 bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), 556 port, vmexit->rip); 557 return (VMEXIT_ABORT); 558 } else { 559 return (VMEXIT_CONTINUE); 560 } 561 } 562 563 static int 564 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 565 { 566 uint64_t val; 567 uint32_t eax, edx; 568 int error; 569 570 val = 0; 571 error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val); 572 if (error != 0) { 573 fprintf(stderr, "rdmsr to register %#x on vcpu %d\n", 574 vme->u.msr.code, *pvcpu); 575 if (strictmsr) { 576 vm_inject_gp(ctx, *pvcpu); 577 return (VMEXIT_CONTINUE); 578 } 579 } 580 581 eax = val; 582 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax); 583 assert(error == 0); 584 585 edx = val >> 32; 586 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx); 587 assert(error == 0); 588 589 return (VMEXIT_CONTINUE); 590 } 591 592 static int 593 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 594 { 595 int error; 596 597 error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval); 598 if (error != 0) { 599 fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n", 600 vme->u.msr.code, vme->u.msr.wval, *pvcpu); 601 if (strictmsr) { 602 vm_inject_gp(ctx, *pvcpu); 603 return (VMEXIT_CONTINUE); 604 } 605 } 606 return (VMEXIT_CONTINUE); 607 } 608 609 static int 610 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 611 { 612 613 (void)spinup_ap(ctx, *pvcpu, 614 vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip); 615 616 return (VMEXIT_CONTINUE); 617 } 618 619 #define DEBUG_EPT_MISCONFIG 620 #ifdef DEBUG_EPT_MISCONFIG 621 #define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400 622 623 static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4]; 624 static int ept_misconfig_ptenum; 625 #endif 626 
/*
 * Map a VMX exit reason to its human-readable description, or "Unknown"
 * for reasons outside the table.
 */
static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

/*
 * Report an unhandled VMX vmexit in detail and abort the VM.  With
 * DEBUG_EPT_MISCONFIG defined, the guest-physical address and page
 * table entries are also dumped for EPT misconfigurations.
 */
static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vmexit->u.vmx.exit_reason,
	    vmexit_vmx_desc(vmexit->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(ctx, *pvcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

/* Report an unhandled SVM vmexit in detail and abort the VM. */
static int
vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tSVM\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode);
	fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1);
	fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2);
	return (VMEXIT_ABORT);
}

/* Spurious vmexit: count it and resume the guest unchanged. */
static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_bogus++;

	return (VMEXIT_CONTINUE);
}

/* REQIDLE vmexit: count it and resume the guest unchanged. */
static int
vmexit_reqidle(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_reqidle++;

	return (VMEXIT_CONTINUE);
}

/* HLT vmexit: yields to the host scheduler; registered only with -H. */
static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_hlt++;

	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return (VMEXIT_CONTINUE);
}

/* PAUSE vmexit: count it and resume; registered only with -P. */
static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_pause++;

	return (VMEXIT_CONTINUE);
}

/* Monitor-trap-flag vmexit: hand control to the snapshot/gdb machinery. */
static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_mtrap++;

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(*pvcpu);
#endif
	if (gdb_port != 0)
		gdb_cpu_mtrap(*pvcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(*pvcpu);
#endif

	return (VMEXIT_CONTINUE);
}

/*
 * Emulate a memory-mapped instruction access.  When the kernel could not
 * decode the instruction, fall back to decoding it in userspace.
 */
static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err, i, cs_d;
	struct vie *vie;
	enum vm_cpu_mode mode;

	stats.vmexit_inst_emul++;

	vie = &vmexit->u.inst_emul.vie;
	if (!vie->decoded) {
		/*
		 * Attempt to decode in userspace as a fallback. This allows
		 * updating instruction decode in bhyve without rebooting the
		 * kernel (rapid prototyping), albeit with much slower
		 * emulation.
*/
		vie_restart(vie);
		mode = vmexit->u.inst_emul.paging.cpu_mode;
		cs_d = vmexit->u.inst_emul.cs_d;
		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
			goto fail;
		/* Advance RIP past the instruction we just decoded. */
		if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
		    vmexit->rip + vie->num_processed) != 0)
			goto fail;
	}

	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
	    vie, &vmexit->u.inst_emul.paging);

	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vmexit->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction sequence [ ");
	for (i = 0; i < vie->num_valid; i++)
		fprintf(stderr, "%02x", vie->inst[i]);
	FPRINTLN(stderr, " ] at 0x%lx", vmexit->rip);
	return (VMEXIT_ABORT);
}

/* Serialize VM teardown between the BSP and the AP threads. */
static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

/*
 * Handle a suspend vmexit.  APs signal the BSP and exit their thread;
 * the BSP waits for all other vcpus to go away and then exits the
 * process with a code describing why the VM was suspended.
 */
static int
vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	enum vm_suspend_how how;

	how = vmexit->u.suspended.how;

	fbsdrun_deletecpu(ctx, *pvcpu);

	if (*pvcpu != BSP) {
		pthread_mutex_lock(&resetcpu_mtx);
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
	}

	pthread_mutex_lock(&resetcpu_mtx);
	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (destroy_on_poweroff)
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

/* Suspend this vcpu on behalf of the debugger / snapshot machinery. */
static int
vmexit_debug(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(*pvcpu);
#endif
	if (gdb_port != 0)
		gdb_cpu_suspend(*pvcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(*pvcpu);
#endif
	return (VMEXIT_CONTINUE);
}

/* Report a breakpoint to the gdb stub; fatal when no debugger is attached. */
static int
vmexit_breakpoint(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	if (gdb_port == 0) {
		fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
		exit(4);
	}
	gdb_cpu_breakpoint(*pvcpu, vmexit);
	return (VMEXIT_CONTINUE);
}

/* Dispatch table mapping VM_EXITCODE_* values to their handlers. */
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_INOUT_STR]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_SVM]    = vmexit_svm,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_BPT] = vmexit_breakpoint,
};

/*
 * Run loop for a single vcpu: apply any -p affinity, start execution at
 * 'startrip' and dispatch each vmexit to its handler until the VM is
 * aborted or vm_run() fails.
 */
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus;

	if (vcpumap[vcpu] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vcpu]);
		assert(error == 0);
	}

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu, &active_cpus));

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
	assert(error == 0);

	while (1) {
		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
		if (error != 0)
			break;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: 
unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

/*
 * Return the number of vcpus this VM may use: VM_MAXCPU when the
 * UNRESTRICTED_GUEST capability is available, otherwise just one (the BSP).
 */
static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

/*
 * Configure the per-vcpu capabilities: HLT/PAUSE exits (when requested),
 * x2APIC mode and INVPCID.  The vmexit handlers for HLT/PAUSE are
 * registered once, when called for the BSP.
 */
void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (x2apic_mode)
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

/*
 * Create (or reinitialize) and open the virtual machine, apply the
 * capsicum sandbox limits to its device descriptor and set the cpu
 * topology.  Exits on any failure.
 */
static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	const cap_ioctl_t *cmds;
	size_t ncmds;
#endif

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been setup by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
		if (!romboot) {
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(vm_get_device_fd(ctx), &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	vm_get_ioctls(&ncmds);
	cmds = vm_get_ioctls(NULL);
	if (cmds == NULL)
		errx(EX_OSERR, "out of memory");
	if (caph_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	free((cap_ioctl_t *)cmds);
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, sockets, cores, threads, maxcpus);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

/*
 * Enable unrestricted-guest execution on 'vcpu' and start it at its
 * current RIP.
 */
void
spinup_vcpu(struct vmctx *ctx, int vcpu)
{
	int error;
	uint64_t rip;

	error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	fbsdrun_set_capabilities(ctx, vcpu);
	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
	assert(error == 0);

	fbsdrun_addcpu(ctx, BSP, vcpu,
rip); 1080 } 1081 1082 int 1083 main(int argc, char *argv[]) 1084 { 1085 int c, error, dbg_port, err, bvmcons; 1086 int max_vcpus, mptgen, memflags; 1087 int rtc_localtime; 1088 bool gdb_stop; 1089 struct vmctx *ctx; 1090 uint64_t rip; 1091 size_t memsize; 1092 char *optstr; 1093 #ifdef BHYVE_SNAPSHOT 1094 char *restore_file; 1095 struct restore_state rstate; 1096 int vcpu; 1097 1098 restore_file = NULL; 1099 #endif 1100 1101 bvmcons = 0; 1102 progname = basename(argv[0]); 1103 dbg_port = 0; 1104 gdb_stop = false; 1105 guest_ncpus = 1; 1106 sockets = cores = threads = 1; 1107 maxcpus = 0; 1108 memsize = 256 * MB; 1109 mptgen = 1; 1110 rtc_localtime = 1; 1111 memflags = 0; 1112 1113 #ifdef BHYVE_SNAPSHOT 1114 optstr = "abehuwxACDHIPSWYp:g:G:c:s:m:l:U:r:"; 1115 #else 1116 optstr = "abehuwxACDHIPSWYp:g:G:c:s:m:l:U:"; 1117 #endif 1118 while ((c = getopt(argc, argv, optstr)) != -1) { 1119 switch (c) { 1120 case 'a': 1121 x2apic_mode = 0; 1122 break; 1123 case 'A': 1124 acpi = 1; 1125 break; 1126 case 'b': 1127 bvmcons = 1; 1128 break; 1129 case 'D': 1130 destroy_on_poweroff = 1; 1131 break; 1132 case 'p': 1133 if (pincpu_parse(optarg) != 0) { 1134 errx(EX_USAGE, "invalid vcpu pinning " 1135 "configuration '%s'", optarg); 1136 } 1137 break; 1138 case 'c': 1139 if (topology_parse(optarg) != 0) { 1140 errx(EX_USAGE, "invalid cpu topology " 1141 "'%s'", optarg); 1142 } 1143 break; 1144 case 'C': 1145 memflags |= VM_MEM_F_INCORE; 1146 break; 1147 case 'g': 1148 dbg_port = atoi(optarg); 1149 break; 1150 case 'G': 1151 if (optarg[0] == 'w') { 1152 gdb_stop = true; 1153 optarg++; 1154 } 1155 gdb_port = atoi(optarg); 1156 break; 1157 case 'l': 1158 if (strncmp(optarg, "help", strlen(optarg)) == 0) { 1159 lpc_print_supported_devices(); 1160 exit(0); 1161 } else if (lpc_device_parse(optarg) != 0) { 1162 errx(EX_USAGE, "invalid lpc device " 1163 "configuration '%s'", optarg); 1164 } 1165 break; 1166 #ifdef BHYVE_SNAPSHOT 1167 case 'r': 1168 restore_file = optarg; 1169 break; 1170 
#endif
                case 's':
                        /*
                         * PCI slot configuration; "help" lists supported
                         * devices.  Every arm below either exits or breaks,
                         * so despite appearances there is no fallthrough
                         * into 'S'.
                         */
                        if (strncmp(optarg, "help", strlen(optarg)) == 0) {
                                pci_print_supported_devices();
                                exit(0);
                        } else if (pci_parse_slot(optarg) != 0)
                                exit(4);
                        else
                                break;
                case 'S':
                        memflags |= VM_MEM_F_WIRED;
                        break;
                case 'm':
                        error = vm_parse_memsize(optarg, &memsize);
                        if (error)
                                errx(EX_USAGE, "invalid memsize '%s'", optarg);
                        break;
                case 'H':
                        guest_vmexit_on_hlt = 1;
                        break;
                case 'I':
                        /*
                         * The "-I" option was used to add an ioapic to the
                         * virtual machine.
                         *
                         * An ioapic is now provided unconditionally for each
                         * virtual machine and this option is now deprecated.
                         */
                        break;
                case 'P':
                        guest_vmexit_on_pause = 1;
                        break;
                case 'e':
                        strictio = 1;
                        break;
                case 'u':
                        rtc_localtime = 0;
                        break;
                case 'U':
                        guest_uuid_str = optarg;
                        break;
                case 'w':
                        strictmsr = 0;
                        break;
                case 'W':
                        virtio_msix = 0;
                        break;
                case 'x':
                        x2apic_mode = 1;
                        break;
                case 'Y':
                        mptgen = 0;
                        break;
                case 'h':
                        /*
                         * NOTE(review): no break — relies on usage(0) not
                         * returning (it presumably exits); confirm, otherwise
                         * this falls through to usage(1).
                         */
                        usage(0);
                default:
                        usage(1);
                }
        }
        argc -= optind;
        argv += optind;

        /*
         * The single remaining argument is the VM name; with snapshot
         * support it may instead be recovered from the restore file.
         */
#ifdef BHYVE_SNAPSHOT
        if (argc > 1 || (argc == 0 && restore_file == NULL))
                usage(1);

        if (restore_file != NULL) {
                error = load_restore_file(restore_file, &rstate);
                if (error) {
                        fprintf(stderr, "Failed to read checkpoint info from "
                                        "file: '%s'.\n", restore_file);
                        exit(1);
                }
        }

        if (argc == 1) {
                vmname = argv[0];
        } else {
                vmname = lookup_vmname(&rstate);
                if (vmname == NULL) {
                        fprintf(stderr, "Cannot find VM name in restore file. "
                                        "Please specify one.\n");
                        exit(1);
                }
        }
#else
        if (argc != 1)
                usage(1);

        vmname = argv[0];
#endif
        ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
        /*
         * When restoring, the snapshot overrides the command-line vCPU
         * count, memory flags, and memory size.
         */
        if (restore_file != NULL) {
                guest_ncpus = lookup_guest_ncpus(&rstate);
                memflags = lookup_memflags(&rstate);
                memsize = lookup_memsize(&rstate);
        }

        if (guest_ncpus < 1) {
                fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
                exit(1);
        }
#endif

        max_vcpus = num_vcpus_allowed(ctx);
        if (guest_ncpus > max_vcpus) {
                fprintf(stderr, "%d vCPUs requested but only %d available\n",
                        guest_ncpus, max_vcpus);
                exit(4);
        }

        fbsdrun_set_capabilities(ctx, BSP);

        /* Guest memory must be in place before any device init below. */
        vm_set_memflags(ctx, memflags);
        err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
        if (err) {
                fprintf(stderr, "Unable to setup memory (%d)\n", errno);
                exit(4);
        }

        error = init_msr();
        if (error) {
                fprintf(stderr, "init_msr error %d", error);
                exit(4);
        }

        /* Platform device emulation: memory, I/O ports, keyboard, IRQs. */
        init_mem();
        init_inout();
        kernemu_dev_init();
        init_bootrom(ctx);
        atkbdc_init(ctx);
        pci_irq_init(ctx);
        ioapic_init(ctx);

        rtc_init(ctx, rtc_localtime);
        sci_init(ctx);

        /*
         * Exit if a device emulation finds an error in its initialization.
         * NOTE(review): perror() here assumes init_pci() leaves a meaningful
         * errno — confirm.
         */
        if (init_pci(ctx) != 0) {
                perror("device emulation initialization error");
                exit(4);
        }

        /*
         * Initialize after PCI, to allow a bootrom file to reserve the high
         * region.
         */
        if (acpi)
                vmgenc_init(ctx);

        if (dbg_port != 0)
                init_dbgport(dbg_port);

        if (gdb_port != 0)
                init_gdb(ctx, gdb_port, gdb_stop);

        if (bvmcons)
                init_bvmcons();

        /* ROM boot requires unrestricted-guest support on the BSP. */
        if (lpc_bootrom()) {
                if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
                        fprintf(stderr, "ROM boot failed: unrestricted guest "
                            "capability not available\n");
                        exit(4);
                }
                error = vcpu_reset(ctx, BSP);
                assert(error == 0);
        }

#ifdef BHYVE_SNAPSHOT
        /*
         * Restore sequence: pause devices, restore guest memory, then device
         * and kernel state, and finally resume the devices.
         */
        if (restore_file != NULL) {
                fprintf(stdout, "Pausing pci devs...\r\n");
                if (vm_pause_user_devs(ctx) != 0) {
                        fprintf(stderr, "Failed to pause PCI device state.\n");
                        exit(1);
                }

                fprintf(stdout, "Restoring vm mem...\r\n");
                if (restore_vm_mem(ctx, &rstate) != 0) {
                        fprintf(stderr, "Failed to restore VM memory.\n");
                        exit(1);
                }

                fprintf(stdout, "Restoring pci devs...\r\n");
                if (vm_restore_user_devs(ctx, &rstate) != 0) {
                        fprintf(stderr, "Failed to restore PCI device state.\n");
                        exit(1);
                }

                fprintf(stdout, "Restoring kernel structs...\r\n");
                if (vm_restore_kern_structs(ctx, &rstate) != 0) {
                        fprintf(stderr, "Failed to restore kernel structs.\n");
                        exit(1);
                }

                fprintf(stdout, "Resuming pci devs...\r\n");
                if (vm_resume_user_devs(ctx) != 0) {
                        fprintf(stderr, "Failed to resume PCI device state.\n");
                        exit(1);
                }
        }
#endif

        error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
        assert(error == 0);

        /*
         * build the guest tables, MP etc.
         */
        if (mptgen) {
                error = mptable_build(ctx, guest_ncpus);
                if (error) {
                        perror("error to build the guest tables");
                        exit(4);
                }
        }

        error = smbios_build(ctx);
        assert(error == 0);

        if (acpi) {
                error = acpi_build(ctx, guest_ncpus);
                assert(error == 0);
        }

        if (lpc_bootrom())
                fwctl_init();

        /*
         * Change the proc title to include the VM name.
         */
        setproctitle("%s", vmname);

#ifndef WITHOUT_CAPSICUM
        /* Enter capability mode; all needed rights were limited above. */
        caph_cache_catpages();

        if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
                errx(EX_OSERR, "Unable to apply rights for sandbox");

        if (caph_enter() == -1)
                errx(EX_OSERR, "cap_enter() failed");
#endif

#ifdef BHYVE_SNAPSHOT
        if (restore_file != NULL)
                destroy_restore_state(&rstate);

        /*
         * checkpointing thread for communication with bhyvectl
         */
        if (init_checkpoint_thread(ctx) < 0)
                printf("Failed to start checkpoint thread!\r\n");

        if (restore_file != NULL)
                vm_restore_time(ctx);
#endif

        /*
         * Add CPU 0
         */
        fbsdrun_addcpu(ctx, BSP, BSP, rip);

#ifdef BHYVE_SNAPSHOT
        /*
         * If we restore a VM, start all vCPUs now (including APs), otherwise,
         * let the guest OS to spin them up later via vmexits.
         */
        if (restore_file != NULL) {
                for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
                        if (vcpu == BSP)
                                continue;

                        fprintf(stdout, "spinning up vcpu no %d...\r\n", vcpu);
                        spinup_vcpu(ctx, vcpu);
                }
        }
#endif

        /*
         * Head off to the main event dispatch loop; it is not expected to
         * return, so reaching exit(4) indicates failure.
         */
        mevent_dispatch();

        exit(4);
}