/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <amd64/vmm/intel/vmcs.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "bootrom.h"
#include "config.h"
#include "inout.h"
#include "debug.h"
#include "fwctl.h"
#include "gdb.h"
#include "ioapic.h"
#include "kernemu_dev.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#include "vmgenc.h"

#define	GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	MB		(1024UL * 1024)
#define	GB		(1024UL * MB)

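/*
 * Human-readable descriptions of the Intel VMX exit reasons, indexed by the
 * raw exit reason value.  vmexit_vmx_desc() consults this table so that
 * vmexit_vmx() can print something more useful than a bare number for an
 * unhandled VMX exit.
 */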
static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);

int guest_ncpus;
uint16_t cores, maxcpus, sockets, threads;

int raw_stdio = 0;

static char *progname;
static const int BSP = 0;

static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

static struct vm_exit *vmexit;

struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_reqidle;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
} stats;

struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} *mt_vmm_info;

static cpuset_t **vcpumap;

static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-AaCDeHhPSuWwxY]\n"
	    "       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    "       %*s [-G port] [-k config_file] [-l lpc] [-m mem] [-o var=value]\n"
	    "       %*s [-p vcpu:hostcpu] [-r file] [-s pci] [-U uuid] vmname\n"
	    "       -A: create ACPI tables\n"
	    "       -a: local apic is in xAPIC mode (deprecated)\n"
	    "       -C: include guest memory in core file\n"
	    "       -c: number of CPUs and/or topology specification\n"
	    "       -D: destroy on power-off\n"
	    "       -e: exit on unhandled I/O access\n"
	    "       -G: start a debug server\n"
	    "       -H: vmexit from the guest on HLT\n"
	    "       -h: help\n"
	    "       -k: key=value flat config file\n"
	    "       -K: PS2 keyboard layout\n"
	    "       -l: LPC device configuration\n"
	    "       -m: memory size\n"
	    "       -o: set config 'var' to 'value'\n"
	    "       -P: vmexit from the guest on pause\n"
	    "       -p: pin 'vcpu' to 'hostcpu'\n"
#ifdef BHYVE_SNAPSHOT
	    "       -r: path to checkpoint file\n"
#endif
	    "       -S: guest memory cannot be swapped\n"
	    "       -s: <slot,driver,configinfo> PCI slot config\n"
	    "       -U: UUID\n"
	    "       -u: RTC keeps UTC time\n"
	    "       -W: force virtio to use single-vector MSI\n"
	    "       -w: ignore unimplemented MSRs\n"
	    "       -x: local APIC is in x2APIC mode\n"
	    "       -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * XXX This parser is known to have the following issues:
 * 1.  It accepts null key=value tokens ",," as setting "cpus" to an
 *     empty string.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	char *cp, *str, *tofree;

	if (*opt == '\0') {
		set_config_value("sockets", "1");
		set_config_value("cores", "1");
		set_config_value("threads", "1");
		set_config_value("cpus", "1");
		return (0);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			set_config_value("cpus", cp + strlen("cpus="));
		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
			set_config_value("sockets", cp + strlen("sockets="));
		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
			set_config_value("cores", cp + strlen("cores="));
		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
			set_config_value("threads", cp + strlen("threads="));
#ifdef notyet	/* Do not expose this until vmm.ko implements it */
		else if (strncmp(cp, "maxcpus=", strlen("maxcpus=")) == 0)
			set_config_value("maxcpus", cp + strlen("maxcpus="));
#endif
		else if (strchr(cp, '=') != NULL)
			goto out;
		else
			set_config_value("cpus", cp);
	}
	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}
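
/*
 * Example (illustrative only): "-c 4,sockets=2,cores=2" stores cpus=4,
 * sockets=2 and cores=2 in the config tree, while a bare "-c 4" only sets
 * cpus=4 and leaves the remaining values to the defaults chosen by
 * calc_topology() below.
 */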

static int
parse_int_value(const char *key, const char *value, int minval, int maxval)
{
	char *cp;
	long lval;

	errno = 0;
	lval = strtol(value, &cp, 0);
	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
	    lval > maxval)
		errx(4, "Invalid value for %s: '%s'", key, value);
	return (lval);
}

/*
 * Set the sockets, cores, threads, and guest_ncpus variables based on
 * the configured topology.
 *
 * The limits of UINT16_MAX are due to the types passed to
 * vm_set_topology(). vmm.ko may enforce tighter limits.
 */
static void
calc_topology(void)
{
	const char *value;
	bool explicit_cpus;
	uint64_t ncpus;

	value = get_config_value("cpus");
	if (value != NULL) {
		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
		explicit_cpus = true;
	} else {
		guest_ncpus = 1;
		explicit_cpus = false;
	}
	value = get_config_value("cores");
	if (value != NULL)
		cores = parse_int_value("cores", value, 1, UINT16_MAX);
	else
		cores = 1;
	value = get_config_value("threads");
	if (value != NULL)
		threads = parse_int_value("threads", value, 1, UINT16_MAX);
	else
		threads = 1;
	value = get_config_value("sockets");
	if (value != NULL)
		sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
	else
		sockets = guest_ncpus;

	/*
	 * Compute sockets * cores * threads avoiding overflow. The
	 * range check above ensures these are 16 bit values.
	 */
	ncpus = (uint64_t)sockets * cores * threads;
	if (ncpus > UINT16_MAX)
		errx(4, "Computed number of vCPUs too high: %ju",
		    (uintmax_t)ncpus);

	if (explicit_cpus) {
		if (guest_ncpus != ncpus)
			errx(4, "Topology (%d sockets, %d cores, %d threads) "
			    "does not match %d vCPUs", sockets, cores, threads,
			    guest_ncpus);
	} else
		guest_ncpus = ncpus;
}

static int
pincpu_parse(const char *opt)
{
	const char *value;
	char *newval;
	char key[16];
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0) {
		fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
	value = get_config_value(key);

	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
	    value != NULL ? "," : "", pcpu) == -1) {
		perror("failed to build new cpuset string");
		return (-1);
	}

	set_config_value(key, newval);
	free(newval);
	return (0);
}

static void
parse_cpuset(int vcpu, const char *list, cpuset_t *set)
{
	char *cp, *token;
	int pcpu, start;

	CPU_ZERO(set);
	start = -1;
	token = __DECONST(char *, list);
	for (;;) {
		pcpu = strtoul(token, &cp, 0);
		if (cp == token)
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		if (pcpu < 0 || pcpu >= CPU_SETSIZE)
			errx(4, "hostcpu '%d' outside valid range from 0 to %d",
			    pcpu, CPU_SETSIZE - 1);
		switch (*cp) {
		case ',':
		case '\0':
			if (start >= 0) {
				if (start > pcpu)
					errx(4, "Invalid hostcpu range %d-%d",
					    start, pcpu);
				while (start < pcpu) {
					CPU_SET(start, vcpumap[vcpu]);
					start++;
				}
				start = -1;
			}
			CPU_SET(pcpu, vcpumap[vcpu]);
			break;
		case '-':
			if (start >= 0)
				errx(4, "invalid cpuset for vcpu %d: '%s'",
				    vcpu, list);
			start = pcpu;
			break;
		default:
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		}
		if (*cp == '\0')
			break;
		token = cp + 1;
	}
}

static void
build_vcpumaps(void)
{
	char key[16];
	const char *value;
	int vcpu;

	vcpumap = calloc(guest_ncpus, sizeof(*vcpumap));
	for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
		value = get_config_value(key);
		if (value == NULL)
			continue;
		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
		if (vcpumap[vcpu] == NULL)
			err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
		parse_cpuset(vcpu, value, vcpumap[vcpu]);
	}
}

void
vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
    int errcode)
{
	struct vmctx *ctx;
	int error, restart_instruction;

	ctx = arg;
	restart_instruction = 1;

	error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
	    restart_instruction);
	assert(error == 0);
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

#ifdef BHYVE_SNAPSHOT
uintptr_t
paddr_host2guest(struct vmctx *ctx, void *addr)
{
	return (vm_rev_map_gpa(ctx, addr));
}
#endif

int
fbsdrun_virtio_msix(void)
{

	return (get_config_bool_default("virtio_msix", true));
}

static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_add(vcpu);
#endif
	gdb_cpu_add(vcpu);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

void
fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
{
	int error;

	assert(fromcpu == BSP);

	/*
	 * The 'newcpu' must be activated in the context of 'fromcpu'. If
	 * vm_activate_cpu() is delayed until newcpu's pthread starts running
	 * then vmm.ko is out-of-sync with bhyve and this can create a race
	 * with vm_suspend().
	 */
	error = vm_activate_cpu(ctx, newcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", newcpu);

	CPU_SET_ATOMIC(newcpu, &cpumask);

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[newcpu].rip = rip;
	vmexit[newcpu].inst_length = 0;

	mt_vmm_info[newcpu].mt_ctx = ctx;
	mt_vmm_info[newcpu].mt_vcpu = newcpu;

	error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
	    fbsdrun_start_thread, &mt_vmm_info[newcpu]);
	assert(error == 0);
}

static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{

	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(4);
	}

	CPU_CLR_ATOMIC(vcpu, &cpumask);
	return (CPU_EMPTY(&cpumask));
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
    uint32_t eax)
{
#if BHYVE_DEBUG
	/*
	 * put guest-driven debug here
	 */
#endif
	return (VMEXIT_CONTINUE);
}

static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	in = vme->u.inout.in;
	out = !in;

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT) {
		error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
		return (error);
	}

	error = emulate_inout(ctx, vcpu, vme);
	if (error) {
		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
		    in ? "in" : "out",
		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
		    port, vmexit->rip);
		return (VMEXIT_ABORT);
	} else {
		return (VMEXIT_CONTINUE);
	}
}
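
/*
 * Note: GUEST_NIO_PORT (0x488) is the guest-to-host upcall port tested above.
 * A guest would trigger it with an OUT to that port (e.g. "outl(0x488, val)",
 * an illustrative sketch only); the value written is delivered to
 * vmexit_handle_notify() via vme->u.inout.eax.
 */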

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	uint64_t val;
	uint32_t eax, edx;
	int error;

	val = 0;
	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
	if (error != 0) {
		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
		    vme->u.msr.code, *pvcpu);
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}

	eax = val;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
	assert(error == 0);

	edx = val >> 32;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;

	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
	if (error != 0) {
		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}
	return (VMEXIT_CONTINUE);
}
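
/*
 * Both MSR handlers honor the "x86.strictmsr" knob: it defaults to true (see
 * set_defaults()), so an unemulated MSR access injects #GP into the guest,
 * while -w clears it and the access is merely logged (RDMSR then returns 0).
 */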

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{

	(void)spinup_ap(ctx, *pvcpu,
	    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	return (VMEXIT_CONTINUE);
}

#define	DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400

static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif

static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vmexit->u.vmx.exit_reason,
	    vmexit_vmx_desc(vmexit->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(ctx, *pvcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

static int
vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tSVM\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode);
	fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1);
	fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2);
	return (VMEXIT_ABORT);
}

static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_bogus++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_reqidle(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_reqidle++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_hlt++;

	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_pause++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_mtrap++;

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(*pvcpu);
#endif
	gdb_cpu_mtrap(*pvcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(*pvcpu);
#endif

	return (VMEXIT_CONTINUE);
}

static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err, i, cs_d;
	struct vie *vie;
	enum vm_cpu_mode mode;

	stats.vmexit_inst_emul++;

	vie = &vmexit->u.inst_emul.vie;
	if (!vie->decoded) {
		/*
		 * Attempt to decode in userspace as a fallback.  This allows
		 * updating instruction decode in bhyve without rebooting the
		 * kernel (rapid prototyping), albeit with much slower
		 * emulation.
		 */
		vie_restart(vie);
		mode = vmexit->u.inst_emul.paging.cpu_mode;
		cs_d = vmexit->u.inst_emul.cs_d;
		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
			goto fail;
		if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
		    vmexit->rip + vie->num_processed) != 0)
			goto fail;
	}

	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
	    vie, &vmexit->u.inst_emul.paging);

	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vmexit->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction sequence [ ");
	for (i = 0; i < vie->num_valid; i++)
		fprintf(stderr, "%02x", vie->inst[i]);
	FPRINTLN(stderr, " ] at 0x%lx", vmexit->rip);
	return (VMEXIT_ABORT);
}

static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

static int
vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	enum vm_suspend_how how;

	how = vmexit->u.suspended.how;

	fbsdrun_deletecpu(ctx, *pvcpu);

	if (*pvcpu != BSP) {
		pthread_mutex_lock(&resetcpu_mtx);
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
	}

	pthread_mutex_lock(&resetcpu_mtx);
	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

static int
vmexit_debug(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(*pvcpu);
#endif
	gdb_cpu_suspend(*pvcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(*pvcpu);
#endif
	return (VMEXIT_CONTINUE);
}

static int
vmexit_breakpoint(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	gdb_cpu_breakpoint(*pvcpu, vmexit);
	return (VMEXIT_CONTINUE);
}
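
/*
 * Dispatch table consulted by vm_loop(): each VM_EXITCODE_* maps to the
 * handler that processes it.  The VM_EXITCODE_HLT and VM_EXITCODE_PAUSE
 * entries are only installed by fbsdrun_set_capabilities() when the
 * corresponding x86.vmexit_on_hlt/x86.vmexit_on_pause options are enabled.
 */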

static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_INOUT_STR]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_SVM]    = vmexit_svm,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_BPT] = vmexit_breakpoint,
};

static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus;

	if (vcpumap[vcpu] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vcpu]);
		assert(error == 0);
	}

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu, &active_cpus));

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
	assert(error == 0);

	while (1) {
		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
		if (error != 0)
			break;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	uint16_t sockets, cores, threads, maxcpus;
	int tmp, error;

	/*
	 * The guest is allowed to spin up more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
	if (error != 0)
		return (1);

	error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus);
	if (error == 0)
		return (maxcpus);
	else
		return (1);
}

void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (get_config_bool_default("x86.vmexit_on_pause", false)) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (get_config_bool_default("x86.x2apic", false))
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}
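
/*
 * For reference, the knobs used above map to command line options parsed in
 * main(): -H sets "x86.vmexit_on_hlt", -P sets "x86.vmexit_on_pause", and
 * -x/-a set "x86.x2apic" to true/false (illustrative summary; see the getopt
 * loop below for the authoritative list).
 */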

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	const cap_ioctl_t *cmds;
	size_t ncmds;
#endif

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been set up by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
		if (!romboot) {
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(vm_get_device_fd(ctx), &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	vm_get_ioctls(&ncmds);
	cmds = vm_get_ioctls(NULL);
	if (cmds == NULL)
		errx(EX_OSERR, "out of memory");
	if (caph_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	free((cap_ioctl_t *)cmds);
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, sockets, cores, threads, maxcpus);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

void
spinup_vcpu(struct vmctx *ctx, int vcpu)
{
	int error;
	uint64_t rip;

	error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	fbsdrun_set_capabilities(ctx, vcpu);
	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
	assert(error == 0);

	fbsdrun_addcpu(ctx, BSP, vcpu, rip);
}

static bool
parse_config_option(const char *option)
{
	const char *value;
	char *path;

	value = strchr(option, '=');
	if (value == NULL || value[1] == '\0')
		return (false);
	path = strndup(option, value - option);
	if (path == NULL)
		err(4, "Failed to allocate memory");
	set_config_value(path, value + 1);
	return (true);
}

static void
parse_simple_config_file(const char *path)
{
	FILE *fp;
	char *line, *cp;
	size_t linecap;
	unsigned int lineno;

	fp = fopen(path, "r");
	if (fp == NULL)
		err(4, "Failed to open configuration file %s", path);
	line = NULL;
	linecap = 0;
	lineno = 1;
	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
		if (*line == '#' || *line == '\n')
			continue;
		cp = strchr(line, '\n');
		if (cp != NULL)
			*cp = '\0';
		if (!parse_config_option(line))
			errx(4, "%s line %u: invalid config option '%s'", path,
			    lineno, line);
	}
	free(line);
	fclose(fp);
}

static void
parse_gdb_options(char *optarg)
{
	const char *sport;
	char *colon;

	if (optarg[0] == 'w') {
		set_config_bool("gdb.wait", true);
		optarg++;
	}

	colon = strrchr(optarg, ':');
	if (colon == NULL) {
		sport = optarg;
	} else {
		*colon = '\0';
		colon++;
		sport = colon;
		set_config_value("gdb.address", optarg);
	}

	set_config_value("gdb.port", sport);
}

static void
set_defaults(void)
{

	set_config_bool("acpi_tables", false);
	set_config_value("memory.size", "256M");
	set_config_bool("x86.strictmsr", true);
}
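
/*
 * Example (illustrative only) of the flat "-k" configuration file format
 * accepted by parse_simple_config_file(); lines starting with '#' and empty
 * lines are ignored, and every other line is a key=value pair handed to
 * parse_config_option():
 *
 *	memory.size=1G
 *	x86.x2apic=true
 *
 * Similarly, "-G w5555" asks parse_gdb_options() to wait for a debugger on
 * port 5555, and "-G 127.0.0.1:5555" additionally sets "gdb.address".
 */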

int
main(int argc, char *argv[])
{
	int c, error, err;
	int max_vcpus, memflags;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;
	const char *value, *vmname;
	char *optstr;
#ifdef BHYVE_SNAPSHOT
	char *restore_file;
	struct restore_state rstate;
	int vcpu;

	restore_file = NULL;
#endif

	init_config();
	set_defaults();
	progname = basename(argv[0]);

#ifdef BHYVE_SNAPSHOT
	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:K:U:r:";
#else
	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:K:U:";
#endif
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			set_config_bool("x86.x2apic", false);
			break;
		case 'A':
			set_config_bool("acpi_tables", true);
			break;
		case 'D':
			set_config_bool("destroy_on_poweroff", true);
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid vcpu pinning "
				    "configuration '%s'", optarg);
			}
			break;
		case 'c':
			if (topology_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid cpu topology "
				    "'%s'", optarg);
			}
			break;
		case 'C':
			set_config_bool("memory.guest_in_core", true);
			break;
		case 'G':
			parse_gdb_options(optarg);
			break;
		case 'k':
			parse_simple_config_file(optarg);
			break;
		case 'K':
			set_config_value("keyboard.layout", optarg);
			break;
		case 'l':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				lpc_print_supported_devices();
				exit(0);
			} else if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
#ifdef BHYVE_SNAPSHOT
		case 'r':
			restore_file = optarg;
			break;
#endif
		case 's':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				pci_print_supported_devices();
				exit(0);
			} else if (pci_parse_slot(optarg) != 0)
				exit(4);
			else
				break;
		case 'S':
			set_config_bool("memory.wired", true);
			break;
		case 'm':
			set_config_value("memory.size", optarg);
			break;
		case 'o':
			if (!parse_config_option(optarg))
				errx(EX_USAGE, "invalid configuration option '%s'", optarg);
			break;
		case 'H':
			set_config_bool("x86.vmexit_on_hlt", true);
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			set_config_bool("x86.vmexit_on_pause", true);
			break;
		case 'e':
			set_config_bool("x86.strictio", true);
			break;
		case 'u':
			set_config_bool("rtc.use_localtime", false);
			break;
		case 'U':
			set_config_value("uuid", optarg);
			break;
		case 'w':
			set_config_bool("x86.strictmsr", false);
			break;
		case 'W':
			set_config_bool("virtio_msix", false);
			break;
		case 'x':
			set_config_bool("x86.x2apic", true);
			break;
		case 'Y':
			set_config_bool("x86.mptable", false);
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc > 1)
		usage(1);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		error = load_restore_file(restore_file, &rstate);
		if (error) {
			fprintf(stderr, "Failed to read checkpoint info from "
			    "file: '%s'.\n", restore_file);
			exit(1);
		}
		vmname = lookup_vmname(&rstate);
		if (vmname != NULL)
			set_config_value("name", vmname);
	}
#endif

	if (argc == 1)
		set_config_value("name", argv[0]);

	vmname = get_config_value("name");
	if (vmname == NULL)
		usage(1);

	if (get_config_bool_default("config.dump", false)) {
		dump_config();
		exit(1);
	}

	calc_topology();
	build_vcpumaps();

	value = get_config_value("memory.size");
	error = vm_parse_memsize(value, &memsize);
	if (error)
		errx(EX_USAGE, "invalid memsize '%s'", value);

	ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		guest_ncpus = lookup_guest_ncpus(&rstate);
		memflags = lookup_memflags(&rstate);
		memsize = lookup_memsize(&rstate);
	}

	if (guest_ncpus < 1) {
		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
		exit(1);
	}
#endif

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

	fbsdrun_set_capabilities(ctx, BSP);

	memflags = 0;
	if (get_config_bool_default("memory.wired", false))
		memflags |= VM_MEM_F_WIRED;
	if (get_config_bool_default("memory.guest_in_core", false))
		memflags |= VM_MEM_F_INCORE;
	vm_set_memflags(ctx, memflags);
	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to setup memory (%d)\n", errno);
		exit(4);
	}

	error = init_msr();
	if (error) {
		fprintf(stderr, "init_msr error %d", error);
		exit(4);
	}

	init_mem(guest_ncpus);
	init_inout();
	kernemu_dev_init();
	init_bootrom(ctx);
	atkbdc_init(ctx);
	pci_irq_init(ctx);
	ioapic_init(ctx);

	rtc_init(ctx);
	sci_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0) {
		perror("device emulation initialization error");
		exit(4);
	}

	/*
	 * Initialize after PCI, to allow a bootrom file to reserve the high
	 * region.
	 */
	if (get_config_bool("acpi_tables"))
		vmgenc_init(ctx);

	init_gdb(ctx);

	if (lpc_bootrom()) {
		if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
			fprintf(stderr, "ROM boot failed: unrestricted guest "
			    "capability not available\n");
			exit(4);
		}
		error = vcpu_reset(ctx, BSP);
		assert(error == 0);
	}

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		fprintf(stdout, "Pausing pci devs...\r\n");
		if (vm_pause_user_devs(ctx) != 0) {
			fprintf(stderr, "Failed to pause PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring vm mem...\r\n");
		if (restore_vm_mem(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore VM memory.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring pci devs...\r\n");
		if (vm_restore_user_devs(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring kernel structs...\r\n");
		if (vm_restore_kern_structs(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore kernel structs.\n");
			exit(1);
		}

		fprintf(stdout, "Resuming pci devs...\r\n");
		if (vm_resume_user_devs(ctx) != 0) {
			fprintf(stderr, "Failed to resume PCI device state.\n");
			exit(1);
		}
	}
#endif

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	if (get_config_bool_default("x86.mptable", true)) {
		error = mptable_build(ctx, guest_ncpus);
		if (error) {
			perror("error building the guest tables");
			exit(4);
		}
	}

	error = smbios_build(ctx);
	if (error != 0)
		exit(4);

	if (get_config_bool("acpi_tables")) {
		error = acpi_build(ctx, guest_ncpus);
		assert(error == 0);
	}

	if (lpc_bootrom())
		fwctl_init();

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

#ifndef WITHOUT_CAPSICUM
	caph_cache_catpages();

	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	if (caph_enter() == -1)
		errx(EX_OSERR, "cap_enter() failed");
#endif

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL)
		destroy_restore_state(&rstate);

	/* initialize mutex/cond variables */
	init_snapshot();

	/*
	 * checkpointing thread for communication with bhyvectl
	 */
	if (init_checkpoint_thread(ctx) < 0)
		printf("Failed to start checkpoint thread!\r\n");

	if (restore_file != NULL)
		vm_restore_time(ctx);
#endif

	/* Allocate per-VCPU resources. */
	vmexit = calloc(guest_ncpus, sizeof(*vmexit));
	mt_vmm_info = calloc(guest_ncpus, sizeof(*mt_vmm_info));

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, BSP, rip);

#ifdef BHYVE_SNAPSHOT
	/*
	 * If we restore a VM, start all vCPUs now (including APs); otherwise,
	 * let the guest OS spin them up later via vmexits.
	 */
	if (restore_file != NULL) {
		for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
			if (vcpu == BSP)
				continue;

			fprintf(stdout, "spinning up vcpu no %d...\r\n", vcpu);
			spinup_vcpu(ctx, vcpu);
		}
	}
#endif

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(4);
}