/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <amd64/vmm/intel/vmcs.h>
#include <x86/apicreg.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "bootrom.h"
#include "config.h"
#include "inout.h"
#include "debug.h"
#include "fwctl.h"
#include "gdb.h"
#include "ioapic.h"
#include "kernemu_dev.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#include "vmgenc.h"

#define	GUEST_NIO_PORT	0x488	/* guest upcalls via i/o port */

#define	MB	(1024UL * 1024)
#define	GB	(1024UL * MB)

static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;

int raw_stdio = 0;

static char *progname;
static const int BSP = 0;

static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

static struct vm_exit *vmexit;

static struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_reqidle;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
} stats;

static struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} *mt_vmm_info;

static cpuset_t **vcpumap;

static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-AaCDeHhPSuWwxY]\n"
	    "       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    "       %*s [-G port] [-k config_file] [-l lpc] [-m mem] [-o var=value]\n"
	    "       %*s [-p vcpu:hostcpu] [-r file] [-s pci] [-U uuid] vmname\n"
	    "       -A: create ACPI tables\n"
	    "       -a: local apic is in xAPIC mode (deprecated)\n"
	    "       -C: include guest memory in core file\n"
	    "       -c: number of CPUs and/or topology specification\n"
	    "       -D: destroy on power-off\n"
	    "       -e: exit on unhandled I/O access\n"
	    "       -G: start a debug server\n"
	    "       -H: vmexit from the guest on HLT\n"
	    "       -h: help\n"
	    "       -k: key=value flat config file\n"
	    "       -K: PS2 keyboard layout\n"
	    "       -l: LPC device configuration\n"
	    "       -m: memory size\n"
	    "       -o: set config 'var' to 'value'\n"
	    "       -P: vmexit from the guest on pause\n"
	    "       -p: pin 'vcpu' to 'hostcpu'\n"
#ifdef BHYVE_SNAPSHOT
	    "       -r: path to checkpoint file\n"
#endif
	    "       -S: guest memory cannot be swapped\n"
	    "       -s: <slot,driver,configinfo> PCI slot config\n"
	    "       -U: UUID\n"
	    "       -u: RTC keeps UTC time\n"
	    "       -W: force virtio to use single-vector MSI\n"
	    "       -w: ignore unimplemented MSRs\n"
	    "       -x: local APIC is in x2APIC mode\n"
	    "       -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * XXX This parser is known to have the following issues:
 * 1.  It accepts null key=value tokens ",," as setting "cpus" to an
 *     empty string.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	char *cp, *str, *tofree;

	if (*opt == '\0') {
		set_config_value("sockets", "1");
		set_config_value("cores", "1");
		set_config_value("threads", "1");
		set_config_value("cpus", "1");
		return (0);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			set_config_value("cpus", cp + strlen("cpus="));
		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
			set_config_value("sockets", cp + strlen("sockets="));
		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
			set_config_value("cores", cp + strlen("cores="));
		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
			set_config_value("threads", cp + strlen("threads="));
#ifdef notyet	/* Do not expose this until vmm.ko implements it */
		else if (strncmp(cp, "maxcpus=", strlen("maxcpus=")) == 0)
			set_config_value("maxcpus", cp + strlen("maxcpus="));
#endif
		else if (strchr(cp, '=') != NULL)
			goto out;
		else
			set_config_value("cpus", cp);
	}
	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}

static int
parse_int_value(const char *key, const char *value, int minval, int maxval)
{
	char *cp;
	long lval;

	errno = 0;
	lval = strtol(value, &cp, 0);
	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
	    lval > maxval)
		errx(4, "Invalid value for %s: '%s'", key, value);
	return (lval);
}

/*
 * Set the sockets, cores, threads, and guest_cpus variables based on
 * the configured topology.
 *
 * The limits of UINT16_MAX are due to the types passed to
 * vm_set_topology().  vmm.ko may enforce tighter limits.
 */
static void
calc_topology(void)
{
	const char *value;
	bool explicit_cpus;
	uint64_t ncpus;

	value = get_config_value("cpus");
	if (value != NULL) {
		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
		explicit_cpus = true;
	} else {
		guest_ncpus = 1;
		explicit_cpus = false;
	}
	value = get_config_value("cores");
	if (value != NULL)
		cpu_cores = parse_int_value("cores", value, 1, UINT16_MAX);
	else
		cpu_cores = 1;
	value = get_config_value("threads");
	if (value != NULL)
		cpu_threads = parse_int_value("threads", value, 1, UINT16_MAX);
	else
		cpu_threads = 1;
	value = get_config_value("sockets");
	if (value != NULL)
		cpu_sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
	else
		cpu_sockets = guest_ncpus;

	/*
	 * Compute sockets * cores * threads avoiding overflow.  The
	 * range check above ensures these are 16 bit values.
	 */
	ncpus = (uint64_t)cpu_sockets * cpu_cores * cpu_threads;
	if (ncpus > UINT16_MAX)
		errx(4, "Computed number of vCPUs too high: %ju",
		    (uintmax_t)ncpus);

	if (explicit_cpus) {
		if (guest_ncpus != (int)ncpus)
			errx(4, "Topology (%d sockets, %d cores, %d threads) "
			    "does not match %d vCPUs",
			    cpu_sockets, cpu_cores, cpu_threads,
			    guest_ncpus);
	} else
		guest_ncpus = ncpus;
}

static int
pincpu_parse(const char *opt)
{
	const char *value;
	char *newval;
	char key[16];
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0) {
		fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
	value = get_config_value(key);

	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
	    value != NULL ? "," : "", pcpu) == -1) {
"," : "", pcpu) == -1) { 413 perror("failed to build new cpuset string"); 414 return (-1); 415 } 416 417 set_config_value(key, newval); 418 free(newval); 419 return (0); 420 } 421 422 static void 423 parse_cpuset(int vcpu, const char *list, cpuset_t *set) 424 { 425 char *cp, *token; 426 int pcpu, start; 427 428 CPU_ZERO(set); 429 start = -1; 430 token = __DECONST(char *, list); 431 for (;;) { 432 pcpu = strtoul(token, &cp, 0); 433 if (cp == token) 434 errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list); 435 if (pcpu < 0 || pcpu >= CPU_SETSIZE) 436 errx(4, "hostcpu '%d' outside valid range from 0 to %d", 437 pcpu, CPU_SETSIZE - 1); 438 switch (*cp) { 439 case ',': 440 case '\0': 441 if (start >= 0) { 442 if (start > pcpu) 443 errx(4, "Invalid hostcpu range %d-%d", 444 start, pcpu); 445 while (start < pcpu) { 446 CPU_SET(start, vcpumap[vcpu]); 447 start++; 448 } 449 start = -1; 450 } 451 CPU_SET(pcpu, vcpumap[vcpu]); 452 break; 453 case '-': 454 if (start >= 0) 455 errx(4, "invalid cpuset for vcpu %d: '%s'", 456 vcpu, list); 457 start = pcpu; 458 break; 459 default: 460 errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list); 461 } 462 if (*cp == '\0') 463 break; 464 token = cp + 1; 465 } 466 } 467 468 static void 469 build_vcpumaps(void) 470 { 471 char key[16]; 472 const char *value; 473 int vcpu; 474 475 vcpumap = calloc(guest_ncpus, sizeof(*vcpumap)); 476 for (vcpu = 0; vcpu < guest_ncpus; vcpu++) { 477 snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu); 478 value = get_config_value(key); 479 if (value == NULL) 480 continue; 481 vcpumap[vcpu] = malloc(sizeof(cpuset_t)); 482 if (vcpumap[vcpu] == NULL) 483 err(4, "Failed to allocate cpuset for vcpu %d", vcpu); 484 parse_cpuset(vcpu, value, vcpumap[vcpu]); 485 } 486 } 487 488 void 489 vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid, 490 int errcode) 491 { 492 struct vmctx *ctx; 493 int error, restart_instruction; 494 495 ctx = arg; 496 restart_instruction = 1; 497 498 error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode, 499 restart_instruction); 500 assert(error == 0); 501 } 502 503 void * 504 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len) 505 { 506 507 return (vm_map_gpa(ctx, gaddr, len)); 508 } 509 510 #ifdef BHYVE_SNAPSHOT 511 uintptr_t 512 paddr_host2guest(struct vmctx *ctx, void *addr) 513 { 514 return (vm_rev_map_gpa(ctx, addr)); 515 } 516 #endif 517 518 int 519 fbsdrun_virtio_msix(void) 520 { 521 522 return (get_config_bool_default("virtio_msix", true)); 523 } 524 525 static void * 526 fbsdrun_start_thread(void *param) 527 { 528 char tname[MAXCOMLEN + 1]; 529 struct mt_vmm_info *mtp; 530 int vcpu; 531 532 mtp = param; 533 vcpu = mtp->mt_vcpu; 534 535 snprintf(tname, sizeof(tname), "vcpu %d", vcpu); 536 pthread_set_name_np(mtp->mt_thr, tname); 537 538 #ifdef BHYVE_SNAPSHOT 539 checkpoint_cpu_add(vcpu); 540 #endif 541 gdb_cpu_add(vcpu); 542 543 vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip); 544 545 /* not reached */ 546 exit(1); 547 return (NULL); 548 } 549 550 static void 551 fbsdrun_addcpu(struct vmctx *ctx, int newcpu, uint64_t rip, bool suspend) 552 { 553 int error; 554 555 /* 556 * The 'newcpu' must be activated in the context of 'fromcpu'. If 557 * vm_activate_cpu() is delayed until newcpu's pthread starts running 558 * then vmm.ko is out-of-sync with bhyve and this can create a race 559 * with vm_suspend(). 
	 */
	error = vm_activate_cpu(ctx, newcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", newcpu);

	CPU_SET_ATOMIC(newcpu, &cpumask);

	if (suspend)
		vm_suspend_cpu(ctx, newcpu);

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[newcpu].rip = rip;
	vmexit[newcpu].inst_length = 0;

	mt_vmm_info[newcpu].mt_ctx = ctx;
	mt_vmm_info[newcpu].mt_vcpu = newcpu;

	error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
	    fbsdrun_start_thread, &mt_vmm_info[newcpu]);
	assert(error == 0);
}

static int
fbsdrun_deletecpu(int vcpu)
{

	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(4);
	}

	CPU_CLR_ATOMIC(vcpu, &cpumask);
	return (CPU_EMPTY(&cpumask));
}

static int
vmexit_handle_notify(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
    int *pvcpu __unused, uint32_t eax __unused)
{
#if BHYVE_DEBUG
	/*
	 * put guest-driven debug here
	 */
#endif
	return (VMEXIT_CONTINUE);
}

static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	in = vme->u.inout.in;
	out = !in;

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT) {
		error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
		return (error);
	}

	error = emulate_inout(ctx, vcpu, vme);
	if (error) {
		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
		    in ? "in" : "out",
		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
		    port, vme->rip);
		return (VMEXIT_ABORT);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	uint64_t val;
	uint32_t eax, edx;
	int error;

	val = 0;
	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
	if (error != 0) {
		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
		    vme->u.msr.code, *pvcpu);
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}

	eax = val;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
	assert(error == 0);

	edx = val >> 32;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;

	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
	if (error != 0) {
		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}
	return (VMEXIT_CONTINUE);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu __unused)
{

	(void)spinup_ap(ctx, vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	return (VMEXIT_CONTINUE);
}

#define	DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400

static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif

static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vme->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vme->u.vmx.exit_reason,
	    vmexit_vmx_desc(vme->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vme->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vme->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(ctx, *pvcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

static int
vmexit_svm(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tSVM\n");
"\treason\t\tSVM\n"); 754 fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip); 755 fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length); 756 fprintf(stderr, "\texitcode\t%#lx\n", vme->u.svm.exitcode); 757 fprintf(stderr, "\texitinfo1\t%#lx\n", vme->u.svm.exitinfo1); 758 fprintf(stderr, "\texitinfo2\t%#lx\n", vme->u.svm.exitinfo2); 759 return (VMEXIT_ABORT); 760 } 761 762 static int 763 vmexit_bogus(struct vmctx *ctx __unused, struct vm_exit *vme, 764 int *pvcpu __unused) 765 { 766 767 assert(vme->inst_length == 0); 768 769 stats.vmexit_bogus++; 770 771 return (VMEXIT_CONTINUE); 772 } 773 774 static int 775 vmexit_reqidle(struct vmctx *ctx __unused, struct vm_exit *vme, 776 int *pvcpu __unused) 777 { 778 779 assert(vme->inst_length == 0); 780 781 stats.vmexit_reqidle++; 782 783 return (VMEXIT_CONTINUE); 784 } 785 786 static int 787 vmexit_hlt(struct vmctx *ctx __unused, struct vm_exit *vme __unused, 788 int *pvcpu __unused) 789 { 790 791 stats.vmexit_hlt++; 792 793 /* 794 * Just continue execution with the next instruction. We use 795 * the HLT VM exit as a way to be friendly with the host 796 * scheduler. 797 */ 798 return (VMEXIT_CONTINUE); 799 } 800 801 static int 802 vmexit_pause(struct vmctx *ctx __unused, struct vm_exit *vme __unused, 803 int *pvcpu __unused) 804 { 805 806 stats.vmexit_pause++; 807 808 return (VMEXIT_CONTINUE); 809 } 810 811 static int 812 vmexit_mtrap(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu) 813 { 814 815 assert(vme->inst_length == 0); 816 817 stats.vmexit_mtrap++; 818 819 #ifdef BHYVE_SNAPSHOT 820 checkpoint_cpu_suspend(*pvcpu); 821 #endif 822 gdb_cpu_mtrap(*pvcpu); 823 #ifdef BHYVE_SNAPSHOT 824 checkpoint_cpu_resume(*pvcpu); 825 #endif 826 827 return (VMEXIT_CONTINUE); 828 } 829 830 static int 831 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 832 { 833 int err, i, cs_d; 834 struct vie *vie; 835 enum vm_cpu_mode mode; 836 837 stats.vmexit_inst_emul++; 838 839 vie = &vme->u.inst_emul.vie; 840 if (!vie->decoded) { 841 /* 842 * Attempt to decode in userspace as a fallback. This allows 843 * updating instruction decode in bhyve without rebooting the 844 * kernel (rapid prototyping), albeit with much slower 845 * emulation. 
		 */
		vie_restart(vie);
		mode = vme->u.inst_emul.paging.cpu_mode;
		cs_d = vme->u.inst_emul.cs_d;
		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
			goto fail;
		if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
		    vme->rip + vie->num_processed) != 0)
			goto fail;
	}

	err = emulate_mem(ctx, *pvcpu, vme->u.inst_emul.gpa,
	    vie, &vme->u.inst_emul.paging);
	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vme->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction sequence [ ");
	for (i = 0; i < vie->num_valid; i++)
		fprintf(stderr, "%02x", vie->inst[i]);
	FPRINTLN(stderr, " ] at 0x%lx", vme->rip);
	return (VMEXIT_ABORT);
}

static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

static int
vmexit_suspend(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	enum vm_suspend_how how;

	how = vme->u.suspended.how;

	fbsdrun_deletecpu(*pvcpu);

	if (*pvcpu != BSP) {
		pthread_mutex_lock(&resetcpu_mtx);
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
	}

	pthread_mutex_lock(&resetcpu_mtx);
	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

static int
vmexit_debug(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
    int *pvcpu)
{

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(*pvcpu);
#endif
	gdb_cpu_suspend(*pvcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(*pvcpu);
#endif
	return (VMEXIT_CONTINUE);
}

static int
vmexit_breakpoint(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
{

	gdb_cpu_breakpoint(*pvcpu, vme);
	return (VMEXIT_CONTINUE);
}

static int
vmexit_ipi(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu __unused)
{
	int error = -1;
	int i;

	switch (vme->u.ipi.mode) {
	case APIC_DELMODE_INIT:
		CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
			error = vm_suspend_cpu(ctx, i);
			if (error) {
				warnx("%s: failed to suspend cpu %d\n",
				    __func__, i);
				break;
			}
		}
		break;
	case APIC_DELMODE_STARTUP:
		CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
			spinup_ap(ctx, i, vme->u.ipi.vector << PAGE_SHIFT);
		}
		error = 0;
		break;
	default:
		break;
	}

	return (error);
}

static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT] = vmexit_inout,
	[VM_EXITCODE_INOUT_STR] = vmexit_inout,
	[VM_EXITCODE_VMX] = vmexit_vmx,
	[VM_EXITCODE_SVM] = vmexit_svm,
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP] = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_BPT] = vmexit_breakpoint,
	[VM_EXITCODE_IPI] = vmexit_ipi,
};

static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus;

	if (vcpumap[vcpu] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vcpu]);
		assert(error == 0);
	}

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu, &active_cpus));

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
	assert(error == 0);

	while (1) {
		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
		if (error != 0)
			break;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	uint16_t sockets, cores, threads, maxcpus;
	int tmp, error;

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
	if (error != 0)
		return (1);

	error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus);
	if (error == 0)
		return (maxcpus);
	else
		return (1);
}

void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (get_config_bool_default("x86.vmexit_on_pause", false)) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (get_config_bool_default("x86.x2apic", false))
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been set up by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
		if (!romboot) {
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	if (vm_limit_rights(ctx) != 0)
		err(EX_OSERR, "vm_limit_rights");
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, cpu_sockets, cpu_cores, cpu_threads,
	    0 /* maxcpus, unimplemented */);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

static void
spinup_vcpu(struct vmctx *ctx, int vcpu, bool suspend)
{
	int error;
	uint64_t rip;

	error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	fbsdrun_set_capabilities(ctx, vcpu);
	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
	assert(error == 0);

	error = vm_set_capability(ctx, vcpu, VM_CAP_IPI_EXIT, 1);
	assert(error == 0);

	fbsdrun_addcpu(ctx, vcpu, rip, suspend);
}

static bool
parse_config_option(const char *option)
{
	const char *value;
	char *path;

	value = strchr(option, '=');
	if (value == NULL || value[1] == '\0')
		return (false);
	path = strndup(option, value - option);
	if (path == NULL)
		err(4, "Failed to allocate memory");
	set_config_value(path, value + 1);
	return (true);
}

static void
parse_simple_config_file(const char *path)
{
	FILE *fp;
	char *line, *cp;
	size_t linecap;
	unsigned int lineno;

	fp = fopen(path, "r");
	if (fp == NULL)
		err(4, "Failed to open configuration file %s", path);
	line = NULL;
	linecap = 0;
	lineno = 1;
	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
		if (*line == '#' || *line == '\n')
			continue;
		cp = strchr(line, '\n');
		if (cp != NULL)
			*cp = '\0';
		if (!parse_config_option(line))
			errx(4, "%s line %u: invalid config option '%s'", path,
			    lineno, line);
	}
	free(line);
	fclose(fp);
}

static void
parse_gdb_options(const char *opt)
{
	const char *sport;
	char *colon;

	if (opt[0] == 'w') {
		set_config_bool("gdb.wait", true);
		opt++;
	}

	colon = strrchr(opt, ':');
	if (colon == NULL) {
		sport = opt;
	} else {
		*colon = '\0';
		colon++;
		sport = colon;
		set_config_value("gdb.address", opt);
	}

	set_config_value("gdb.port", sport);
}

static void
set_defaults(void)
{

	set_config_bool("acpi_tables", false);
	set_config_value("memory.size", "256M");
	set_config_bool("x86.strictmsr", true);
}

int
main(int argc, char *argv[])
{
	int c, error, err;
	int max_vcpus, memflags;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;
	const char *optstr, *value, *vmname;
#ifdef BHYVE_SNAPSHOT
	char *restore_file;
	struct restore_state rstate;

	restore_file = NULL;
#endif

	init_config();
	set_defaults();
	progname = basename(argv[0]);

#ifdef BHYVE_SNAPSHOT
	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:K:U:r:";
#else
	optstr = "aehuwxACDHIPSWYk:o:p:G:c:s:m:l:K:U:";
#endif
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			set_config_bool("x86.x2apic", false);
			break;
		case 'A':
			set_config_bool("acpi_tables", true);
			break;
		case 'D':
			set_config_bool("destroy_on_poweroff", true);
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid vcpu pinning "
				    "configuration '%s'", optarg);
			}
			break;
		case 'c':
			if (topology_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid cpu topology "
				    "'%s'", optarg);
			}
			break;
		case 'C':
			set_config_bool("memory.guest_in_core", true);
			break;
		case 'G':
			parse_gdb_options(optarg);
			break;
		case 'k':
			parse_simple_config_file(optarg);
			break;
		case 'K':
			set_config_value("keyboard.layout", optarg);
			break;
		case 'l':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				lpc_print_supported_devices();
				exit(0);
			} else if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
#ifdef BHYVE_SNAPSHOT
		case 'r':
			restore_file = optarg;
			break;
#endif
		case 's':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				pci_print_supported_devices();
				exit(0);
			} else if (pci_parse_slot(optarg) != 0)
				exit(4);
			else
				break;
		case 'S':
			set_config_bool("memory.wired", true);
			break;
		case 'm':
			set_config_value("memory.size", optarg);
			break;
		case 'o':
			if (!parse_config_option(optarg))
				errx(EX_USAGE, "invalid configuration option '%s'", optarg);
			break;
		case 'H':
			set_config_bool("x86.vmexit_on_hlt", true);
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			set_config_bool("x86.vmexit_on_pause", true);
			break;
		case 'e':
			set_config_bool("x86.strictio", true);
			break;
		case 'u':
			set_config_bool("rtc.use_localtime", false);
			break;
		case 'U':
			set_config_value("uuid", optarg);
			break;
		case 'w':
			set_config_bool("x86.strictmsr", false);
			break;
		case 'W':
			set_config_bool("virtio_msix", false);
			break;
		case 'x':
			set_config_bool("x86.x2apic", true);
			break;
		case 'Y':
			set_config_bool("x86.mptable", false);
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc > 1)
		usage(1);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		error = load_restore_file(restore_file, &rstate);
		if (error) {
			fprintf(stderr, "Failed to read checkpoint info from "
			    "file: '%s'.\n", restore_file);
			exit(1);
		}
		vmname = lookup_vmname(&rstate);
		if (vmname != NULL)
			set_config_value("name", vmname);
	}
#endif

	if (argc == 1)
		set_config_value("name", argv[0]);

	vmname = get_config_value("name");
	if (vmname == NULL)
		usage(1);

	if (get_config_bool_default("config.dump", false)) {
		dump_config();
		exit(1);
	}

	calc_topology();
	build_vcpumaps();

	value = get_config_value("memory.size");
	error = vm_parse_memsize(value, &memsize);
	if (error)
		errx(EX_USAGE, "invalid memsize '%s'", value);

	ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		guest_ncpus = lookup_guest_ncpus(&rstate);
		memflags = lookup_memflags(&rstate);
		memsize = lookup_memsize(&rstate);
	}

	if (guest_ncpus < 1) {
		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
		exit(1);
	}
#endif

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

	fbsdrun_set_capabilities(ctx, BSP);

	memflags = 0;
	if (get_config_bool_default("memory.wired", false))
		memflags |= VM_MEM_F_WIRED;
	if (get_config_bool_default("memory.guest_in_core", false))
		memflags |= VM_MEM_F_INCORE;
	vm_set_memflags(ctx, memflags);
	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to set up memory (%d)\n", errno);
		exit(4);
	}

	error = init_msr();
	if (error) {
		fprintf(stderr, "init_msr error %d", error);
		exit(4);
	}

	init_mem(guest_ncpus);
	init_inout();
	kernemu_dev_init();
	init_bootrom(ctx);
	atkbdc_init(ctx);
	pci_irq_init(ctx);
	ioapic_init(ctx);

	rtc_init(ctx);
	sci_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0) {
		perror("device emulation initialization error");
		exit(4);
	}

	/*
	 * Initialize after PCI, to allow a bootrom file to reserve the high
	 * region.
	 */
	if (get_config_bool("acpi_tables"))
		vmgenc_init(ctx);

	init_gdb(ctx);

	if (lpc_bootrom()) {
		if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
			fprintf(stderr, "ROM boot failed: unrestricted guest "
			    "capability not available\n");
			exit(4);
		}
		error = vcpu_reset(ctx, BSP);
		assert(error == 0);
	}

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		fprintf(stdout, "Pausing pci devs...\r\n");
		if (vm_pause_user_devs(ctx) != 0) {
			fprintf(stderr, "Failed to pause PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring vm mem...\r\n");
		if (restore_vm_mem(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore VM memory.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring pci devs...\r\n");
		if (vm_restore_user_devs(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring kernel structs...\r\n");
		if (vm_restore_kern_structs(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore kernel structs.\n");
			exit(1);
		}

		fprintf(stdout, "Resuming pci devs...\r\n");
		if (vm_resume_user_devs(ctx) != 0) {
			fprintf(stderr, "Failed to resume PCI device state.\n");
			exit(1);
		}
	}
#endif

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	if (get_config_bool_default("x86.mptable", true)) {
		error = mptable_build(ctx, guest_ncpus);
		if (error) {
			perror("error building the guest tables");
			exit(4);
		}
	}

	error = smbios_build(ctx);
	if (error != 0)
		exit(4);

	if (get_config_bool("acpi_tables")) {
		error = acpi_build(ctx, guest_ncpus);
		assert(error == 0);
	}

	if (lpc_bootrom())
		fwctl_init();

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

#ifndef WITHOUT_CAPSICUM
	caph_cache_catpages();

	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	if (caph_enter() == -1)
		errx(EX_OSERR, "cap_enter() failed");
#endif

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL)
		destroy_restore_state(&rstate);

	/* initialize mutex/cond variables */
	init_snapshot();

	/*
	 * checkpointing thread for communication with bhyvectl
	 */
	if (init_checkpoint_thread(ctx) < 0)
		printf("Failed to start checkpoint thread!\r\n");

	if (restore_file != NULL)
		vm_restore_time(ctx);
#endif

	/* Allocate per-VCPU resources. */
	vmexit = calloc(guest_ncpus, sizeof(*vmexit));
	mt_vmm_info = calloc(guest_ncpus, sizeof(*mt_vmm_info));

	/*
	 * Add all vCPUs.
	 */
	for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		bool suspend = (vcpu != BSP);
#ifdef BHYVE_SNAPSHOT
		if (restore_file != NULL)
			suspend = false;
#endif
		spinup_vcpu(ctx, vcpu, suspend);
	}

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(4);
}
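
/*
 * Example invocation (a sketch for orientation, not taken from this file:
 * the device names "hostbridge", "lpc", "virtio-blk", the "com1,stdio"
 * LPC configuration, and the disk image path are assumptions drawn from
 * the bhyve(8) manual page):
 *
 *	bhyve -A -H -P -c 2 -m 1G \
 *	    -s 0,hostbridge -s 1,lpc -s 2,virtio-blk,disk.img \
 *	    -l com1,stdio myvm
 *
 * The single positional argument is the VM name.  Each option maps onto a
 * configuration variable set in main() above, e.g. -A sets "acpi_tables",
 * -m sets "memory.size", -c feeds topology_parse(), and -o var=value sets
 * any variable directly via parse_config_option().
 */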