/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <amd64/vmm/intel/vmcs.h>
#include <x86/apicreg.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#ifdef __amd64__
#include "amd64/atkbdc.h"
#endif
#include "bootrom.h"
#include "config.h"
#include "inout.h"
#include "debug.h"
#include "e820.h"
#ifdef __amd64__
#include "amd64/fwctl.h"
#endif
#include "gdb.h"
#include "ioapic.h"
#include "kernemu_dev.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "qemu_fwcfg.h"
#include "smbiostbl.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "tpm_device.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#include "vmgenc.h"

#define	MB	(1024UL * 1024)
#define	GB	(1024UL * MB)

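/*
 * Human-readable descriptions of the Intel VT-x exit reasons, indexed by
 * the basic exit reason number.  Used by vmexit_vmx() when reporting an
 * otherwise unhandled VMX exit.
 */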
static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *, struct vm_run *);

int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;

int raw_stdio = 0;

static char *progname;
static const int BSP = 0;

static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);

static struct vcpu_info {
	struct vmctx	*ctx;
	struct vcpu	*vcpu;
	int		vcpuid;
} *vcpu_info;

static cpuset_t **vcpumap;

static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-AaCDeHhPSuWwxY]\n"
	    "       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    "       %*s [-G port] [-k config_file] [-l lpc] [-m mem] [-o var=value]\n"
	    "       %*s [-p vcpu:hostcpu] [-r file] [-s pci] [-U uuid] vmname\n"
	    "       -A: create ACPI tables\n"
	    "       -a: local apic is in xAPIC mode (deprecated)\n"
	    "       -C: include guest memory in core file\n"
	    "       -c: number of CPUs and/or topology specification\n"
	    "       -D: destroy on power-off\n"
	    "       -e: exit on unhandled I/O access\n"
	    "       -G: start a debug server\n"
	    "       -H: vmexit from the guest on HLT\n"
	    "       -h: help\n"
	    "       -k: key=value flat config file\n"
	    "       -K: PS2 keyboard layout\n"
	    "       -l: LPC device configuration\n"
	    "       -m: memory size\n"
	    "       -o: set config 'var' to 'value'\n"
	    "       -P: vmexit from the guest on pause\n"
	    "       -p: pin 'vcpu' to 'hostcpu'\n"
#ifdef BHYVE_SNAPSHOT
	    "       -r: path to checkpoint file\n"
#endif
	    "       -S: guest memory cannot be swapped\n"
	    "       -s: <slot,driver,configinfo> PCI slot config\n"
	    "       -U: UUID\n"
	    "       -u: RTC keeps UTC time\n"
	    "       -W: force virtio to use single-vector MSI\n"
	    "       -w: ignore unimplemented MSRs\n"
	    "       -x: local APIC is in x2APIC mode\n"
	    "       -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * XXX This parser is known to have the following issues:
 * 1. It accepts null key=value tokens ",," as setting "cpus" to an
 *    empty string.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	char *cp, *str, *tofree;

	if (*opt == '\0') {
		set_config_value("sockets", "1");
		set_config_value("cores", "1");
		set_config_value("threads", "1");
		set_config_value("cpus", "1");
		return (0);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			set_config_value("cpus", cp + strlen("cpus="));
		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
			set_config_value("sockets", cp + strlen("sockets="));
		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
			set_config_value("cores", cp + strlen("cores="));
		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
			set_config_value("threads", cp + strlen("threads="));
		else if (strchr(cp, '=') != NULL)
			goto out;
		else
			set_config_value("cpus", cp);
	}
	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}

static int
parse_int_value(const char *key, const char *value, int minval, int maxval)
{
	char *cp;
	long lval;

	errno = 0;
	lval = strtol(value, &cp, 0);
	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
	    lval > maxval)
		errx(4, "Invalid value for %s: '%s'", key, value);
	return (lval);
}

/*
 * Set the sockets, cores, threads, and guest_cpus variables based on
 * the configured topology.
 *
 * The limits of UINT16_MAX are due to the types passed to
 * vm_set_topology(). vmm.ko may enforce tighter limits.
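 *
 * For example, "-c 4,sockets=2,cores=2" requests 4 vCPUs with a
 * 2-socket, 2-core, 1-thread topology; since 2 * 2 * 1 == 4, the
 * request is accepted.  "-c 6,sockets=2,cores=2" is rejected because
 * the product of the topology does not match the explicit vCPU count.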
 */
static void
calc_topology(void)
{
	const char *value;
	bool explicit_cpus;
	uint64_t ncpus;

	value = get_config_value("cpus");
	if (value != NULL) {
		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
		explicit_cpus = true;
	} else {
		guest_ncpus = 1;
		explicit_cpus = false;
	}
	value = get_config_value("cores");
	if (value != NULL)
		cpu_cores = parse_int_value("cores", value, 1, UINT16_MAX);
	else
		cpu_cores = 1;
	value = get_config_value("threads");
	if (value != NULL)
		cpu_threads = parse_int_value("threads", value, 1, UINT16_MAX);
	else
		cpu_threads = 1;
	value = get_config_value("sockets");
	if (value != NULL)
		cpu_sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
	else
		cpu_sockets = guest_ncpus;

	/*
	 * Compute sockets * cores * threads avoiding overflow. The
	 * range check above ensures these are 16 bit values.
	 */
	ncpus = (uint64_t)cpu_sockets * cpu_cores * cpu_threads;
	if (ncpus > UINT16_MAX)
		errx(4, "Computed number of vCPUs too high: %ju",
		    (uintmax_t)ncpus);

	if (explicit_cpus) {
		if (guest_ncpus != (int)ncpus)
			errx(4, "Topology (%d sockets, %d cores, %d threads) "
			    "does not match %d vCPUs",
			    cpu_sockets, cpu_cores, cpu_threads,
			    guest_ncpus);
	} else
		guest_ncpus = ncpus;
}

static int
pincpu_parse(const char *opt)
{
	const char *value;
	char *newval;
	char key[16];
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0) {
		fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
	value = get_config_value(key);

	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
	    value != NULL ? "," : "", pcpu) == -1) {
		perror("failed to build new cpuset string");
		return (-1);
	}

	set_config_value(key, newval);
	free(newval);
	return (0);
}

/*
 * Parse a host CPU list of the form "0,2,4-7": a comma-separated set of
 * CPU numbers and inclusive ranges.  The named CPUs are added to 'set'.
 */
static void
parse_cpuset(int vcpu, const char *list, cpuset_t *set)
{
	char *cp, *token;
	int pcpu, start;

	CPU_ZERO(set);
	start = -1;
	token = __DECONST(char *, list);
	for (;;) {
		pcpu = strtoul(token, &cp, 0);
		if (cp == token)
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		if (pcpu < 0 || pcpu >= CPU_SETSIZE)
			errx(4, "hostcpu '%d' outside valid range from 0 to %d",
			    pcpu, CPU_SETSIZE - 1);
		switch (*cp) {
		case ',':
		case '\0':
			if (start >= 0) {
				if (start > pcpu)
					errx(4, "Invalid hostcpu range %d-%d",
					    start, pcpu);
				while (start < pcpu) {
					CPU_SET(start, set);
					start++;
				}
				start = -1;
			}
			CPU_SET(pcpu, set);
			break;
		case '-':
			if (start >= 0)
				errx(4, "invalid cpuset for vcpu %d: '%s'",
				    vcpu, list);
			start = pcpu;
			break;
		default:
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		}
		if (*cp == '\0')
			break;
		token = cp + 1;
	}
}

static void
build_vcpumaps(void)
{
	char key[16];
	const char *value;
	int vcpu;

	vcpumap = calloc(guest_ncpus, sizeof(*vcpumap));
	for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
		value = get_config_value(key);
		if (value == NULL)
			continue;
		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
		if (vcpumap[vcpu] == NULL)
			err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
		parse_cpuset(vcpu, value, vcpumap[vcpu]);
	}
}

void
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
    int errcode)
{
	int error, restart_instruction;

	restart_instruction = 1;

	error = vm_inject_exception(vcpu, vector, errcode_valid, errcode,
	    restart_instruction);
	assert(error == 0);
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

#ifdef BHYVE_SNAPSHOT
uintptr_t
paddr_host2guest(struct vmctx *ctx, void *addr)
{
	return (vm_rev_map_gpa(ctx, addr));
}
#endif

int
fbsdrun_virtio_msix(void)
{

	return (get_config_bool_default("virtio_msix", true));
}

static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct vcpu_info *vi = param;
	int error;

	snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid);
	pthread_set_name_np(pthread_self(), tname);

	if (vcpumap[vi->vcpuid] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vi->vcpuid]);
		assert(error == 0);
	}

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_add(vi->vcpuid);
#endif
	gdb_cpu_add(vi->vcpu);

	vm_loop(vi->ctx, vi->vcpu);

	/* not reached */
	exit(1);
	return (NULL);
}

static void
fbsdrun_addcpu(struct vcpu_info *vi)
{
	pthread_t thr;
	int error;

	error = vm_activate_cpu(vi->vcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", vi->vcpuid);

	CPU_SET_ATOMIC(vi->vcpuid, &cpumask);

	vm_suspend_cpu(vi->vcpu);

	error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi);
	assert(error == 0);
}

/*
 * Remove a vCPU from the set of running CPUs.  APs simply exit their vCPU
 * thread; the BSP instead waits here until every other vCPU has been
 * deleted, so that the suspend reason is handled by a single thread.
 */
static void
fbsdrun_deletecpu(int vcpu)
{
	static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

	pthread_mutex_lock(&resetcpu_mtx);
	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(4);
	}

	CPU_CLR(vcpu, &cpumask);

	if (vcpu != BSP) {
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
		/* NOTREACHED */
	}

	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);
}

static int
vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	int error;
	int bytes, port, in;

	vme = vmrun->vm_exit;
	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	in = vme->u.inout.in;

	error = emulate_inout(ctx, vcpu, vme);
	if (error) {
		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
		    in ? "in" : "out",
		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
		    port, vme->rip);
		return (VMEXIT_ABORT);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	uint64_t val;
	uint32_t eax, edx;
	int error;

	vme = vmrun->vm_exit;

	val = 0;
	error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
	if (error != 0) {
		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
		    vme->u.msr.code, vcpu_id(vcpu));
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(vcpu);
			return (VMEXIT_CONTINUE);
		}
	}

	eax = val;
	error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
	assert(error == 0);

	edx = val >> 32;
	error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	int error;

	vme = vmrun->vm_exit;

	error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval);
	if (error != 0) {
		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
		    vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu));
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(vcpu);
			return (VMEXIT_CONTINUE);
		}
	}
	return (VMEXIT_CONTINUE);
}

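/*
 * When DEBUG_EPT_MISCONFIG is defined, vmexit_vmx() reads back the faulting
 * guest-physical address and its page-table entries so that an EPT
 * misconfiguration can be diagnosed from the exit dump below.
 */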
#define	DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400

static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif

static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;

	vme = vmrun->vm_exit;

	fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vme->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vme->u.vmx.exit_reason,
	    vmexit_vmx_desc(vme->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vme->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vme->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(vcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

static int
vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;

	vme = vmrun->vm_exit;

	fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
	fprintf(stderr, "\treason\t\tSVM\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
	fprintf(stderr, "\texitcode\t%#lx\n", vme->u.svm.exitcode);
	fprintf(stderr, "\texitinfo1\t%#lx\n", vme->u.svm.exitinfo1);
	fprintf(stderr, "\texitinfo2\t%#lx\n", vme->u.svm.exitinfo2);
	return (VMEXIT_ABORT);
}

static int
vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_reqidle(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
	gdb_cpu_mtrap(vcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(vcpu_id(vcpu));
#endif

	return (VMEXIT_CONTINUE);
}

static int
vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vie *vie;
	int err, i, cs_d;
	enum vm_cpu_mode mode;

	vme = vmrun->vm_exit;

	vie = &vme->u.inst_emul.vie;
	if (!vie->decoded) {
		/*
		 * Attempt to decode in userspace as a fallback. This allows
		 * updating instruction decode in bhyve without rebooting the
		 * kernel (rapid prototyping), albeit with much slower
		 * emulation.
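		 *
		 * If the userspace decoder cannot handle the instruction
		 * either, the raw instruction bytes are dumped below and
		 * VMEXIT_ABORT is returned.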
		 */
		vie_restart(vie);
		mode = vme->u.inst_emul.paging.cpu_mode;
		cs_d = vme->u.inst_emul.cs_d;
		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
			goto fail;
		if (vm_set_register(vcpu, VM_REG_GUEST_RIP,
		    vme->rip + vie->num_processed) != 0)
			goto fail;
	}

	err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
	    &vme->u.inst_emul.paging);
	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vme->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction sequence [ ");
	for (i = 0; i < vie->num_valid; i++)
		fprintf(stderr, "%02x", vie->inst[i]);
	FPRINTLN(stderr, " ] at 0x%lx", vme->rip);
	return (VMEXIT_ABORT);
}

static int
vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	enum vm_suspend_how how;
	int vcpuid = vcpu_id(vcpu);

	vme = vmrun->vm_exit;

	how = vme->u.suspended.how;

	fbsdrun_deletecpu(vcpuid);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

static int
vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun __unused)
{

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
	gdb_cpu_suspend(vcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
	/*
	 * XXX-MJ sleep for a short period to avoid chewing up the CPU in the
	 * window between activation of the vCPU thread and the STARTUP IPI.
	 */
	usleep(1000);
	return (VMEXIT_CONTINUE);
}

static int
vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	gdb_cpu_breakpoint(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

static int
vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	cpuset_t *dmask;
	int error = -1;
	int i;

	dmask = vmrun->cpuset;
	vme = vmrun->vm_exit;

	switch (vme->u.ipi.mode) {
	case APIC_DELMODE_INIT:
		CPU_FOREACH_ISSET(i, dmask) {
			error = vm_suspend_cpu(vcpu_info[i].vcpu);
			if (error) {
				warnx("%s: failed to suspend cpu %d\n",
				    __func__, i);
				break;
			}
		}
		break;
	case APIC_DELMODE_STARTUP:
		CPU_FOREACH_ISSET(i, dmask) {
			spinup_ap(vcpu_info[i].vcpu,
			    vme->u.ipi.vector << PAGE_SHIFT);
		}
		error = 0;
		break;
	default:
		break;
	}

	return (error);
}

int vmexit_task_switch(struct vmctx *, struct vcpu *, struct vm_run *);

static const vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT] = vmexit_inout,
	[VM_EXITCODE_INOUT_STR] = vmexit_inout,
	[VM_EXITCODE_VMX] = vmexit_vmx,
	[VM_EXITCODE_SVM] = vmexit_svm,
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP] = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_BPT] = vmexit_breakpoint,
	[VM_EXITCODE_IPI] = vmexit_ipi,
	[VM_EXITCODE_HLT] = vmexit_hlt,
	[VM_EXITCODE_PAUSE] = vmexit_pause,
};

static void
vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
{
	struct vm_exit vme;
	struct vm_run vmrun;
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus, dmask;

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));

	vmrun.vm_exit = &vme;
	vmrun.cpuset = &dmask;
	vmrun.cpusetsize = sizeof(dmask);

	while (1) {
		error = vm_run(vcpu, &vmrun);
		if (error != 0)
			break;

		exitcode = vme.exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, vcpu, &vmrun);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
{
	uint16_t sockets, cores, threads, maxcpus;
	int tmp, error;

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
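	 *
	 * If the capability cannot be queried the limit falls back to a
	 * single vCPU; otherwise it is the maxcpus value reported by
	 * vm_get_topology().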
	 */
	error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
	if (error != 0)
		return (1);

	error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus);
	if (error == 0)
		return (maxcpus);
	else
		return (1);
}

static void
fbsdrun_set_capabilities(struct vcpu *vcpu)
{
	int err, tmp;

	if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
		err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1);
	}

	if (get_config_bool_default("x86.vmexit_on_pause", false)) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1);
	}

	if (get_config_bool_default("x86.x2apic", false))
		err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1);

	err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1);
	assert(err == 0);
}

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been set up by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
		if (!romboot) {
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	if (vm_limit_rights(ctx) != 0)
		err(EX_OSERR, "vm_limit_rights");
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, cpu_sockets, cpu_cores, cpu_threads, 0);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

static void
spinup_vcpu(struct vcpu_info *vi, bool bsp)
{
	int error;

	if (!bsp) {
		fbsdrun_set_capabilities(vi->vcpu);

		/*
		 * Enable the 'unrestricted guest' mode for APs.
		 *
		 * APs start up in power-on 16-bit mode.
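		 * Real (16-bit) mode generally cannot be executed directly
		 * under VT-x unless the unrestricted guest capability is
		 * enabled.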
		 */
		error = vm_set_capability(vi->vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
		assert(error == 0);
	}

	fbsdrun_addcpu(vi);
}

static bool
parse_config_option(const char *option)
{
	const char *value;
	char *path;

	value = strchr(option, '=');
	if (value == NULL || value[1] == '\0')
		return (false);
	path = strndup(option, value - option);
	if (path == NULL)
		err(4, "Failed to allocate memory");
	set_config_value(path, value + 1);
	return (true);
}

static void
parse_simple_config_file(const char *path)
{
	FILE *fp;
	char *line, *cp;
	size_t linecap;
	unsigned int lineno;

	fp = fopen(path, "r");
	if (fp == NULL)
		err(4, "Failed to open configuration file %s", path);
	line = NULL;
	linecap = 0;
	lineno = 1;
	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
		if (*line == '#' || *line == '\n')
			continue;
		cp = strchr(line, '\n');
		if (cp != NULL)
			*cp = '\0';
		if (!parse_config_option(line))
			errx(4, "%s line %u: invalid config option '%s'", path,
			    lineno, line);
	}
	free(line);
	fclose(fp);
}

static void
parse_gdb_options(const char *opt)
{
	const char *sport;
	char *colon;

	if (opt[0] == 'w') {
		set_config_bool("gdb.wait", true);
		opt++;
	}

	colon = strrchr(opt, ':');
	if (colon == NULL) {
		sport = opt;
	} else {
		*colon = '\0';
		colon++;
		sport = colon;
		set_config_value("gdb.address", opt);
	}

	set_config_value("gdb.port", sport);
}

static void
set_defaults(void)
{

	set_config_bool("acpi_tables", true);
	set_config_bool("acpi_tables_in_memory", true);
	set_config_value("memory.size", "256M");
	set_config_bool("x86.strictmsr", true);
	set_config_value("lpc.fwcfg", "bhyve");
}

int
main(int argc, char *argv[])
{
	int c, error;
	int max_vcpus, memflags;
	struct vcpu *bsp;
	struct vmctx *ctx;
	struct qemu_fwcfg_item *e820_fwcfg_item;
	size_t memsize;
	const char *optstr, *value, *vmname;
#ifdef BHYVE_SNAPSHOT
	char *restore_file;
	struct restore_state rstate;

	restore_file = NULL;
#endif

	init_config();
	set_defaults();
	progname = basename(argv[0]);

#ifdef BHYVE_SNAPSHOT
	optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:r:";
#else
	optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:";
#endif
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			set_config_bool("x86.x2apic", false);
			break;
		case 'A':
			/*
			 * NOP. For backward compatibility. Most systems don't
			 * work properly without sane ACPI tables. Therefore,
			 * we're always generating them.
			 */
			break;
		case 'D':
			set_config_bool("destroy_on_poweroff", true);
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid vcpu pinning "
				    "configuration '%s'", optarg);
			}
			break;
		case 'c':
			if (topology_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid cpu topology "
				    "'%s'", optarg);
			}
			break;
		case 'C':
			set_config_bool("memory.guest_in_core", true);
			break;
		case 'f':
			if (qemu_fwcfg_parse_cmdline_arg(optarg) != 0) {
				errx(EX_USAGE, "invalid fwcfg item '%s'", optarg);
			}
			break;
		case 'G':
			parse_gdb_options(optarg);
			break;
		case 'k':
			parse_simple_config_file(optarg);
			break;
		case 'K':
			set_config_value("keyboard.layout", optarg);
			break;
		case 'l':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				lpc_print_supported_devices();
				exit(0);
			} else if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
#ifdef BHYVE_SNAPSHOT
		case 'r':
			restore_file = optarg;
			break;
#endif
		case 's':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				pci_print_supported_devices();
				exit(0);
			} else if (pci_parse_slot(optarg) != 0)
				exit(4);
			else
				break;
		case 'S':
			set_config_bool("memory.wired", true);
			break;
		case 'm':
			set_config_value("memory.size", optarg);
			break;
		case 'o':
			if (!parse_config_option(optarg))
				errx(EX_USAGE, "invalid configuration option '%s'", optarg);
			break;
		case 'H':
			set_config_bool("x86.vmexit_on_hlt", true);
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			set_config_bool("x86.vmexit_on_pause", true);
			break;
		case 'e':
			set_config_bool("x86.strictio", true);
			break;
		case 'u':
			set_config_bool("rtc.use_localtime", false);
			break;
		case 'U':
			set_config_value("uuid", optarg);
			break;
		case 'w':
			set_config_bool("x86.strictmsr", false);
			break;
		case 'W':
			set_config_bool("virtio_msix", false);
			break;
		case 'x':
			set_config_bool("x86.x2apic", true);
			break;
		case 'Y':
			set_config_bool("x86.mptable", false);
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc > 1)
		usage(1);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		error = load_restore_file(restore_file, &rstate);
		if (error) {
			fprintf(stderr, "Failed to read checkpoint info from "
			    "file: '%s'.\n", restore_file);
			exit(1);
		}
		vmname = lookup_vmname(&rstate);
		if (vmname != NULL)
			set_config_value("name", vmname);
	}
#endif

	if (argc == 1)
		set_config_value("name", argv[0]);

	vmname = get_config_value("name");
	if (vmname == NULL)
		usage(1);

	if (get_config_bool_default("config.dump", false)) {
		dump_config();
		exit(1);
	}

	calc_topology();
	build_vcpumaps();

	value = get_config_value("memory.size");
	error = vm_parse_memsize(value, &memsize);
	if (error)
		errx(EX_USAGE, "invalid memsize '%s'", value);

	ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		guest_ncpus = lookup_guest_ncpus(&rstate);
		memflags = lookup_memflags(&rstate);
		memsize = lookup_memsize(&rstate);
	}

	if (guest_ncpus < 1) {
		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
		exit(1);
	}
#endif

	bsp = vm_vcpu_open(ctx, BSP);
	max_vcpus = num_vcpus_allowed(ctx, bsp);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

	fbsdrun_set_capabilities(bsp);

	/*
	 * Allocate per-VCPU resources.
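	 *
	 * The BSP handle opened above is reused; handles for the APs are
	 * opened here.  The vCPU threads themselves are created later by
	 * fbsdrun_addcpu().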
	 */
	vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info));
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) {
		vcpu_info[vcpuid].ctx = ctx;
		vcpu_info[vcpuid].vcpuid = vcpuid;
		if (vcpuid == BSP)
			vcpu_info[vcpuid].vcpu = bsp;
		else
			vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
	}

	memflags = 0;
	if (get_config_bool_default("memory.wired", false))
		memflags |= VM_MEM_F_WIRED;
	if (get_config_bool_default("memory.guest_in_core", false))
		memflags |= VM_MEM_F_INCORE;
	vm_set_memflags(ctx, memflags);
	error = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (error) {
		fprintf(stderr, "Unable to setup memory (%d)\n", errno);
		exit(4);
	}

	error = init_msr();
	if (error) {
		fprintf(stderr, "init_msr error %d", error);
		exit(4);
	}

	init_mem(guest_ncpus);
	init_inout();
	kernemu_dev_init();
	init_bootrom(ctx);
#ifdef __amd64__
	atkbdc_init(ctx);
#endif
	pci_irq_init(ctx);
	ioapic_init(ctx);

	rtc_init(ctx);
	sci_init(ctx);

	if (qemu_fwcfg_init(ctx) != 0) {
		fprintf(stderr, "qemu fwcfg initialization error");
		exit(4);
	}

	if (qemu_fwcfg_add_file("opt/bhyve/hw.ncpu", sizeof(guest_ncpus),
	    &guest_ncpus) != 0) {
		fprintf(stderr, "Could not add qemu fwcfg opt/bhyve/hw.ncpu");
		exit(4);
	}

	if (e820_init(ctx) != 0) {
		fprintf(stderr, "Unable to setup E820");
		exit(4);
	}

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0) {
		perror("device emulation initialization error");
		exit(4);
	}
	if (init_tpm(ctx) != 0) {
		fprintf(stderr, "Failed to init TPM device");
		exit(4);
	}

	/*
	 * Initialize after PCI, to allow a bootrom file to reserve the high
	 * region.
	 */
	if (get_config_bool("acpi_tables"))
		vmgenc_init(ctx);

	init_gdb(ctx);

	if (lpc_bootrom()) {
		if (vm_set_capability(bsp, VM_CAP_UNRESTRICTED_GUEST, 1)) {
			fprintf(stderr, "ROM boot failed: unrestricted guest "
			    "capability not available\n");
			exit(4);
		}
		error = vcpu_reset(bsp);
		assert(error == 0);
	}

	/*
	 * Add all vCPUs.
	 */
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
		spinup_vcpu(&vcpu_info[vcpuid], vcpuid == BSP);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		fprintf(stdout, "Pausing pci devs...\r\n");
		if (vm_pause_devices() != 0) {
			fprintf(stderr, "Failed to pause PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring vm mem...\r\n");
		if (restore_vm_mem(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore VM memory.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring pci devs...\r\n");
		if (vm_restore_devices(&rstate) != 0) {
			fprintf(stderr, "Failed to restore PCI device state.\n");
			exit(1);
		}

		fprintf(stdout, "Restoring kernel structs...\r\n");
		if (vm_restore_kern_structs(ctx, &rstate) != 0) {
			fprintf(stderr, "Failed to restore kernel structs.\n");
			exit(1);
		}

		fprintf(stdout, "Resuming pci devs...\r\n");
		if (vm_resume_devices() != 0) {
			fprintf(stderr, "Failed to resume PCI device state.\n");
			exit(1);
		}
	}
#endif

	/*
	 * build the guest tables, MP etc.
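	 * This includes the MP table, SMBIOS, the ACPI tables and the E820
	 * map exported via qemu fwcfg.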
1557 */ 1558 if (get_config_bool_default("x86.mptable", true)) { 1559 error = mptable_build(ctx, guest_ncpus); 1560 if (error) { 1561 perror("error to build the guest tables"); 1562 exit(4); 1563 } 1564 } 1565 1566 error = smbios_build(ctx); 1567 if (error != 0) 1568 exit(4); 1569 1570 if (get_config_bool("acpi_tables")) { 1571 error = acpi_build(ctx, guest_ncpus); 1572 assert(error == 0); 1573 } 1574 1575 e820_fwcfg_item = e820_get_fwcfg_item(); 1576 if (e820_fwcfg_item == NULL) { 1577 fprintf(stderr, "invalid e820 table"); 1578 exit(4); 1579 } 1580 if (qemu_fwcfg_add_file("etc/e820", e820_fwcfg_item->size, 1581 e820_fwcfg_item->data) != 0) { 1582 fprintf(stderr, "could not add qemu fwcfg etc/e820"); 1583 exit(4); 1584 } 1585 free(e820_fwcfg_item); 1586 1587 #ifdef __amd64__ 1588 if (lpc_bootrom() && strcmp(lpc_fwcfg(), "bhyve") == 0) { 1589 fwctl_init(); 1590 } 1591 #endif 1592 1593 /* 1594 * Change the proc title to include the VM name. 1595 */ 1596 setproctitle("%s", vmname); 1597 1598 #ifdef BHYVE_SNAPSHOT 1599 /* initialize mutex/cond variables */ 1600 init_snapshot(); 1601 1602 /* 1603 * checkpointing thread for communication with bhyvectl 1604 */ 1605 if (init_checkpoint_thread(ctx) != 0) 1606 errx(EX_OSERR, "Failed to start checkpoint thread"); 1607 #endif 1608 1609 #ifndef WITHOUT_CAPSICUM 1610 caph_cache_catpages(); 1611 1612 if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1) 1613 errx(EX_OSERR, "Unable to apply rights for sandbox"); 1614 1615 if (caph_enter() == -1) 1616 errx(EX_OSERR, "cap_enter() failed"); 1617 #endif 1618 1619 #ifdef BHYVE_SNAPSHOT 1620 if (restore_file != NULL) { 1621 destroy_restore_state(&rstate); 1622 if (vm_restore_time(ctx) < 0) 1623 err(EX_OSERR, "Unable to restore time"); 1624 1625 for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) 1626 vm_resume_cpu(vcpu_info[vcpuid].vcpu); 1627 } else 1628 #endif 1629 vm_resume_cpu(bsp); 1630 1631 /* 1632 * Head off to the main event dispatch loop 1633 */ 1634 mevent_dispatch(); 1635 1636 exit(4); 1637 } 1638