1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/types.h> 35 #ifndef WITHOUT_CAPSICUM 36 #include <sys/capsicum.h> 37 #endif 38 #include <sys/mman.h> 39 #ifdef BHYVE_SNAPSHOT 40 #include <sys/socket.h> 41 #include <sys/stat.h> 42 #endif 43 #include <sys/time.h> 44 #ifdef BHYVE_SNAPSHOT 45 #include <sys/un.h> 46 #endif 47 48 #include <amd64/vmm/intel/vmcs.h> 49 #include <x86/apicreg.h> 50 51 #include <machine/atomic.h> 52 #include <machine/segments.h> 53 54 #ifndef WITHOUT_CAPSICUM 55 #include <capsicum_helpers.h> 56 #endif 57 #include <stdio.h> 58 #include <stdlib.h> 59 #include <string.h> 60 #include <err.h> 61 #include <errno.h> 62 #ifdef BHYVE_SNAPSHOT 63 #include <fcntl.h> 64 #endif 65 #include <libgen.h> 66 #include <unistd.h> 67 #include <assert.h> 68 #include <pthread.h> 69 #include <pthread_np.h> 70 #include <sysexits.h> 71 #include <stdbool.h> 72 #include <stdint.h> 73 #ifdef BHYVE_SNAPSHOT 74 #include <ucl.h> 75 #include <unistd.h> 76 77 #include <libxo/xo.h> 78 #endif 79 80 #include <machine/vmm.h> 81 #ifndef WITHOUT_CAPSICUM 82 #include <machine/vmm_dev.h> 83 #endif 84 #include <machine/vmm_instruction_emul.h> 85 #include <vmmapi.h> 86 87 #include "bhyverun.h" 88 #include "acpi.h" 89 #include "atkbdc.h" 90 #include "bootrom.h" 91 #include "config.h" 92 #include "inout.h" 93 #include "debug.h" 94 #include "e820.h" 95 #include "fwctl.h" 96 #include "gdb.h" 97 #include "ioapic.h" 98 #include "kernemu_dev.h" 99 #include "mem.h" 100 #include "mevent.h" 101 #include "mptbl.h" 102 #include "pci_emul.h" 103 #include "pci_irq.h" 104 #include "pci_lpc.h" 105 #include "qemu_fwcfg.h" 106 #include "smbiostbl.h" 107 #ifdef BHYVE_SNAPSHOT 108 #include "snapshot.h" 109 #endif 110 #include "xmsr.h" 111 #include "spinup_ap.h" 112 #include "rtc.h" 113 #include "vmgenc.h" 114 115 #define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */ 116 117 #define MB (1024UL * 1024) 118 #define GB (1024UL * MB) 119 120 static const char * const vmx_exit_reason_desc[] = { 121 [EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)", 122 [EXIT_REASON_EXT_INTR] = "External interrupt", 123 [EXIT_REASON_TRIPLE_FAULT] = "Triple fault", 124 [EXIT_REASON_INIT] = "INIT signal", 125 [EXIT_REASON_SIPI] = "Start-up IPI (SIPI)", 126 [EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)", 127 [EXIT_REASON_SMI] = "Other SMI", 128 [EXIT_REASON_INTR_WINDOW] = "Interrupt window", 129 [EXIT_REASON_NMI_WINDOW] = "NMI window", 130 [EXIT_REASON_TASK_SWITCH] = "Task switch", 131 [EXIT_REASON_CPUID] = "CPUID", 132 [EXIT_REASON_GETSEC] = "GETSEC", 133 [EXIT_REASON_HLT] = "HLT", 134 [EXIT_REASON_INVD] = "INVD", 135 [EXIT_REASON_INVLPG] = "INVLPG", 136 [EXIT_REASON_RDPMC] = "RDPMC", 137 [EXIT_REASON_RDTSC] = "RDTSC", 138 [EXIT_REASON_RSM] = "RSM", 139 [EXIT_REASON_VMCALL] = "VMCALL", 140 [EXIT_REASON_VMCLEAR] = "VMCLEAR", 141 [EXIT_REASON_VMLAUNCH] = "VMLAUNCH", 142 [EXIT_REASON_VMPTRLD] = "VMPTRLD", 143 [EXIT_REASON_VMPTRST] = "VMPTRST", 144 [EXIT_REASON_VMREAD] = "VMREAD", 145 [EXIT_REASON_VMRESUME] = "VMRESUME", 146 [EXIT_REASON_VMWRITE] = "VMWRITE", 147 [EXIT_REASON_VMXOFF] = "VMXOFF", 148 [EXIT_REASON_VMXON] = "VMXON", 149 [EXIT_REASON_CR_ACCESS] = "Control-register accesses", 150 [EXIT_REASON_DR_ACCESS] = "MOV DR", 151 [EXIT_REASON_INOUT] = "I/O instruction", 152 [EXIT_REASON_RDMSR] = "RDMSR", 153 [EXIT_REASON_WRMSR] = "WRMSR", 154 [EXIT_REASON_INVAL_VMCS] = 155 "VM-entry failure due to invalid guest state", 156 [EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading", 157 [EXIT_REASON_MWAIT] = "MWAIT", 158 [EXIT_REASON_MTF] = "Monitor trap flag", 159 [EXIT_REASON_MONITOR] = "MONITOR", 160 [EXIT_REASON_PAUSE] = "PAUSE", 161 [EXIT_REASON_MCE_DURING_ENTRY] = 162 "VM-entry failure due to machine-check event", 163 [EXIT_REASON_TPR] = "TPR below threshold", 164 [EXIT_REASON_APIC_ACCESS] = "APIC access", 165 [EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI", 166 [EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR", 167 [EXIT_REASON_LDTR_TR] = "Access to LDTR or TR", 168 [EXIT_REASON_EPT_FAULT] = "EPT violation", 169 [EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration", 170 [EXIT_REASON_INVEPT] = "INVEPT", 171 [EXIT_REASON_RDTSCP] = "RDTSCP", 172 [EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired", 173 [EXIT_REASON_INVVPID] = "INVVPID", 174 [EXIT_REASON_WBINVD] = "WBINVD", 175 [EXIT_REASON_XSETBV] = "XSETBV", 176 [EXIT_REASON_APIC_WRITE] = "APIC write", 177 [EXIT_REASON_RDRAND] = "RDRAND", 178 [EXIT_REASON_INVPCID] = "INVPCID", 179 [EXIT_REASON_VMFUNC] = "VMFUNC", 180 [EXIT_REASON_ENCLS] = "ENCLS", 181 [EXIT_REASON_RDSEED] = "RDSEED", 182 [EXIT_REASON_PM_LOG_FULL] = "Page-modification log full", 183 [EXIT_REASON_XSAVES] = "XSAVES", 184 [EXIT_REASON_XRSTORS] = "XRSTORS" 185 }; 186 187 typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *, struct vm_run *); 188 189 int guest_ncpus; 190 uint16_t cpu_cores, cpu_sockets, cpu_threads; 191 192 int raw_stdio = 0; 193 194 static char *progname; 195 static const int BSP = 0; 196 197 static cpuset_t cpumask; 198 199 static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu); 200 201 static struct vcpu_info { 202 struct vmctx *ctx; 203 struct vcpu *vcpu; 204 int vcpuid; 205 } *vcpu_info; 206 207 static cpuset_t **vcpumap; 208 209 static void 210 usage(int code) 211 { 212 213 fprintf(stderr, 214 "Usage: %s [-AaCDeHhPSuWwxY]\n" 215 " %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n" 216 " %*s [-G port] [-k config_file] [-l lpc] [-m mem] [-o var=value]\n" 217 " %*s [-p vcpu:hostcpu] [-r file] [-s pci] [-U uuid] vmname\n" 218 " -A: create ACPI tables\n" 219 " -a: local apic is in xAPIC mode (deprecated)\n" 220 " -C: include guest memory in core file\n" 221 " -c: number of CPUs and/or topology specification\n" 222 " -D: destroy on power-off\n" 223 " -e: exit on unhandled I/O access\n" 224 " -G: start a debug server\n" 225 " -H: vmexit from the guest on HLT\n" 226 " -h: help\n" 227 " -k: key=value flat config file\n" 228 " -K: PS2 keyboard layout\n" 229 " -l: LPC device configuration\n" 230 " -m: memory size\n" 231 " -o: set config 'var' to 'value'\n" 232 " -P: vmexit from the guest on pause\n" 233 " -p: pin 'vcpu' to 'hostcpu'\n" 234 #ifdef BHYVE_SNAPSHOT 235 " -r: path to checkpoint file\n" 236 #endif 237 " -S: guest memory cannot be swapped\n" 238 " -s: <slot,driver,configinfo> PCI slot config\n" 239 " -U: UUID\n" 240 " -u: RTC keeps UTC time\n" 241 " -W: force virtio to use single-vector MSI\n" 242 " -w: ignore unimplemented MSRs\n" 243 " -x: local APIC is in x2APIC mode\n" 244 " -Y: disable MPtable generation\n", 245 progname, (int)strlen(progname), "", (int)strlen(progname), "", 246 (int)strlen(progname), ""); 247 248 exit(code); 249 } 250 251 /* 252 * XXX This parser is known to have the following issues: 253 * 1. It accepts null key=value tokens ",," as setting "cpus" to an 254 * empty string. 255 * 256 * The acceptance of a null specification ('-c ""') is by design to match the 257 * manual page syntax specification, this results in a topology of 1 vCPU. 258 */ 259 static int 260 topology_parse(const char *opt) 261 { 262 char *cp, *str, *tofree; 263 264 if (*opt == '\0') { 265 set_config_value("sockets", "1"); 266 set_config_value("cores", "1"); 267 set_config_value("threads", "1"); 268 set_config_value("cpus", "1"); 269 return (0); 270 } 271 272 tofree = str = strdup(opt); 273 if (str == NULL) 274 errx(4, "Failed to allocate memory"); 275 276 while ((cp = strsep(&str, ",")) != NULL) { 277 if (strncmp(cp, "cpus=", strlen("cpus=")) == 0) 278 set_config_value("cpus", cp + strlen("cpus=")); 279 else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0) 280 set_config_value("sockets", cp + strlen("sockets=")); 281 else if (strncmp(cp, "cores=", strlen("cores=")) == 0) 282 set_config_value("cores", cp + strlen("cores=")); 283 else if (strncmp(cp, "threads=", strlen("threads=")) == 0) 284 set_config_value("threads", cp + strlen("threads=")); 285 #ifdef notyet /* Do not expose this until vmm.ko implements it */ 286 else if (strncmp(cp, "maxcpus=", strlen("maxcpus=")) == 0) 287 set_config_value("maxcpus", cp + strlen("maxcpus=")); 288 #endif 289 else if (strchr(cp, '=') != NULL) 290 goto out; 291 else 292 set_config_value("cpus", cp); 293 } 294 free(tofree); 295 return (0); 296 297 out: 298 free(tofree); 299 return (-1); 300 } 301 302 static int 303 parse_int_value(const char *key, const char *value, int minval, int maxval) 304 { 305 char *cp; 306 long lval; 307 308 errno = 0; 309 lval = strtol(value, &cp, 0); 310 if (errno != 0 || *cp != '\0' || cp == value || lval < minval || 311 lval > maxval) 312 errx(4, "Invalid value for %s: '%s'", key, value); 313 return (lval); 314 } 315 316 /* 317 * Set the sockets, cores, threads, and guest_cpus variables based on 318 * the configured topology. 319 * 320 * The limits of UINT16_MAX are due to the types passed to 321 * vm_set_topology(). vmm.ko may enforce tighter limits. 322 */ 323 static void 324 calc_topology(void) 325 { 326 const char *value; 327 bool explicit_cpus; 328 uint64_t ncpus; 329 330 value = get_config_value("cpus"); 331 if (value != NULL) { 332 guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX); 333 explicit_cpus = true; 334 } else { 335 guest_ncpus = 1; 336 explicit_cpus = false; 337 } 338 value = get_config_value("cores"); 339 if (value != NULL) 340 cpu_cores = parse_int_value("cores", value, 1, UINT16_MAX); 341 else 342 cpu_cores = 1; 343 value = get_config_value("threads"); 344 if (value != NULL) 345 cpu_threads = parse_int_value("threads", value, 1, UINT16_MAX); 346 else 347 cpu_threads = 1; 348 value = get_config_value("sockets"); 349 if (value != NULL) 350 cpu_sockets = parse_int_value("sockets", value, 1, UINT16_MAX); 351 else 352 cpu_sockets = guest_ncpus; 353 354 /* 355 * Compute sockets * cores * threads avoiding overflow. The 356 * range check above insures these are 16 bit values. 357 */ 358 ncpus = (uint64_t)cpu_sockets * cpu_cores * cpu_threads; 359 if (ncpus > UINT16_MAX) 360 errx(4, "Computed number of vCPUs too high: %ju", 361 (uintmax_t)ncpus); 362 363 if (explicit_cpus) { 364 if (guest_ncpus != (int)ncpus) 365 errx(4, "Topology (%d sockets, %d cores, %d threads) " 366 "does not match %d vCPUs", 367 cpu_sockets, cpu_cores, cpu_threads, 368 guest_ncpus); 369 } else 370 guest_ncpus = ncpus; 371 } 372 373 static int 374 pincpu_parse(const char *opt) 375 { 376 const char *value; 377 char *newval; 378 char key[16]; 379 int vcpu, pcpu; 380 381 if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) { 382 fprintf(stderr, "invalid format: %s\n", opt); 383 return (-1); 384 } 385 386 if (vcpu < 0) { 387 fprintf(stderr, "invalid vcpu '%d'\n", vcpu); 388 return (-1); 389 } 390 391 if (pcpu < 0 || pcpu >= CPU_SETSIZE) { 392 fprintf(stderr, "hostcpu '%d' outside valid range from " 393 "0 to %d\n", pcpu, CPU_SETSIZE - 1); 394 return (-1); 395 } 396 397 snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu); 398 value = get_config_value(key); 399 400 if (asprintf(&newval, "%s%s%d", value != NULL ? value : "", 401 value != NULL ? "," : "", pcpu) == -1) { 402 perror("failed to build new cpuset string"); 403 return (-1); 404 } 405 406 set_config_value(key, newval); 407 free(newval); 408 return (0); 409 } 410 411 static void 412 parse_cpuset(int vcpu, const char *list, cpuset_t *set) 413 { 414 char *cp, *token; 415 int pcpu, start; 416 417 CPU_ZERO(set); 418 start = -1; 419 token = __DECONST(char *, list); 420 for (;;) { 421 pcpu = strtoul(token, &cp, 0); 422 if (cp == token) 423 errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list); 424 if (pcpu < 0 || pcpu >= CPU_SETSIZE) 425 errx(4, "hostcpu '%d' outside valid range from 0 to %d", 426 pcpu, CPU_SETSIZE - 1); 427 switch (*cp) { 428 case ',': 429 case '\0': 430 if (start >= 0) { 431 if (start > pcpu) 432 errx(4, "Invalid hostcpu range %d-%d", 433 start, pcpu); 434 while (start < pcpu) { 435 CPU_SET(start, set); 436 start++; 437 } 438 start = -1; 439 } 440 CPU_SET(pcpu, set); 441 break; 442 case '-': 443 if (start >= 0) 444 errx(4, "invalid cpuset for vcpu %d: '%s'", 445 vcpu, list); 446 start = pcpu; 447 break; 448 default: 449 errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list); 450 } 451 if (*cp == '\0') 452 break; 453 token = cp + 1; 454 } 455 } 456 457 static void 458 build_vcpumaps(void) 459 { 460 char key[16]; 461 const char *value; 462 int vcpu; 463 464 vcpumap = calloc(guest_ncpus, sizeof(*vcpumap)); 465 for (vcpu = 0; vcpu < guest_ncpus; vcpu++) { 466 snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu); 467 value = get_config_value(key); 468 if (value == NULL) 469 continue; 470 vcpumap[vcpu] = malloc(sizeof(cpuset_t)); 471 if (vcpumap[vcpu] == NULL) 472 err(4, "Failed to allocate cpuset for vcpu %d", vcpu); 473 parse_cpuset(vcpu, value, vcpumap[vcpu]); 474 } 475 } 476 477 void 478 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, 479 int errcode) 480 { 481 int error, restart_instruction; 482 483 restart_instruction = 1; 484 485 error = vm_inject_exception(vcpu, vector, errcode_valid, errcode, 486 restart_instruction); 487 assert(error == 0); 488 } 489 490 void * 491 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len) 492 { 493 494 return (vm_map_gpa(ctx, gaddr, len)); 495 } 496 497 #ifdef BHYVE_SNAPSHOT 498 uintptr_t 499 paddr_host2guest(struct vmctx *ctx, void *addr) 500 { 501 return (vm_rev_map_gpa(ctx, addr)); 502 } 503 #endif 504 505 int 506 fbsdrun_virtio_msix(void) 507 { 508 509 return (get_config_bool_default("virtio_msix", true)); 510 } 511 512 static void * 513 fbsdrun_start_thread(void *param) 514 { 515 char tname[MAXCOMLEN + 1]; 516 struct vcpu_info *vi = param; 517 int error; 518 519 snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid); 520 pthread_set_name_np(pthread_self(), tname); 521 522 if (vcpumap[vi->vcpuid] != NULL) { 523 error = pthread_setaffinity_np(pthread_self(), 524 sizeof(cpuset_t), vcpumap[vi->vcpuid]); 525 assert(error == 0); 526 } 527 528 #ifdef BHYVE_SNAPSHOT 529 checkpoint_cpu_add(vi->vcpuid); 530 #endif 531 gdb_cpu_add(vi->vcpu); 532 533 vm_loop(vi->ctx, vi->vcpu); 534 535 /* not reached */ 536 exit(1); 537 return (NULL); 538 } 539 540 static void 541 fbsdrun_addcpu(struct vcpu_info *vi) 542 { 543 pthread_t thr; 544 int error; 545 546 error = vm_activate_cpu(vi->vcpu); 547 if (error != 0) 548 err(EX_OSERR, "could not activate CPU %d", vi->vcpuid); 549 550 CPU_SET_ATOMIC(vi->vcpuid, &cpumask); 551 552 vm_suspend_cpu(vi->vcpu); 553 554 error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi); 555 assert(error == 0); 556 } 557 558 static int 559 fbsdrun_deletecpu(int vcpu) 560 { 561 562 if (!CPU_ISSET(vcpu, &cpumask)) { 563 fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu); 564 exit(4); 565 } 566 567 CPU_CLR_ATOMIC(vcpu, &cpumask); 568 return (CPU_EMPTY(&cpumask)); 569 } 570 571 static int 572 vmexit_handle_notify(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 573 struct vm_exit *vme __unused, uint32_t eax __unused) 574 { 575 #if BHYVE_DEBUG 576 /* 577 * put guest-driven debug here 578 */ 579 #endif 580 return (VMEXIT_CONTINUE); 581 } 582 583 static int 584 vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) 585 { 586 struct vm_exit *vme; 587 int error; 588 int bytes, port, in, out; 589 590 vme = vmrun->vm_exit; 591 port = vme->u.inout.port; 592 bytes = vme->u.inout.bytes; 593 in = vme->u.inout.in; 594 out = !in; 595 596 /* Extra-special case of host notifications */ 597 if (out && port == GUEST_NIO_PORT) { 598 error = vmexit_handle_notify(ctx, vcpu, vme, vme->u.inout.eax); 599 return (error); 600 } 601 602 error = emulate_inout(ctx, vcpu, vme); 603 if (error) { 604 fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n", 605 in ? "in" : "out", 606 bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), 607 port, vme->rip); 608 return (VMEXIT_ABORT); 609 } else { 610 return (VMEXIT_CONTINUE); 611 } 612 } 613 614 static int 615 vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, 616 struct vm_run *vmrun) 617 { 618 struct vm_exit *vme; 619 uint64_t val; 620 uint32_t eax, edx; 621 int error; 622 623 vme = vmrun->vm_exit; 624 625 val = 0; 626 error = emulate_rdmsr(vcpu, vme->u.msr.code, &val); 627 if (error != 0) { 628 fprintf(stderr, "rdmsr to register %#x on vcpu %d\n", 629 vme->u.msr.code, vcpu_id(vcpu)); 630 if (get_config_bool("x86.strictmsr")) { 631 vm_inject_gp(vcpu); 632 return (VMEXIT_CONTINUE); 633 } 634 } 635 636 eax = val; 637 error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax); 638 assert(error == 0); 639 640 edx = val >> 32; 641 error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx); 642 assert(error == 0); 643 644 return (VMEXIT_CONTINUE); 645 } 646 647 static int 648 vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, 649 struct vm_run *vmrun) 650 { 651 struct vm_exit *vme; 652 int error; 653 654 vme = vmrun->vm_exit; 655 656 error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval); 657 if (error != 0) { 658 fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n", 659 vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu)); 660 if (get_config_bool("x86.strictmsr")) { 661 vm_inject_gp(vcpu); 662 return (VMEXIT_CONTINUE); 663 } 664 } 665 return (VMEXIT_CONTINUE); 666 } 667 668 #define DEBUG_EPT_MISCONFIG 669 #ifdef DEBUG_EPT_MISCONFIG 670 #define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400 671 672 static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4]; 673 static int ept_misconfig_ptenum; 674 #endif 675 676 static const char * 677 vmexit_vmx_desc(uint32_t exit_reason) 678 { 679 680 if (exit_reason >= nitems(vmx_exit_reason_desc) || 681 vmx_exit_reason_desc[exit_reason] == NULL) 682 return ("Unknown"); 683 return (vmx_exit_reason_desc[exit_reason]); 684 } 685 686 static int 687 vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) 688 { 689 struct vm_exit *vme; 690 691 vme = vmrun->vm_exit; 692 693 fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu)); 694 fprintf(stderr, "\treason\t\tVMX\n"); 695 fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip); 696 fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length); 697 fprintf(stderr, "\tstatus\t\t%d\n", vme->u.vmx.status); 698 fprintf(stderr, "\texit_reason\t%u (%s)\n", vme->u.vmx.exit_reason, 699 vmexit_vmx_desc(vme->u.vmx.exit_reason)); 700 fprintf(stderr, "\tqualification\t0x%016lx\n", 701 vme->u.vmx.exit_qualification); 702 fprintf(stderr, "\tinst_type\t\t%d\n", vme->u.vmx.inst_type); 703 fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error); 704 #ifdef DEBUG_EPT_MISCONFIG 705 if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) { 706 vm_get_register(vcpu, 707 VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS), 708 &ept_misconfig_gpa); 709 vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte, 710 &ept_misconfig_ptenum); 711 fprintf(stderr, "\tEPT misconfiguration:\n"); 712 fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa); 713 fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n", 714 ept_misconfig_ptenum, ept_misconfig_pte[0], 715 ept_misconfig_pte[1], ept_misconfig_pte[2], 716 ept_misconfig_pte[3]); 717 } 718 #endif /* DEBUG_EPT_MISCONFIG */ 719 return (VMEXIT_ABORT); 720 } 721 722 static int 723 vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun) 724 { 725 struct vm_exit *vme; 726 727 vme = vmrun->vm_exit; 728 729 fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu)); 730 fprintf(stderr, "\treason\t\tSVM\n"); 731 fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip); 732 fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length); 733 fprintf(stderr, "\texitcode\t%#lx\n", vme->u.svm.exitcode); 734 fprintf(stderr, "\texitinfo1\t%#lx\n", vme->u.svm.exitinfo1); 735 fprintf(stderr, "\texitinfo2\t%#lx\n", vme->u.svm.exitinfo2); 736 return (VMEXIT_ABORT); 737 } 738 739 static int 740 vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 741 struct vm_run *vmrun) 742 { 743 assert(vmrun->vm_exit->inst_length == 0); 744 745 return (VMEXIT_CONTINUE); 746 } 747 748 static int 749 vmexit_reqidle(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 750 struct vm_run *vmrun) 751 { 752 assert(vmrun->vm_exit->inst_length == 0); 753 754 return (VMEXIT_CONTINUE); 755 } 756 757 static int 758 vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 759 struct vm_run *vmrun __unused) 760 { 761 /* 762 * Just continue execution with the next instruction. We use 763 * the HLT VM exit as a way to be friendly with the host 764 * scheduler. 765 */ 766 return (VMEXIT_CONTINUE); 767 } 768 769 static int 770 vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 771 struct vm_run *vmrun __unused) 772 { 773 return (VMEXIT_CONTINUE); 774 } 775 776 static int 777 vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu, 778 struct vm_run *vmrun) 779 { 780 assert(vmrun->vm_exit->inst_length == 0); 781 782 #ifdef BHYVE_SNAPSHOT 783 checkpoint_cpu_suspend(vcpu_id(vcpu)); 784 #endif 785 gdb_cpu_mtrap(vcpu); 786 #ifdef BHYVE_SNAPSHOT 787 checkpoint_cpu_resume(vcpu_id(vcpu)); 788 #endif 789 790 return (VMEXIT_CONTINUE); 791 } 792 793 static int 794 vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu, 795 struct vm_run *vmrun) 796 { 797 struct vm_exit *vme; 798 struct vie *vie; 799 int err, i, cs_d; 800 enum vm_cpu_mode mode; 801 802 vme = vmrun->vm_exit; 803 804 vie = &vme->u.inst_emul.vie; 805 if (!vie->decoded) { 806 /* 807 * Attempt to decode in userspace as a fallback. This allows 808 * updating instruction decode in bhyve without rebooting the 809 * kernel (rapid prototyping), albeit with much slower 810 * emulation. 811 */ 812 vie_restart(vie); 813 mode = vme->u.inst_emul.paging.cpu_mode; 814 cs_d = vme->u.inst_emul.cs_d; 815 if (vmm_decode_instruction(mode, cs_d, vie) != 0) 816 goto fail; 817 if (vm_set_register(vcpu, VM_REG_GUEST_RIP, 818 vme->rip + vie->num_processed) != 0) 819 goto fail; 820 } 821 822 err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie, 823 &vme->u.inst_emul.paging); 824 if (err) { 825 if (err == ESRCH) { 826 EPRINTLN("Unhandled memory access to 0x%lx\n", 827 vme->u.inst_emul.gpa); 828 } 829 goto fail; 830 } 831 832 return (VMEXIT_CONTINUE); 833 834 fail: 835 fprintf(stderr, "Failed to emulate instruction sequence [ "); 836 for (i = 0; i < vie->num_valid; i++) 837 fprintf(stderr, "%02x", vie->inst[i]); 838 FPRINTLN(stderr, " ] at 0x%lx", vme->rip); 839 return (VMEXIT_ABORT); 840 } 841 842 static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER; 843 static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER; 844 845 static int 846 vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun) 847 { 848 struct vm_exit *vme; 849 enum vm_suspend_how how; 850 int vcpuid = vcpu_id(vcpu); 851 852 vme = vmrun->vm_exit; 853 854 how = vme->u.suspended.how; 855 856 fbsdrun_deletecpu(vcpuid); 857 858 if (vcpuid != BSP) { 859 pthread_mutex_lock(&resetcpu_mtx); 860 pthread_cond_signal(&resetcpu_cond); 861 pthread_mutex_unlock(&resetcpu_mtx); 862 pthread_exit(NULL); 863 } 864 865 pthread_mutex_lock(&resetcpu_mtx); 866 while (!CPU_EMPTY(&cpumask)) { 867 pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx); 868 } 869 pthread_mutex_unlock(&resetcpu_mtx); 870 871 switch (how) { 872 case VM_SUSPEND_RESET: 873 exit(0); 874 case VM_SUSPEND_POWEROFF: 875 if (get_config_bool_default("destroy_on_poweroff", false)) 876 vm_destroy(ctx); 877 exit(1); 878 case VM_SUSPEND_HALT: 879 exit(2); 880 case VM_SUSPEND_TRIPLEFAULT: 881 exit(3); 882 default: 883 fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how); 884 exit(100); 885 } 886 return (0); /* NOTREACHED */ 887 } 888 889 static int 890 vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu, 891 struct vm_run *vmrun __unused) 892 { 893 894 #ifdef BHYVE_SNAPSHOT 895 checkpoint_cpu_suspend(vcpu_id(vcpu)); 896 #endif 897 gdb_cpu_suspend(vcpu); 898 #ifdef BHYVE_SNAPSHOT 899 checkpoint_cpu_resume(vcpu_id(vcpu)); 900 #endif 901 /* 902 * XXX-MJ sleep for a short period to avoid chewing up the CPU in the 903 * window between activation of the vCPU thread and the STARTUP IPI. 904 */ 905 usleep(1000); 906 return (VMEXIT_CONTINUE); 907 } 908 909 static int 910 vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu, 911 struct vm_run *vmrun) 912 { 913 gdb_cpu_breakpoint(vcpu, vmrun->vm_exit); 914 return (VMEXIT_CONTINUE); 915 } 916 917 static int 918 vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, 919 struct vm_run *vmrun) 920 { 921 struct vm_exit *vme; 922 cpuset_t *dmask; 923 int error = -1; 924 int i; 925 926 dmask = vmrun->cpuset; 927 vme = vmrun->vm_exit; 928 929 switch (vme->u.ipi.mode) { 930 case APIC_DELMODE_INIT: 931 CPU_FOREACH_ISSET(i, dmask) { 932 error = vm_suspend_cpu(vcpu_info[i].vcpu); 933 if (error) { 934 warnx("%s: failed to suspend cpu %d\n", 935 __func__, i); 936 break; 937 } 938 } 939 break; 940 case APIC_DELMODE_STARTUP: 941 CPU_FOREACH_ISSET(i, dmask) { 942 spinup_ap(vcpu_info[i].vcpu, 943 vme->u.ipi.vector << PAGE_SHIFT); 944 } 945 error = 0; 946 break; 947 default: 948 break; 949 } 950 951 return (error); 952 } 953 954 static vmexit_handler_t handler[VM_EXITCODE_MAX] = { 955 [VM_EXITCODE_INOUT] = vmexit_inout, 956 [VM_EXITCODE_INOUT_STR] = vmexit_inout, 957 [VM_EXITCODE_VMX] = vmexit_vmx, 958 [VM_EXITCODE_SVM] = vmexit_svm, 959 [VM_EXITCODE_BOGUS] = vmexit_bogus, 960 [VM_EXITCODE_REQIDLE] = vmexit_reqidle, 961 [VM_EXITCODE_RDMSR] = vmexit_rdmsr, 962 [VM_EXITCODE_WRMSR] = vmexit_wrmsr, 963 [VM_EXITCODE_MTRAP] = vmexit_mtrap, 964 [VM_EXITCODE_INST_EMUL] = vmexit_inst_emul, 965 [VM_EXITCODE_SUSPENDED] = vmexit_suspend, 966 [VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch, 967 [VM_EXITCODE_DEBUG] = vmexit_debug, 968 [VM_EXITCODE_BPT] = vmexit_breakpoint, 969 [VM_EXITCODE_IPI] = vmexit_ipi, 970 }; 971 972 static void 973 vm_loop(struct vmctx *ctx, struct vcpu *vcpu) 974 { 975 struct vm_exit vme; 976 struct vm_run vmrun; 977 int error, rc; 978 enum vm_exitcode exitcode; 979 cpuset_t active_cpus, dmask; 980 981 error = vm_active_cpus(ctx, &active_cpus); 982 assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus)); 983 984 vmrun.vm_exit = &vme; 985 vmrun.cpuset = &dmask; 986 vmrun.cpusetsize = sizeof(dmask); 987 988 while (1) { 989 error = vm_run(vcpu, &vmrun); 990 if (error != 0) 991 break; 992 993 exitcode = vme.exitcode; 994 if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) { 995 fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n", 996 exitcode); 997 exit(4); 998 } 999 1000 rc = (*handler[exitcode])(ctx, vcpu, &vmrun); 1001 1002 switch (rc) { 1003 case VMEXIT_CONTINUE: 1004 break; 1005 case VMEXIT_ABORT: 1006 abort(); 1007 default: 1008 exit(4); 1009 } 1010 } 1011 fprintf(stderr, "vm_run error %d, errno %d\n", error, errno); 1012 } 1013 1014 static int 1015 num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu) 1016 { 1017 uint16_t sockets, cores, threads, maxcpus; 1018 int tmp, error; 1019 1020 /* 1021 * The guest is allowed to spinup more than one processor only if the 1022 * UNRESTRICTED_GUEST capability is available. 1023 */ 1024 error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp); 1025 if (error != 0) 1026 return (1); 1027 1028 error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus); 1029 if (error == 0) 1030 return (maxcpus); 1031 else 1032 return (1); 1033 } 1034 1035 static void 1036 fbsdrun_set_capabilities(struct vcpu *vcpu, bool bsp) 1037 { 1038 int err, tmp; 1039 1040 if (get_config_bool_default("x86.vmexit_on_hlt", false)) { 1041 err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp); 1042 if (err < 0) { 1043 fprintf(stderr, "VM exit on HLT not supported\n"); 1044 exit(4); 1045 } 1046 vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1); 1047 if (bsp) 1048 handler[VM_EXITCODE_HLT] = vmexit_hlt; 1049 } 1050 1051 if (get_config_bool_default("x86.vmexit_on_pause", false)) { 1052 /* 1053 * pause exit support required for this mode 1054 */ 1055 err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp); 1056 if (err < 0) { 1057 fprintf(stderr, 1058 "SMP mux requested, no pause support\n"); 1059 exit(4); 1060 } 1061 vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1); 1062 if (bsp) 1063 handler[VM_EXITCODE_PAUSE] = vmexit_pause; 1064 } 1065 1066 if (get_config_bool_default("x86.x2apic", false)) 1067 err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED); 1068 else 1069 err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED); 1070 1071 if (err) { 1072 fprintf(stderr, "Unable to set x2apic state (%d)\n", err); 1073 exit(4); 1074 } 1075 1076 vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1); 1077 1078 err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1); 1079 assert(err == 0); 1080 } 1081 1082 static struct vmctx * 1083 do_open(const char *vmname) 1084 { 1085 struct vmctx *ctx; 1086 int error; 1087 bool reinit, romboot; 1088 1089 reinit = romboot = false; 1090 1091 if (lpc_bootrom()) 1092 romboot = true; 1093 1094 error = vm_create(vmname); 1095 if (error) { 1096 if (errno == EEXIST) { 1097 if (romboot) { 1098 reinit = true; 1099 } else { 1100 /* 1101 * The virtual machine has been setup by the 1102 * userspace bootloader. 1103 */ 1104 } 1105 } else { 1106 perror("vm_create"); 1107 exit(4); 1108 } 1109 } else { 1110 if (!romboot) { 1111 /* 1112 * If the virtual machine was just created then a 1113 * bootrom must be configured to boot it. 1114 */ 1115 fprintf(stderr, "virtual machine cannot be booted\n"); 1116 exit(4); 1117 } 1118 } 1119 1120 ctx = vm_open(vmname); 1121 if (ctx == NULL) { 1122 perror("vm_open"); 1123 exit(4); 1124 } 1125 1126 #ifndef WITHOUT_CAPSICUM 1127 if (vm_limit_rights(ctx) != 0) 1128 err(EX_OSERR, "vm_limit_rights"); 1129 #endif 1130 1131 if (reinit) { 1132 error = vm_reinit(ctx); 1133 if (error) { 1134 perror("vm_reinit"); 1135 exit(4); 1136 } 1137 } 1138 error = vm_set_topology(ctx, cpu_sockets, cpu_cores, cpu_threads, 1139 0 /* maxcpus, unimplemented */); 1140 if (error) 1141 errx(EX_OSERR, "vm_set_topology"); 1142 return (ctx); 1143 } 1144 1145 static void 1146 spinup_vcpu(struct vcpu_info *vi, bool bsp) 1147 { 1148 int error; 1149 1150 if (!bsp) { 1151 fbsdrun_set_capabilities(vi->vcpu, false); 1152 1153 /* 1154 * Enable the 'unrestricted guest' mode for APs. 1155 * 1156 * APs startup in power-on 16-bit mode. 1157 */ 1158 error = vm_set_capability(vi->vcpu, VM_CAP_UNRESTRICTED_GUEST, 1); 1159 assert(error == 0); 1160 } 1161 1162 fbsdrun_addcpu(vi); 1163 } 1164 1165 static bool 1166 parse_config_option(const char *option) 1167 { 1168 const char *value; 1169 char *path; 1170 1171 value = strchr(option, '='); 1172 if (value == NULL || value[1] == '\0') 1173 return (false); 1174 path = strndup(option, value - option); 1175 if (path == NULL) 1176 err(4, "Failed to allocate memory"); 1177 set_config_value(path, value + 1); 1178 return (true); 1179 } 1180 1181 static void 1182 parse_simple_config_file(const char *path) 1183 { 1184 FILE *fp; 1185 char *line, *cp; 1186 size_t linecap; 1187 unsigned int lineno; 1188 1189 fp = fopen(path, "r"); 1190 if (fp == NULL) 1191 err(4, "Failed to open configuration file %s", path); 1192 line = NULL; 1193 linecap = 0; 1194 lineno = 1; 1195 for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) { 1196 if (*line == '#' || *line == '\n') 1197 continue; 1198 cp = strchr(line, '\n'); 1199 if (cp != NULL) 1200 *cp = '\0'; 1201 if (!parse_config_option(line)) 1202 errx(4, "%s line %u: invalid config option '%s'", path, 1203 lineno, line); 1204 } 1205 free(line); 1206 fclose(fp); 1207 } 1208 1209 static void 1210 parse_gdb_options(const char *opt) 1211 { 1212 const char *sport; 1213 char *colon; 1214 1215 if (opt[0] == 'w') { 1216 set_config_bool("gdb.wait", true); 1217 opt++; 1218 } 1219 1220 colon = strrchr(opt, ':'); 1221 if (colon == NULL) { 1222 sport = opt; 1223 } else { 1224 *colon = '\0'; 1225 colon++; 1226 sport = colon; 1227 set_config_value("gdb.address", opt); 1228 } 1229 1230 set_config_value("gdb.port", sport); 1231 } 1232 1233 static void 1234 set_defaults(void) 1235 { 1236 1237 set_config_bool("acpi_tables", false); 1238 set_config_value("memory.size", "256M"); 1239 set_config_bool("x86.strictmsr", true); 1240 set_config_value("lpc.fwcfg", "bhyve"); 1241 } 1242 1243 int 1244 main(int argc, char *argv[]) 1245 { 1246 int c, error; 1247 int max_vcpus, memflags; 1248 struct vcpu *bsp; 1249 struct vmctx *ctx; 1250 struct qemu_fwcfg_item *e820_fwcfg_item; 1251 uint64_t rip; 1252 size_t memsize; 1253 const char *optstr, *value, *vmname; 1254 #ifdef BHYVE_SNAPSHOT 1255 char *restore_file; 1256 struct restore_state rstate; 1257 1258 restore_file = NULL; 1259 #endif 1260 1261 init_config(); 1262 set_defaults(); 1263 progname = basename(argv[0]); 1264 1265 #ifdef BHYVE_SNAPSHOT 1266 optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:r:"; 1267 #else 1268 optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:"; 1269 #endif 1270 while ((c = getopt(argc, argv, optstr)) != -1) { 1271 switch (c) { 1272 case 'a': 1273 set_config_bool("x86.x2apic", false); 1274 break; 1275 case 'A': 1276 set_config_bool("acpi_tables", true); 1277 break; 1278 case 'D': 1279 set_config_bool("destroy_on_poweroff", true); 1280 break; 1281 case 'p': 1282 if (pincpu_parse(optarg) != 0) { 1283 errx(EX_USAGE, "invalid vcpu pinning " 1284 "configuration '%s'", optarg); 1285 } 1286 break; 1287 case 'c': 1288 if (topology_parse(optarg) != 0) { 1289 errx(EX_USAGE, "invalid cpu topology " 1290 "'%s'", optarg); 1291 } 1292 break; 1293 case 'C': 1294 set_config_bool("memory.guest_in_core", true); 1295 break; 1296 case 'f': 1297 if (qemu_fwcfg_parse_cmdline_arg(optarg) != 0) { 1298 errx(EX_USAGE, "invalid fwcfg item '%s'", optarg); 1299 } 1300 break; 1301 case 'G': 1302 parse_gdb_options(optarg); 1303 break; 1304 case 'k': 1305 parse_simple_config_file(optarg); 1306 break; 1307 case 'K': 1308 set_config_value("keyboard.layout", optarg); 1309 break; 1310 case 'l': 1311 if (strncmp(optarg, "help", strlen(optarg)) == 0) { 1312 lpc_print_supported_devices(); 1313 exit(0); 1314 } else if (lpc_device_parse(optarg) != 0) { 1315 errx(EX_USAGE, "invalid lpc device " 1316 "configuration '%s'", optarg); 1317 } 1318 break; 1319 #ifdef BHYVE_SNAPSHOT 1320 case 'r': 1321 restore_file = optarg; 1322 break; 1323 #endif 1324 case 's': 1325 if (strncmp(optarg, "help", strlen(optarg)) == 0) { 1326 pci_print_supported_devices(); 1327 exit(0); 1328 } else if (pci_parse_slot(optarg) != 0) 1329 exit(4); 1330 else 1331 break; 1332 case 'S': 1333 set_config_bool("memory.wired", true); 1334 break; 1335 case 'm': 1336 set_config_value("memory.size", optarg); 1337 break; 1338 case 'o': 1339 if (!parse_config_option(optarg)) 1340 errx(EX_USAGE, "invalid configuration option '%s'", optarg); 1341 break; 1342 case 'H': 1343 set_config_bool("x86.vmexit_on_hlt", true); 1344 break; 1345 case 'I': 1346 /* 1347 * The "-I" option was used to add an ioapic to the 1348 * virtual machine. 1349 * 1350 * An ioapic is now provided unconditionally for each 1351 * virtual machine and this option is now deprecated. 1352 */ 1353 break; 1354 case 'P': 1355 set_config_bool("x86.vmexit_on_pause", true); 1356 break; 1357 case 'e': 1358 set_config_bool("x86.strictio", true); 1359 break; 1360 case 'u': 1361 set_config_bool("rtc.use_localtime", false); 1362 break; 1363 case 'U': 1364 set_config_value("uuid", optarg); 1365 break; 1366 case 'w': 1367 set_config_bool("x86.strictmsr", false); 1368 break; 1369 case 'W': 1370 set_config_bool("virtio_msix", false); 1371 break; 1372 case 'x': 1373 set_config_bool("x86.x2apic", true); 1374 break; 1375 case 'Y': 1376 set_config_bool("x86.mptable", false); 1377 break; 1378 case 'h': 1379 usage(0); 1380 default: 1381 usage(1); 1382 } 1383 } 1384 argc -= optind; 1385 argv += optind; 1386 1387 if (argc > 1) 1388 usage(1); 1389 1390 #ifdef BHYVE_SNAPSHOT 1391 if (restore_file != NULL) { 1392 error = load_restore_file(restore_file, &rstate); 1393 if (error) { 1394 fprintf(stderr, "Failed to read checkpoint info from " 1395 "file: '%s'.\n", restore_file); 1396 exit(1); 1397 } 1398 vmname = lookup_vmname(&rstate); 1399 if (vmname != NULL) 1400 set_config_value("name", vmname); 1401 } 1402 #endif 1403 1404 if (argc == 1) 1405 set_config_value("name", argv[0]); 1406 1407 vmname = get_config_value("name"); 1408 if (vmname == NULL) 1409 usage(1); 1410 1411 if (get_config_bool_default("config.dump", false)) { 1412 dump_config(); 1413 exit(1); 1414 } 1415 1416 calc_topology(); 1417 build_vcpumaps(); 1418 1419 value = get_config_value("memory.size"); 1420 error = vm_parse_memsize(value, &memsize); 1421 if (error) 1422 errx(EX_USAGE, "invalid memsize '%s'", value); 1423 1424 ctx = do_open(vmname); 1425 1426 #ifdef BHYVE_SNAPSHOT 1427 if (restore_file != NULL) { 1428 guest_ncpus = lookup_guest_ncpus(&rstate); 1429 memflags = lookup_memflags(&rstate); 1430 memsize = lookup_memsize(&rstate); 1431 } 1432 1433 if (guest_ncpus < 1) { 1434 fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus); 1435 exit(1); 1436 } 1437 #endif 1438 1439 bsp = vm_vcpu_open(ctx, BSP); 1440 max_vcpus = num_vcpus_allowed(ctx, bsp); 1441 if (guest_ncpus > max_vcpus) { 1442 fprintf(stderr, "%d vCPUs requested but only %d available\n", 1443 guest_ncpus, max_vcpus); 1444 exit(4); 1445 } 1446 1447 fbsdrun_set_capabilities(bsp, true); 1448 1449 /* Allocate per-VCPU resources. */ 1450 vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info)); 1451 for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) { 1452 vcpu_info[vcpuid].ctx = ctx; 1453 vcpu_info[vcpuid].vcpuid = vcpuid; 1454 if (vcpuid == BSP) 1455 vcpu_info[vcpuid].vcpu = bsp; 1456 else 1457 vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid); 1458 } 1459 1460 memflags = 0; 1461 if (get_config_bool_default("memory.wired", false)) 1462 memflags |= VM_MEM_F_WIRED; 1463 if (get_config_bool_default("memory.guest_in_core", false)) 1464 memflags |= VM_MEM_F_INCORE; 1465 vm_set_memflags(ctx, memflags); 1466 error = vm_setup_memory(ctx, memsize, VM_MMAP_ALL); 1467 if (error) { 1468 fprintf(stderr, "Unable to setup memory (%d)\n", errno); 1469 exit(4); 1470 } 1471 1472 error = init_msr(); 1473 if (error) { 1474 fprintf(stderr, "init_msr error %d", error); 1475 exit(4); 1476 } 1477 1478 init_mem(guest_ncpus); 1479 init_inout(); 1480 kernemu_dev_init(); 1481 init_bootrom(ctx); 1482 atkbdc_init(ctx); 1483 pci_irq_init(ctx); 1484 ioapic_init(ctx); 1485 1486 rtc_init(ctx); 1487 sci_init(ctx); 1488 1489 if (qemu_fwcfg_init(ctx) != 0) { 1490 fprintf(stderr, "qemu fwcfg initialization error"); 1491 exit(4); 1492 } 1493 1494 if (qemu_fwcfg_add_file("opt/bhyve/hw.ncpu", sizeof(guest_ncpus), 1495 &guest_ncpus) != 0) { 1496 fprintf(stderr, "Could not add qemu fwcfg opt/bhyve/hw.ncpu"); 1497 exit(4); 1498 } 1499 1500 if (e820_init(ctx) != 0) { 1501 fprintf(stderr, "Unable to setup E820"); 1502 exit(4); 1503 } 1504 1505 /* 1506 * Exit if a device emulation finds an error in its initilization 1507 */ 1508 if (init_pci(ctx) != 0) { 1509 perror("device emulation initialization error"); 1510 exit(4); 1511 } 1512 1513 /* 1514 * Initialize after PCI, to allow a bootrom file to reserve the high 1515 * region. 1516 */ 1517 if (get_config_bool("acpi_tables")) 1518 vmgenc_init(ctx); 1519 1520 init_gdb(ctx); 1521 1522 if (lpc_bootrom()) { 1523 if (vm_set_capability(bsp, VM_CAP_UNRESTRICTED_GUEST, 1)) { 1524 fprintf(stderr, "ROM boot failed: unrestricted guest " 1525 "capability not available\n"); 1526 exit(4); 1527 } 1528 error = vcpu_reset(bsp); 1529 assert(error == 0); 1530 } 1531 1532 /* 1533 * Add all vCPUs. 1534 */ 1535 for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) 1536 spinup_vcpu(&vcpu_info[vcpuid], vcpuid == BSP); 1537 1538 #ifdef BHYVE_SNAPSHOT 1539 if (restore_file != NULL) { 1540 fprintf(stdout, "Pausing pci devs...\r\n"); 1541 if (vm_pause_user_devs() != 0) { 1542 fprintf(stderr, "Failed to pause PCI device state.\n"); 1543 exit(1); 1544 } 1545 1546 fprintf(stdout, "Restoring vm mem...\r\n"); 1547 if (restore_vm_mem(ctx, &rstate) != 0) { 1548 fprintf(stderr, "Failed to restore VM memory.\n"); 1549 exit(1); 1550 } 1551 1552 fprintf(stdout, "Restoring pci devs...\r\n"); 1553 if (vm_restore_user_devs(&rstate) != 0) { 1554 fprintf(stderr, "Failed to restore PCI device state.\n"); 1555 exit(1); 1556 } 1557 1558 fprintf(stdout, "Restoring kernel structs...\r\n"); 1559 if (vm_restore_kern_structs(ctx, &rstate) != 0) { 1560 fprintf(stderr, "Failed to restore kernel structs.\n"); 1561 exit(1); 1562 } 1563 1564 fprintf(stdout, "Resuming pci devs...\r\n"); 1565 if (vm_resume_user_devs() != 0) { 1566 fprintf(stderr, "Failed to resume PCI device state.\n"); 1567 exit(1); 1568 } 1569 } 1570 #endif 1571 1572 error = vm_get_register(bsp, VM_REG_GUEST_RIP, &rip); 1573 assert(error == 0); 1574 1575 /* 1576 * build the guest tables, MP etc. 1577 */ 1578 if (get_config_bool_default("x86.mptable", true)) { 1579 error = mptable_build(ctx, guest_ncpus); 1580 if (error) { 1581 perror("error to build the guest tables"); 1582 exit(4); 1583 } 1584 } 1585 1586 error = smbios_build(ctx); 1587 if (error != 0) 1588 exit(4); 1589 1590 if (get_config_bool("acpi_tables")) { 1591 error = acpi_build(ctx, guest_ncpus); 1592 assert(error == 0); 1593 } 1594 1595 e820_fwcfg_item = e820_get_fwcfg_item(); 1596 if (e820_fwcfg_item == NULL) { 1597 fprintf(stderr, "invalid e820 table"); 1598 exit(4); 1599 } 1600 if (qemu_fwcfg_add_file("etc/e820", e820_fwcfg_item->size, 1601 e820_fwcfg_item->data) != 0) { 1602 fprintf(stderr, "could not add qemu fwcfg etc/e820"); 1603 exit(4); 1604 } 1605 free(e820_fwcfg_item); 1606 1607 if (lpc_bootrom() && strcmp(lpc_fwcfg(), "bhyve") == 0) { 1608 fwctl_init(); 1609 } 1610 1611 /* 1612 * Change the proc title to include the VM name. 1613 */ 1614 setproctitle("%s", vmname); 1615 1616 #ifdef BHYVE_SNAPSHOT 1617 /* initialize mutex/cond variables */ 1618 init_snapshot(); 1619 1620 /* 1621 * checkpointing thread for communication with bhyvectl 1622 */ 1623 if (init_checkpoint_thread(ctx) != 0) 1624 errx(EX_OSERR, "Failed to start checkpoint thread"); 1625 #endif 1626 1627 #ifndef WITHOUT_CAPSICUM 1628 caph_cache_catpages(); 1629 1630 if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1) 1631 errx(EX_OSERR, "Unable to apply rights for sandbox"); 1632 1633 if (caph_enter() == -1) 1634 errx(EX_OSERR, "cap_enter() failed"); 1635 #endif 1636 1637 #ifdef BHYVE_SNAPSHOT 1638 if (restore_file != NULL) { 1639 destroy_restore_state(&rstate); 1640 if (vm_restore_time(ctx) < 0) 1641 err(EX_OSERR, "Unable to restore time"); 1642 1643 for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) 1644 vm_resume_cpu(vcpu_info[vcpuid].vcpu); 1645 } else 1646 #endif 1647 vm_resume_cpu(bsp); 1648 1649 /* 1650 * Head off to the main event dispatch loop 1651 */ 1652 mevent_dispatch(); 1653 1654 exit(4); 1655 } 1656