/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <amd64/vmm/intel/vmcs.h>
#include <x86/apicreg.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "bootrom.h"
#include "config.h"
#include "inout.h"
#include "debug.h"
#include "e820.h"
#include "fwctl.h"
#include "gdb.h"
#include "ioapic.h"
#include "kernemu_dev.h"
#include "mem.h"
#include "mevent.h"
#ifdef BHYVE_SNAPSHOT
#include "migration.h"
#endif
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "qemu_fwcfg.h"
#include "smbiostbl.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#include "vmgenc.h"

#define MB (1024UL * 1024)
#define GB (1024UL * MB)

static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *, struct vm_run *);

int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;

int raw_stdio = 0;

static char *progname;
static const int BSP = 0;

static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);

static struct vcpu_info {
	struct vmctx	*ctx;
	struct vcpu	*vcpu;
	int		vcpuid;
} *vcpu_info;

static cpuset_t **vcpumap;

static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-AaCDeHhPSuWwxY]\n"
	    "       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    "       %*s [-G port] [-k config_file] [-l lpc] [-m mem] [-o var=value]\n"
	    "       %*s [-p vcpu:hostcpu] [-r file] [-s pci] [-U uuid] vmname\n"
	    "       -A: create ACPI tables\n"
	    "       -a: local apic is in xAPIC mode (deprecated)\n"
	    "       -C: include guest memory in core file\n"
	    "       -c: number of CPUs and/or topology specification\n"
	    "       -D: destroy on power-off\n"
	    "       -e: exit on unhandled I/O access\n"
	    "       -G: start a debug server\n"
	    "       -H: vmexit from the guest on HLT\n"
	    "       -h: help\n"
	    "       -k: key=value flat config file\n"
	    "       -K: PS2 keyboard layout\n"
	    "       -l: LPC device configuration\n"
	    "       -m: memory size\n"
	    "       -o: set config 'var' to 'value'\n"
	    "       -P: vmexit from the guest on pause\n"
	    "       -p: pin 'vcpu' to 'hostcpu'\n"
#ifdef BHYVE_SNAPSHOT
	    "       -r: path to checkpoint file\n"
	    "       -R: <host[:port]> the source vm host and port for migration\n"
#endif
	    "       -S: guest memory cannot be swapped\n"
	    "       -s: <slot,driver,configinfo> PCI slot config\n"
	    "       -U: UUID\n"
	    "       -u: RTC keeps UTC time\n"
	    "       -W: force virtio to use single-vector MSI\n"
	    "       -w: ignore unimplemented MSRs\n"
	    "       -x: local APIC is in x2APIC mode\n"
	    "       -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * XXX This parser is known to have the following issues:
 * 1. It accepts null key=value tokens ",," as setting "cpus" to an
 *    empty string.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	char *cp, *str, *tofree;

	if (*opt == '\0') {
		set_config_value("sockets", "1");
		set_config_value("cores", "1");
		set_config_value("threads", "1");
		set_config_value("cpus", "1");
		return (0);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			set_config_value("cpus", cp + strlen("cpus="));
		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
			set_config_value("sockets", cp + strlen("sockets="));
		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
			set_config_value("cores", cp + strlen("cores="));
		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
			set_config_value("threads", cp + strlen("threads="));
		else if (strchr(cp, '=') != NULL)
			goto out;
		else
			set_config_value("cpus", cp);
	}
	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}

static int
parse_int_value(const char *key, const char *value, int minval, int maxval)
{
	char *cp;
	long lval;

	errno = 0;
	lval = strtol(value, &cp, 0);
	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
	    lval > maxval)
		errx(4, "Invalid value for %s: '%s'", key, value);
	return (lval);
}

/*
 * Set the sockets, cores, threads, and guest_ncpus variables based on
 * the configured topology.
 *
 * The limits of UINT16_MAX are due to the types passed to
 * vm_set_topology().  vmm.ko may enforce tighter limits.
 */
static void
calc_topology(void)
{
	const char *value;
	bool explicit_cpus;
	uint64_t ncpus;

	value = get_config_value("cpus");
	if (value != NULL) {
		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
		explicit_cpus = true;
	} else {
		guest_ncpus = 1;
		explicit_cpus = false;
	}
	value = get_config_value("cores");
	if (value != NULL)
		cpu_cores = parse_int_value("cores", value, 1, UINT16_MAX);
	else
		cpu_cores = 1;
	value = get_config_value("threads");
	if (value != NULL)
		cpu_threads = parse_int_value("threads", value, 1, UINT16_MAX);
	else
		cpu_threads = 1;
	value = get_config_value("sockets");
	if (value != NULL)
		cpu_sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
	else
		cpu_sockets = guest_ncpus;

	/*
	 * Compute sockets * cores * threads avoiding overflow.  The
	 * range check above ensures these are 16 bit values.
	 */
	ncpus = (uint64_t)cpu_sockets * cpu_cores * cpu_threads;
	if (ncpus > UINT16_MAX)
		errx(4, "Computed number of vCPUs too high: %ju",
		    (uintmax_t)ncpus);

	if (explicit_cpus) {
		if (guest_ncpus != (int)ncpus)
			errx(4, "Topology (%d sockets, %d cores, %d threads) "
			    "does not match %d vCPUs",
			    cpu_sockets, cpu_cores, cpu_threads,
			    guest_ncpus);
	} else
		guest_ncpus = ncpus;
}

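/*
 * Parse a -p vcpu:hostcpu pin, e.g. "-p 1:4" pins vCPU 1 to host CPU 4.
 * Repeating the option for the same vCPU appends to the "vcpu.N.cpuset"
 * config value, so a vCPU can be restricted to a set of host CPUs; the
 * accumulated list is later turned into a cpuset_t by parse_cpuset(),
 * which accepts entries such as "0,2,4-7".
 */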
static int
pincpu_parse(const char *opt)
{
	const char *value;
	char *newval;
	char key[16];
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0) {
		fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
	value = get_config_value(key);

	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
	    value != NULL ? "," : "", pcpu) == -1) {
		perror("failed to build new cpuset string");
		return (-1);
	}

	set_config_value(key, newval);
	free(newval);
	return (0);
}

static void
parse_cpuset(int vcpu, const char *list, cpuset_t *set)
{
	char *cp, *token;
	int pcpu, start;

	CPU_ZERO(set);
	start = -1;
	token = __DECONST(char *, list);
	for (;;) {
		pcpu = strtoul(token, &cp, 0);
		if (cp == token)
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		if (pcpu < 0 || pcpu >= CPU_SETSIZE)
			errx(4, "hostcpu '%d' outside valid range from 0 to %d",
			    pcpu, CPU_SETSIZE - 1);
		switch (*cp) {
		case ',':
		case '\0':
			if (start >= 0) {
				if (start > pcpu)
					errx(4, "Invalid hostcpu range %d-%d",
					    start, pcpu);
				while (start < pcpu) {
					CPU_SET(start, set);
					start++;
				}
				start = -1;
			}
			CPU_SET(pcpu, set);
			break;
		case '-':
			if (start >= 0)
				errx(4, "invalid cpuset for vcpu %d: '%s'",
				    vcpu, list);
			start = pcpu;
			break;
		default:
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		}
		if (*cp == '\0')
			break;
		token = cp + 1;
	}
}

static void
build_vcpumaps(void)
{
	char key[16];
	const char *value;
	int vcpu;

	vcpumap = calloc(guest_ncpus, sizeof(*vcpumap));
	for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
		value = get_config_value(key);
		if (value == NULL)
			continue;
		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
		if (vcpumap[vcpu] == NULL)
			err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
		parse_cpuset(vcpu, value, vcpumap[vcpu]);
	}
}

void
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
    int errcode)
{
	int error, restart_instruction;

	restart_instruction = 1;

	error = vm_inject_exception(vcpu, vector, errcode_valid, errcode,
	    restart_instruction);
	assert(error == 0);
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

#ifdef BHYVE_SNAPSHOT
uintptr_t
paddr_host2guest(struct vmctx *ctx, void *addr)
{
	return (vm_rev_map_gpa(ctx, addr));
}
#endif

int
fbsdrun_virtio_msix(void)
{

	return (get_config_bool_default("virtio_msix", true));
}

static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct vcpu_info *vi = param;
	int error;

	snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid);
	pthread_set_name_np(pthread_self(), tname);

	if (vcpumap[vi->vcpuid] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vi->vcpuid]);
		assert(error == 0);
	}

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_add(vi->vcpuid);
#endif
	gdb_cpu_add(vi->vcpu);

	vm_loop(vi->ctx, vi->vcpu);

	/* not reached */
	exit(1);
	return (NULL);
}

static void
fbsdrun_addcpu(struct vcpu_info *vi)
{
	pthread_t thr;
	int error;

	error = vm_activate_cpu(vi->vcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", vi->vcpuid);

	CPU_SET_ATOMIC(vi->vcpuid, &cpumask);

	vm_suspend_cpu(vi->vcpu);

	error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi);
	assert(error == 0);
}

static void
fbsdrun_deletecpu(int vcpu)
{
	static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

	pthread_mutex_lock(&resetcpu_mtx);
	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(4);
	}

	CPU_CLR(vcpu, &cpumask);

	if (vcpu != BSP) {
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
		/* NOTREACHED */
	}

	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);
}

static int
vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	int error;
	int bytes, port, in;

	vme = vmrun->vm_exit;
	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	in = vme->u.inout.in;

	error = emulate_inout(ctx, vcpu, vme);
	if (error) {
		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
		    in ? "in" : "out",
		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
		    port, vme->rip);
		return (VMEXIT_ABORT);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	uint64_t val;
	uint32_t eax, edx;
	int error;

	vme = vmrun->vm_exit;

	val = 0;
	error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
	if (error != 0) {
		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
		    vme->u.msr.code, vcpu_id(vcpu));
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(vcpu);
			return (VMEXIT_CONTINUE);
		}
	}

	eax = val;
	error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
	assert(error == 0);

	edx = val >> 32;
	error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	int error;

	vme = vmrun->vm_exit;

	error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval);
	if (error != 0) {
		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
		    vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu));
		if (get_config_bool("x86.strictmsr")) {
			vm_inject_gp(vcpu);
			return (VMEXIT_CONTINUE);
		}
	}
	return (VMEXIT_CONTINUE);
}

#define DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400

static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif

static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

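/*
 * vmexit_vmx() and vmexit_svm() are catch-all handlers: they dump the
 * vendor-specific exit state for an exit that the kernel handed to
 * userspace but that bhyve does not know how to emulate, and then abort
 * the guest.
 */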
static int
vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;

	vme = vmrun->vm_exit;

	fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vme->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vme->u.vmx.exit_reason,
	    vmexit_vmx_desc(vme->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vme->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vme->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(vcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

static int
vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;

	vme = vmrun->vm_exit;

	fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
	fprintf(stderr, "\treason\t\tSVM\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
	fprintf(stderr, "\texitcode\t%#lx\n", vme->u.svm.exitcode);
	fprintf(stderr, "\texitinfo1\t%#lx\n", vme->u.svm.exitinfo1);
	fprintf(stderr, "\texitinfo2\t%#lx\n", vme->u.svm.exitinfo2);
	return (VMEXIT_ABORT);
}

static int
vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_reqidle(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	/*
	 * Just continue execution with the next instruction.  We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun __unused)
{
	return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	assert(vmrun->vm_exit->inst_length == 0);

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
	gdb_cpu_mtrap(vcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(vcpu_id(vcpu));
#endif

	return (VMEXIT_CONTINUE);
}

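/*
 * Emulate a guest access to an emulated memory (MMIO) region.  The
 * kernel normally decodes the faulting instruction; if it did not, the
 * decode is retried in userspace before the access is handed to
 * emulate_mem().
 */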
static int
vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	struct vie *vie;
	int err, i, cs_d;
	enum vm_cpu_mode mode;

	vme = vmrun->vm_exit;

	vie = &vme->u.inst_emul.vie;
	if (!vie->decoded) {
		/*
		 * Attempt to decode in userspace as a fallback.  This allows
		 * updating instruction decode in bhyve without rebooting the
		 * kernel (rapid prototyping), albeit with much slower
		 * emulation.
		 */
		vie_restart(vie);
		mode = vme->u.inst_emul.paging.cpu_mode;
		cs_d = vme->u.inst_emul.cs_d;
		if (vmm_decode_instruction(mode, cs_d, vie) != 0)
			goto fail;
		if (vm_set_register(vcpu, VM_REG_GUEST_RIP,
		    vme->rip + vie->num_processed) != 0)
			goto fail;
	}

	err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
	    &vme->u.inst_emul.paging);
	if (err) {
		if (err == ESRCH) {
			EPRINTLN("Unhandled memory access to 0x%lx\n",
			    vme->u.inst_emul.gpa);
		}
		goto fail;
	}

	return (VMEXIT_CONTINUE);

fail:
	fprintf(stderr, "Failed to emulate instruction sequence [ ");
	for (i = 0; i < vie->num_valid; i++)
		fprintf(stderr, "%02x", vie->inst[i]);
	FPRINTLN(stderr, " ] at 0x%lx", vme->rip);
	return (VMEXIT_ABORT);
}

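/*
 * The process exit status encodes why the guest stopped: 0 for a reset
 * request, 1 for a power-off, 2 for a halt and 3 for a triple fault,
 * which lets a wrapper script decide whether to restart the guest.
 */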
static int
vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_run *vmrun)
{
	struct vm_exit *vme;
	enum vm_suspend_how how;
	int vcpuid = vcpu_id(vcpu);

	vme = vmrun->vm_exit;

	how = vme->u.suspended.how;

	fbsdrun_deletecpu(vcpuid);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		if (get_config_bool_default("destroy_on_poweroff", false))
			vm_destroy(ctx);
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

static int
vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun __unused)
{

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
	gdb_cpu_suspend(vcpu);
#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
	/*
	 * XXX-MJ sleep for a short period to avoid chewing up the CPU in the
	 * window between activation of the vCPU thread and the STARTUP IPI.
	 */
	usleep(1000);
	return (VMEXIT_CONTINUE);
}

static int
vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_run *vmrun)
{
	gdb_cpu_breakpoint(vcpu, vmrun->vm_exit);
	return (VMEXIT_CONTINUE);
}

static int
vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
    struct vm_run *vmrun)
{
	struct vm_exit *vme;
	cpuset_t *dmask;
	int error = -1;
	int i;

	dmask = vmrun->cpuset;
	vme = vmrun->vm_exit;

	switch (vme->u.ipi.mode) {
	case APIC_DELMODE_INIT:
		CPU_FOREACH_ISSET(i, dmask) {
			error = vm_suspend_cpu(vcpu_info[i].vcpu);
			if (error) {
				warnx("%s: failed to suspend cpu %d\n",
				    __func__, i);
				break;
			}
		}
		break;
	case APIC_DELMODE_STARTUP:
		CPU_FOREACH_ISSET(i, dmask) {
			spinup_ap(vcpu_info[i].vcpu,
			    vme->u.ipi.vector << PAGE_SHIFT);
		}
		error = 0;
		break;
	default:
		break;
	}

	return (error);
}

static const vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT] = vmexit_inout,
	[VM_EXITCODE_INOUT_STR] = vmexit_inout,
	[VM_EXITCODE_VMX] = vmexit_vmx,
	[VM_EXITCODE_SVM] = vmexit_svm,
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP] = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
	[VM_EXITCODE_BPT] = vmexit_breakpoint,
	[VM_EXITCODE_IPI] = vmexit_ipi,
	[VM_EXITCODE_HLT] = vmexit_hlt,
	[VM_EXITCODE_PAUSE] = vmexit_pause,
};

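/*
 * Per-vCPU run loop: enter the guest via vm_run() and dispatch each VM
 * exit to the matching entry in handler[].  The loop continues while a
 * handler returns VMEXIT_CONTINUE, aborts the process on VMEXIT_ABORT
 * and only falls out if vm_run() itself fails.
 */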
static void
vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
{
	struct vm_exit vme;
	struct vm_run vmrun;
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus, dmask;

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));

	vmrun.vm_exit = &vme;
	vmrun.cpuset = &dmask;
	vmrun.cpusetsize = sizeof(dmask);

	while (1) {
		error = vm_run(vcpu, &vmrun);
		if (error != 0)
			break;

		exitcode = vme.exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, vcpu, &vmrun);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
{
	uint16_t sockets, cores, threads, maxcpus;
	int tmp, error;

	/*
	 * The guest is allowed to spin up more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
	if (error != 0)
		return (1);

	error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus);
	if (error == 0)
		return (maxcpus);
	else
		return (1);
}

static void
fbsdrun_set_capabilities(struct vcpu *vcpu)
{
	int err, tmp;

	if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
		err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1);
	}

	if (get_config_bool_default("x86.vmexit_on_pause", false)) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1);
	}

	if (get_config_bool_default("x86.x2apic", false))
		err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1);

	err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1);
	assert(err == 0);
}

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been set up by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
#ifndef BHYVE_SNAPSHOT
		if (!romboot) {
#else
		if (!romboot && !get_config_bool_default("is_migrated", false)) {
#endif
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	if (vm_limit_rights(ctx) != 0)
		err(EX_OSERR, "vm_limit_rights");
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, cpu_sockets, cpu_cores, cpu_threads, 0);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

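/*
 * Bring a vCPU online.  Note that fbsdrun_addcpu() activates every vCPU,
 * including the BSP, in the suspended state; the BSP is resumed
 * explicitly from main() and the APs are started later through the
 * INIT/SIPI path handled by vmexit_ipi().
 */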
static void
spinup_vcpu(struct vcpu_info *vi, bool bsp)
{
	int error;

	if (!bsp) {
		fbsdrun_set_capabilities(vi->vcpu);

		/*
		 * Enable the 'unrestricted guest' mode for APs.
		 *
		 * APs start up in power-on 16-bit mode.
		 */
		error = vm_set_capability(vi->vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
		assert(error == 0);
	}

	fbsdrun_addcpu(vi);
}

static bool
parse_config_option(const char *option)
{
	const char *value;
	char *path;

	value = strchr(option, '=');
	if (value == NULL || value[1] == '\0')
		return (false);
	path = strndup(option, value - option);
	if (path == NULL)
		err(4, "Failed to allocate memory");
	set_config_value(path, value + 1);
	return (true);
}

static void
parse_simple_config_file(const char *path)
{
	FILE *fp;
	char *line, *cp;
	size_t linecap;
	unsigned int lineno;

	fp = fopen(path, "r");
	if (fp == NULL)
		err(4, "Failed to open configuration file %s", path);
	line = NULL;
	linecap = 0;
	lineno = 1;
	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
		if (*line == '#' || *line == '\n')
			continue;
		cp = strchr(line, '\n');
		if (cp != NULL)
			*cp = '\0';
		if (!parse_config_option(line))
			errx(4, "%s line %u: invalid config option '%s'", path,
			    lineno, line);
	}
	free(line);
	fclose(fp);
}

static void
parse_gdb_options(const char *opt)
{
	const char *sport;
	char *colon;

	if (opt[0] == 'w') {
		set_config_bool("gdb.wait", true);
		opt++;
	}

	colon = strrchr(opt, ':');
	if (colon == NULL) {
		sport = opt;
	} else {
		*colon = '\0';
		colon++;
		sport = colon;
		set_config_value("gdb.address", opt);
	}

	set_config_value("gdb.port", sport);
}

static void
set_defaults(void)
{

	set_config_bool("acpi_tables", false);
	set_config_value("memory.size", "256M");
	set_config_bool("x86.strictmsr", true);
	set_config_value("lpc.fwcfg", "bhyve");
}

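/*
 * Command line options are translated into "key=value" entries in the
 * configuration tree (see config.h); device models and the rest of
 * main() read their settings from that tree rather than from the raw
 * option values.
 */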
int
main(int argc, char *argv[])
{
	int c, error;
	int max_vcpus, memflags;
	struct vcpu *bsp;
	struct vmctx *ctx;
	struct qemu_fwcfg_item *e820_fwcfg_item;
	uint64_t rip;
	size_t memsize;
	const char *optstr, *value, *vmname;
#ifdef BHYVE_SNAPSHOT
	char *restore_file;
	char *migration_host;
	struct restore_state rstate;

	restore_file = NULL;
	migration_host = NULL;
#endif

	init_config();
	set_defaults();
	progname = basename(argv[0]);

#ifdef BHYVE_SNAPSHOT
	optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:r:R:";
#else
	optstr = "aehuwxACDHIPSWYk:f:o:p:G:c:s:m:l:K:U:";
#endif
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			set_config_bool("x86.x2apic", false);
			break;
		case 'A':
			set_config_bool("acpi_tables", true);
			break;
		case 'D':
			set_config_bool("destroy_on_poweroff", true);
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid vcpu pinning "
				    "configuration '%s'", optarg);
			}
			break;
		case 'c':
			if (topology_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid cpu topology "
				    "'%s'", optarg);
			}
			break;
		case 'C':
			set_config_bool("memory.guest_in_core", true);
			break;
		case 'f':
			if (qemu_fwcfg_parse_cmdline_arg(optarg) != 0) {
				errx(EX_USAGE, "invalid fwcfg item '%s'", optarg);
			}
			break;
		case 'G':
			parse_gdb_options(optarg);
			break;
		case 'k':
			parse_simple_config_file(optarg);
			break;
		case 'K':
			set_config_value("keyboard.layout", optarg);
			break;
		case 'l':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				lpc_print_supported_devices();
				exit(0);
			} else if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
#ifdef BHYVE_SNAPSHOT
		case 'r':
			restore_file = optarg;
			break;
		case 'R':
			migration_host = optarg;
			set_config_bool("is_migrated", true);
			break;
#endif
		case 's':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				pci_print_supported_devices();
				exit(0);
			} else if (pci_parse_slot(optarg) != 0)
				exit(4);
			else
				break;
		case 'S':
			set_config_bool("memory.wired", true);
			break;
		case 'm':
			set_config_value("memory.size", optarg);
			break;
		case 'o':
			if (!parse_config_option(optarg))
				errx(EX_USAGE, "invalid configuration option '%s'", optarg);
			break;
		case 'H':
			set_config_bool("x86.vmexit_on_hlt", true);
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			set_config_bool("x86.vmexit_on_pause", true);
			break;
		case 'e':
			set_config_bool("x86.strictio", true);
			break;
		case 'u':
			set_config_bool("rtc.use_localtime", false);
			break;
		case 'U':
			set_config_value("uuid", optarg);
			break;
		case 'w':
			set_config_bool("x86.strictmsr", false);
			break;
		case 'W':
			set_config_bool("virtio_msix", false);
			break;
		case 'x':
			set_config_bool("x86.x2apic", true);
			break;
		case 'Y':
			set_config_bool("x86.mptable", false);
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc > 1)
		usage(1);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		error = load_restore_file(restore_file, &rstate);
		if (error) {
			fprintf(stderr, "Failed to read checkpoint info from "
			    "file: '%s'.\n", restore_file);
			exit(1);
		}
		vmname = lookup_vmname(&rstate);
		if (vmname != NULL)
			set_config_value("name", vmname);
	}
#endif

	if (argc == 1)
		set_config_value("name", argv[0]);

	vmname = get_config_value("name");
	if (vmname == NULL)
		usage(1);

	if (get_config_bool_default("config.dump", false)) {
		dump_config();
		exit(1);
	}

	calc_topology();
	build_vcpumaps();

	value = get_config_value("memory.size");
	error = vm_parse_memsize(value, &memsize);
	if (error)
		errx(EX_USAGE, "invalid memsize '%s'", value);

	ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		guest_ncpus = lookup_guest_ncpus(&rstate);
		memflags = lookup_memflags(&rstate);
		memsize = lookup_memsize(&rstate);
	}

	if (guest_ncpus < 1) {
		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
		exit(1);
	}
#endif

	bsp = vm_vcpu_open(ctx, BSP);
	max_vcpus = num_vcpus_allowed(ctx, bsp);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

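	/*
	 * Capabilities are configured on the BSP here; the APs get the same
	 * treatment later, in spinup_vcpu(), once their vcpu handles have
	 * been opened.
	 */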
	fbsdrun_set_capabilities(bsp);

	/* Allocate per-VCPU resources. */
	vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info));
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) {
		vcpu_info[vcpuid].ctx = ctx;
		vcpu_info[vcpuid].vcpuid = vcpuid;
		if (vcpuid == BSP)
			vcpu_info[vcpuid].vcpu = bsp;
		else
			vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
	}

	memflags = 0;
	if (get_config_bool_default("memory.wired", false))
		memflags |= VM_MEM_F_WIRED;
	if (get_config_bool_default("memory.guest_in_core", false))
		memflags |= VM_MEM_F_INCORE;
	vm_set_memflags(ctx, memflags);
	error = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (error) {
		fprintf(stderr, "Unable to setup memory (%d)\n", errno);
		exit(4);
	}

	error = init_msr();
	if (error) {
		fprintf(stderr, "init_msr error %d", error);
		exit(4);
	}

	init_mem(guest_ncpus);
	init_inout();
	kernemu_dev_init();
	init_bootrom(ctx);
	atkbdc_init(ctx);
	pci_irq_init(ctx);
	ioapic_init(ctx);

	rtc_init(ctx);
	sci_init(ctx);

	if (qemu_fwcfg_init(ctx) != 0) {
		fprintf(stderr, "qemu fwcfg initialization error");
		exit(4);
	}

	if (qemu_fwcfg_add_file("opt/bhyve/hw.ncpu", sizeof(guest_ncpus),
	    &guest_ncpus) != 0) {
		fprintf(stderr, "Could not add qemu fwcfg opt/bhyve/hw.ncpu");
		exit(4);
	}

	if (e820_init(ctx) != 0) {
		fprintf(stderr, "Unable to setup E820");
		exit(4);
	}

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0) {
		perror("device emulation initialization error");
		exit(4);
	}

	/*
	 * Initialize after PCI, to allow a bootrom file to reserve the high
	 * region.
	 */
	if (get_config_bool("acpi_tables"))
		vmgenc_init(ctx);

	init_gdb(ctx);

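	/*
	 * Booting from a ROM starts the BSP at the architectural reset state,
	 * which requires the "unrestricted guest" capability; reset the vCPU
	 * to its power-on register state before it runs.
	 */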
	if (lpc_bootrom()) {
		if (vm_set_capability(bsp, VM_CAP_UNRESTRICTED_GUEST, 1)) {
			fprintf(stderr, "ROM boot failed: unrestricted guest "
			    "capability not available\n");
			exit(4);
		}
		error = vcpu_reset(bsp);
		assert(error == 0);
	}

	/*
	 * Add all vCPUs.
	 */
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
		spinup_vcpu(&vcpu_info[vcpuid], vcpuid == BSP);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL || migration_host != NULL) {
		fprintf(stdout, "Pausing pci devs...\n");
		if (vm_pause_devices() != 0) {
			fprintf(stderr, "Failed to pause PCI device state.\n");
			exit(1);
		}

		if (restore_file != NULL) {
			fprintf(stdout, "Restoring vm mem...\n");
			if (restore_vm_mem(ctx, &rstate) != 0) {
				fprintf(stderr,
				    "Failed to restore VM memory.\n");
				exit(1);
			}

			fprintf(stdout, "Restoring pci devs...\n");
			if (vm_restore_devices(&rstate) != 0) {
				fprintf(stderr,
				    "Failed to restore PCI device state.\n");
				exit(1);
			}

			fprintf(stdout, "Restoring kernel structs...\n");
			if (vm_restore_kern_structs(ctx, &rstate) != 0) {
				fprintf(stderr,
				    "Failed to restore kernel structs.\n");
				exit(1);
			}
		}

		if (migration_host != NULL) {
			fprintf(stdout, "Starting the migration process...\n");
			if (receive_vm_migration(ctx, migration_host) != 0) {
				fprintf(stderr, "Failed to migrate the vm.\n");
				exit(1);
			}
		}

		fprintf(stdout, "Resuming pci devs...\n");
		if (vm_resume_devices() != 0) {
			fprintf(stderr, "Failed to resume PCI device state.\n");
			exit(1);
		}
	}
#endif	/* BHYVE_SNAPSHOT */

	error = vm_get_register(bsp, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	if (get_config_bool_default("x86.mptable", true)) {
		error = mptable_build(ctx, guest_ncpus);
		if (error) {
			perror("error building the guest tables");
			exit(4);
		}
	}

	error = smbios_build(ctx);
	if (error != 0)
		exit(4);

	if (get_config_bool("acpi_tables")) {
		error = acpi_build(ctx, guest_ncpus);
		assert(error == 0);
	}

	e820_fwcfg_item = e820_get_fwcfg_item();
	if (e820_fwcfg_item == NULL) {
		fprintf(stderr, "invalid e820 table");
		exit(4);
	}
	if (qemu_fwcfg_add_file("etc/e820", e820_fwcfg_item->size,
	    e820_fwcfg_item->data) != 0) {
		fprintf(stderr, "could not add qemu fwcfg etc/e820");
		exit(4);
	}
	free(e820_fwcfg_item);

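	/*
	 * The bhyve-native fwctl interface is only exposed when a bootrom is
	 * in use and the fw_cfg style has been left at its default of
	 * "bhyve" (it can be switched with, e.g., -o lpc.fwcfg=qemu).
	 */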
	if (lpc_bootrom() && strcmp(lpc_fwcfg(), "bhyve") == 0) {
		fwctl_init();
	}

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

#ifdef BHYVE_SNAPSHOT
	/* initialize mutex/cond variables */
	init_snapshot();

	/*
	 * checkpointing thread for communication with bhyvectl
	 */
	if (init_checkpoint_thread(ctx) != 0)
		errx(EX_OSERR, "Failed to start checkpoint thread");
#endif

#ifndef WITHOUT_CAPSICUM
	caph_cache_catpages();

	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	if (caph_enter() == -1)
		errx(EX_OSERR, "cap_enter() failed");
#endif

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL)
		destroy_restore_state(&rstate);
	if (restore_file != NULL || migration_host != NULL) {
		if (vm_restore_time(ctx) < 0)
			err(EX_OSERR, "Unable to restore time");

		for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
			vm_resume_cpu(vcpu_info[vcpuid].vcpu);
	} else
#endif
		vm_resume_cpu(bsp);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(4);
}