/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/mman.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/socket.h>
#include <sys/stat.h>
#endif
#include <sys/time.h>
#ifdef BHYVE_SNAPSHOT
#include <sys/un.h>
#endif

#include <machine/atomic.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#ifdef BHYVE_SNAPSHOT
#include <fcntl.h>
#endif
#include <libgen.h>
#include <libutil.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef BHYVE_SNAPSHOT
#include <ucl.h>
#include <unistd.h>

#include <libxo/xo.h>
#endif

#include <dev/vmm/vmm_mem.h>
#include <vmmapi.h>

#include "acpi.h"
#include "bhyverun.h"
#include "bootrom.h"
#include "config.h"
#include "debug.h"
#ifdef BHYVE_GDB
#include "gdb.h"
#endif
#include "mem.h"
#include "mevent.h"
#include "pci_emul.h"
#ifdef __amd64__
#include "amd64/pci_lpc.h"
#endif
#include "qemu_fwcfg.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "tpm_device.h"
#include "vmgenc.h"
#include "vmexit.h"

#define	MB	(1024UL * 1024)
#define	GB	(1024UL * MB)

int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;

int raw_stdio = 0;

#ifdef BHYVE_SNAPSHOT
char *restore_file;
#endif

static const int BSP = 0;

static cpuset_t cpumask;

static struct vm_mem_domain guest_domains[VM_MAXMEMDOM];
static int guest_ndomains = 0;

static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);

static struct vcpu_info {
	struct vmctx	*ctx;
	struct vcpu	*vcpu;
	int		vcpuid;
} *vcpu_info;

static cpuset_t **vcpumap;
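
/*
 * The topology option accepted by bhyve_topology_parse() below (bhyve's -c
 * flag) is a comma-separated list of key=value tokens, for example:
 *
 *	-c cpus=4,sockets=2,cores=2,threads=1
 *
 * A bare number is shorthand for "cpus=<n>".  (Illustrative example only;
 * see the bhyve(8) manual page for the authoritative syntax.)
 */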

/*
 * XXX This parser is known to have the following issues:
 * 1. It accepts null key=value tokens ",," as setting "cpus" to an
 *    empty string.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; it results in a topology of 1 vCPU.
 */
int
bhyve_topology_parse(const char *opt)
{
	char *cp, *str, *tofree;

	if (*opt == '\0') {
		set_config_value("sockets", "1");
		set_config_value("cores", "1");
		set_config_value("threads", "1");
		set_config_value("cpus", "1");
		return (0);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			set_config_value("cpus", cp + strlen("cpus="));
		else if (strncmp(cp, "sockets=", strlen("sockets=")) == 0)
			set_config_value("sockets", cp + strlen("sockets="));
		else if (strncmp(cp, "cores=", strlen("cores=")) == 0)
			set_config_value("cores", cp + strlen("cores="));
		else if (strncmp(cp, "threads=", strlen("threads=")) == 0)
			set_config_value("threads", cp + strlen("threads="));
		else if (strchr(cp, '=') != NULL)
			goto out;
		else
			set_config_value("cpus", cp);
	}
	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}

static int
parse_int_value(const char *key, const char *value, int minval, int maxval)
{
	char *cp;
	long lval;

	errno = 0;
	lval = strtol(value, &cp, 0);
	if (errno != 0 || *cp != '\0' || cp == value || lval < minval ||
	    lval > maxval)
		errx(4, "Invalid value for %s: '%s'", key, value);
	return (lval);
}

int
bhyve_numa_parse(const char *opt)
{
	int id = -1;
	nvlist_t *nvl;
	char *cp, *str, *tofree;
	char pathbuf[64] = { 0 };
	char *size = NULL, *cpus = NULL, *domain_policy = NULL;

	if (*opt == '\0') {
		return (-1);
	}

	tofree = str = strdup(opt);
	if (str == NULL)
		errx(4, "Failed to allocate memory");

	while ((cp = strsep(&str, ",")) != NULL) {
		if (strncmp(cp, "id=", strlen("id=")) == 0)
			id = parse_int_value("id", cp + strlen("id="), 0,
			    UINT8_MAX);
		else if (strncmp(cp, "size=", strlen("size=")) == 0)
			size = cp + strlen("size=");
		else if (strncmp(cp,
		    "domain_policy=", strlen("domain_policy=")) == 0)
			domain_policy = cp + strlen("domain_policy=");
		else if (strncmp(cp, "cpus=", strlen("cpus=")) == 0)
			cpus = cp + strlen("cpus=");
	}

	if (id == -1) {
		EPRINTLN("Missing NUMA domain ID in '%s'", opt);
		goto out;
	}

	snprintf(pathbuf, sizeof(pathbuf), "domains.%d", id);
	nvl = find_config_node(pathbuf);
	if (nvl == NULL)
		nvl = create_config_node(pathbuf);
	if (size != NULL)
		set_config_value_node(nvl, "size", size);
	if (domain_policy != NULL)
		set_config_value_node(nvl, "domain_policy", domain_policy);
	if (cpus != NULL)
		set_config_value_node(nvl, "cpus", cpus);

	free(tofree);
	return (0);

out:
	free(tofree);
	return (-1);
}
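
/*
 * Populate guest_domains[] from the "domains.<id>" config nodes created by
 * bhyve_numa_parse().  If no domains were configured, a single domain 0
 * covering all of guest memory is used; if any configured domain omits its
 * "size", the total VM memory size is divided evenly across all domains.
 */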
static void
calc_mem_affinity(size_t vm_memsize)
{
	int i;
	nvlist_t *nvl;
	bool need_recalc;
	const char *value;
	struct vm_mem_domain *dom;
	char pathbuf[64] = { 0 };

	need_recalc = false;
	for (i = 0; i < VM_MAXMEMDOM; i++) {
		dom = &guest_domains[i];
		snprintf(pathbuf, sizeof(pathbuf), "domains.%d", i);
		nvl = find_config_node(pathbuf);
		if (nvl == NULL) {
			break;
		}

		value = get_config_value_node(nvl, "size");
		need_recalc |= value == NULL;
		if (value != NULL && vm_parse_memsize(value, &dom->size)) {
			errx(EX_USAGE, "invalid memsize for domain %d: '%s'",
			    i, value);
		}

		dom->ds_mask = calloc(1, sizeof(domainset_t));
		if (dom->ds_mask == NULL) {
			errx(EX_OSERR, "Failed to allocate domainset mask");
		}
		dom->ds_size = sizeof(domainset_t);
		value = get_config_value_node(nvl, "domain_policy");
		if (value == NULL) {
			dom->ds_policy = DOMAINSET_POLICY_INVALID;
			DOMAINSET_ZERO(dom->ds_mask);
		} else if (domainset_parselist(value, dom->ds_mask,
		    &dom->ds_policy) != CPUSET_PARSE_OK) {
			errx(EX_USAGE, "failed to parse domain policy '%s'",
			    value);
		}
	}

	guest_ndomains = i;
	if (guest_ndomains == 0) {
		/*
		 * No domains were specified - create domain 0 holding all
		 * CPUs and memory.
		 */
		guest_ndomains = 1;
		guest_domains[0].size = vm_memsize;
	} else if (need_recalc) {
		warnx("At least one domain memory size was not specified, "
		    "distributing total VM memory size across all domains");
		for (i = 0; i < guest_ndomains; i++) {
			guest_domains[i].size = vm_memsize / guest_ndomains;
		}
	}
}

/*
 * Set the sockets, cores, threads, and guest_ncpus variables based on
 * the configured topology.
 *
 * The limits of UINT16_MAX are due to the types passed to
 * vm_set_topology().  vmm.ko may enforce tighter limits.
 */
static void
calc_topology(void)
{
	const char *value;
	bool explicit_cpus;
	uint64_t ncpus;

	value = get_config_value("cpus");
	if (value != NULL) {
		guest_ncpus = parse_int_value("cpus", value, 1, UINT16_MAX);
		explicit_cpus = true;
	} else {
		guest_ncpus = 1;
		explicit_cpus = false;
	}
	value = get_config_value("cores");
	if (value != NULL)
		cpu_cores = parse_int_value("cores", value, 1, UINT16_MAX);
	else
		cpu_cores = 1;
	value = get_config_value("threads");
	if (value != NULL)
		cpu_threads = parse_int_value("threads", value, 1, UINT16_MAX);
	else
		cpu_threads = 1;
	value = get_config_value("sockets");
	if (value != NULL)
		cpu_sockets = parse_int_value("sockets", value, 1, UINT16_MAX);
	else
		cpu_sockets = guest_ncpus;

	/*
	 * Compute sockets * cores * threads avoiding overflow.  The
	 * range check above ensures these are 16-bit values.
	 */
	ncpus = (uint64_t)cpu_sockets * cpu_cores * cpu_threads;
	if (ncpus > UINT16_MAX)
		errx(4, "Computed number of vCPUs too high: %ju",
		    (uintmax_t)ncpus);

	if (explicit_cpus) {
		if (guest_ncpus != (int)ncpus)
			errx(4, "Topology (%d sockets, %d cores, %d threads) "
			    "does not match %d vCPUs",
			    cpu_sockets, cpu_cores, cpu_threads,
			    guest_ncpus);
	} else
		guest_ncpus = ncpus;
}
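
/*
 * Parse a CPU pinning option of the form "<vcpu>:<hostcpu>" (bhyve's -p
 * flag) and append the host CPU to the "vcpu.<n>.cpuset" config value.
 * The accumulated list is turned into a cpuset by build_vcpumaps() and
 * applied to the vCPU thread in fbsdrun_start_thread().
 */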
int
bhyve_pincpu_parse(const char *opt)
{
	const char *value;
	char *newval;
	char key[16];
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0) {
		fprintf(stderr, "invalid vcpu '%d'\n", vcpu);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
	value = get_config_value(key);

	if (asprintf(&newval, "%s%s%d", value != NULL ? value : "",
	    value != NULL ? "," : "", pcpu) == -1) {
		perror("failed to build new cpuset string");
		return (-1);
	}

	set_config_value(key, newval);
	free(newval);
	return (0);
}

/*
 * Parse a comma-separated list of host CPUs and CPU ranges, e.g. "0,2,4-6",
 * into *set.  Exits on malformed input or out-of-range CPU numbers.
 */
static void
parse_cpuset(int vcpu, const char *list, cpuset_t *set)
{
	char *cp, *token;
	int pcpu, start;

	CPU_ZERO(set);
	start = -1;
	token = __DECONST(char *, list);
	for (;;) {
		pcpu = strtoul(token, &cp, 0);
		if (cp == token)
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		if (pcpu < 0 || pcpu >= CPU_SETSIZE)
			errx(4, "hostcpu '%d' outside valid range from 0 to %d",
			    pcpu, CPU_SETSIZE - 1);
		switch (*cp) {
		case ',':
		case '\0':
			if (start >= 0) {
				if (start > pcpu)
					errx(4, "Invalid hostcpu range %d-%d",
					    start, pcpu);
				while (start < pcpu) {
					CPU_SET(start, set);
					start++;
				}
				start = -1;
			}
			CPU_SET(pcpu, set);
			break;
		case '-':
			if (start >= 0)
				errx(4, "invalid cpuset for vcpu %d: '%s'",
				    vcpu, list);
			start = pcpu;
			break;
		default:
			errx(4, "invalid cpuset for vcpu %d: '%s'", vcpu, list);
		}
		if (*cp == '\0')
			break;
		token = cp + 1;
	}
}

static void
build_vcpumaps(void)
{
	char key[16];
	const char *value;
	int vcpu;

	vcpumap = calloc(guest_ncpus, sizeof(*vcpumap));
	for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
		snprintf(key, sizeof(key), "vcpu.%d.cpuset", vcpu);
		value = get_config_value(key);
		if (value == NULL)
			continue;
		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
		if (vcpumap[vcpu] == NULL)
			err(4, "Failed to allocate cpuset for vcpu %d", vcpu);
		parse_cpuset(vcpu, value, vcpumap[vcpu]);
	}
}
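
/*
 * Record each vCPU's NUMA domain with the ACPI code via
 * acpi_add_vcpu_affinity().  Every configured domain must name its CPU set;
 * with a single domain and no explicit set, all vCPUs are assigned to
 * domain 0.
 */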
static void
set_vcpu_affinities(void)
{
	int cpu, error;
	nvlist_t *nvl = NULL;
	cpuset_t cpus;
	const char *value;
	char pathbuf[64] = { 0 };

	for (int dom = 0; dom < guest_ndomains; dom++) {
		snprintf(pathbuf, sizeof(pathbuf), "domains.%d", dom);
		nvl = find_config_node(pathbuf);
		if (nvl == NULL)
			break;

		value = get_config_value_node(nvl, "cpus");
		if (value == NULL) {
			EPRINTLN("Missing CPU set for domain %d", dom);
			exit(4);
		}

		parse_cpuset(dom, value, &cpus);
		CPU_FOREACH_ISSET(cpu, &cpus) {
			error = acpi_add_vcpu_affinity(cpu, dom);
			if (error) {
				EPRINTLN(
				    "Unable to set vCPU %d affinity for domain %d: %s",
				    cpu, dom, strerror(errno));
				exit(4);
			}
		}
	}
	if (guest_ndomains > 1 || nvl != NULL)
		return;

	/*
	 * If we're dealing with one domain and no cpuset was provided, create a
	 * default one holding all cpus.
	 */
	for (cpu = 0; cpu < guest_ncpus; cpu++) {
		error = acpi_add_vcpu_affinity(cpu, 0);
		if (error) {
			EPRINTLN(
			    "Unable to set vCPU %d affinity for domain %d: %s",
			    cpu, 0, strerror(errno));
			exit(4);
		}
	}
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

#ifdef BHYVE_SNAPSHOT
uintptr_t
paddr_host2guest(struct vmctx *ctx, void *addr)
{
	return (vm_rev_map_gpa(ctx, addr));
}
#endif

int
fbsdrun_virtio_msix(void)
{

	return (get_config_bool_default("virtio_msix", true));
}

struct vcpu *
fbsdrun_vcpu(int vcpuid)
{
	return (vcpu_info[vcpuid].vcpu);
}

static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct vcpu_info *vi = param;
	int error;

	snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid);
	pthread_set_name_np(pthread_self(), tname);

	if (vcpumap[vi->vcpuid] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vi->vcpuid]);
		assert(error == 0);
	}

#ifdef BHYVE_SNAPSHOT
	checkpoint_cpu_add(vi->vcpuid);
#endif
#ifdef BHYVE_GDB
	gdb_cpu_add(vi->vcpu);
#endif

	vm_loop(vi->ctx, vi->vcpu);

	/* We get here if the VM was destroyed asynchronously. */
	exit(4);
}

void
fbsdrun_addcpu(int vcpuid)
{
	struct vcpu_info *vi;
	pthread_t thr;
	int error;

	vi = &vcpu_info[vcpuid];

	error = vm_activate_cpu(vi->vcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", vi->vcpuid);

	CPU_SET_ATOMIC(vcpuid, &cpumask);

	error = vm_suspend_cpu(vi->vcpu);
	assert(error == 0);

	error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi);
	assert(error == 0);
}

void
fbsdrun_deletecpu(int vcpu)
{
	static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

	pthread_mutex_lock(&resetcpu_mtx);
	if (!CPU_ISSET(vcpu, &cpumask)) {
		EPRINTLN("Attempting to delete unknown cpu %d", vcpu);
		exit(4);
	}

	CPU_CLR(vcpu, &cpumask);

	if (vcpu != BSP) {
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
		/* NOTREACHED */
	}

	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);
}

int
fbsdrun_suspendcpu(int vcpuid)
{
	return (vm_suspend_cpu(vcpu_info[vcpuid].vcpu));
}
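
/*
 * Per-vCPU run loop: call vm_run() repeatedly and dispatch each exit reason
 * to the matching handler in vmexit_handlers[].  The loop ends only when
 * vm_run() fails or a handler returns something other than VMEXIT_CONTINUE.
 */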
static void
vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
{
	struct vm_exit vme;
	struct vm_run vmrun;
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus, dmask;

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));

	vmrun.vm_exit = &vme;
	vmrun.cpuset = &dmask;
	vmrun.cpusetsize = sizeof(dmask);

	while (1) {
		error = vm_run(vcpu, &vmrun);
		if (error != 0)
			break;

		exitcode = vme.exitcode;
		if (exitcode >= VM_EXITCODE_MAX ||
		    vmexit_handlers[exitcode] == NULL) {
			warnx("vm_loop: unexpected exitcode 0x%x", exitcode);
			exit(4);
		}

		rc = (*vmexit_handlers[exitcode])(ctx, vcpu, &vmrun);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	EPRINTLN("vm_run error %d, errno %d", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
{
	uint16_t sockets, cores, threads, maxcpus;
	int tmp, error;

	/*
	 * The guest is allowed to spin up more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
	if (error != 0)
		return (1);

	error = vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus);
	if (error == 0)
		return (maxcpus);
	else
		return (1);
}

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool romboot;

	romboot = bootrom_boot();

	/*
	 * If we don't have a boot ROM, the guest context must have been
	 * initialized by bhyveload(8) or equivalent.
	 */
	ctx = vm_openf(vmname, romboot ? VMMAPI_OPEN_REINIT : 0);
	if (ctx == NULL) {
		if (errno != ENOENT)
			err(4, "vm_openf");
		if (!romboot)
			errx(4, "no bootrom was configured");
		ctx = vm_openf(vmname, VMMAPI_OPEN_CREATE);
		if (ctx == NULL)
			err(4, "vm_openf");
	}

#ifndef WITHOUT_CAPSICUM
	if (vm_limit_rights(ctx) != 0)
		err(EX_OSERR, "vm_limit_rights");
#endif

	error = vm_set_topology(ctx, cpu_sockets, cpu_cores, cpu_threads, 0);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

bool
bhyve_parse_config_option(const char *option)
{
	const char *value;
	char *path;

	value = strchr(option, '=');
	if (value == NULL || value[1] == '\0')
		return (false);
	path = strndup(option, value - option);
	if (path == NULL)
		err(4, "Failed to allocate memory");
	set_config_value(path, value + 1);
	free(path);
	return (true);
}

void
bhyve_parse_simple_config_file(const char *path)
{
	FILE *fp;
	char *line, *cp;
	size_t linecap;
	unsigned int lineno;

	fp = fopen(path, "r");
	if (fp == NULL)
		err(4, "Failed to open configuration file %s", path);
	line = NULL;
	linecap = 0;
	lineno = 1;
	for (lineno = 1; getline(&line, &linecap, fp) > 0; lineno++) {
		if (*line == '#' || *line == '\n')
			continue;
		cp = strchr(line, '\n');
		if (cp != NULL)
			*cp = '\0';
		if (!bhyve_parse_config_option(line))
			errx(4, "%s line %u: invalid config option '%s'", path,
			    lineno, line);
	}
	free(line);
	fclose(fp);
}

#ifdef BHYVE_GDB
void
bhyve_parse_gdb_options(const char *opt)
{
	const char *sport;
	char *colon;

	if (opt[0] == 'w') {
		set_config_bool("gdb.wait", true);
		opt++;
	}

	colon = strrchr(opt, ':');
	if (colon == NULL) {
		sport = opt;
	} else {
		*colon = '\0';
		colon++;
		sport = colon;
		set_config_value("gdb.address", opt);
	}

	set_config_value("gdb.port", sport);
}
#endif
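
/*
 * bhyve(8) entry point: parse the command line and configuration, create or
 * reinitialize the VM, size its topology and memory, set up device
 * emulation, start one thread per vCPU, and then hand control to the
 * mevent dispatch loop.
 */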
int
main(int argc, char *argv[])
{
	int error;
	int max_vcpus, memflags;
	struct vcpu *bsp;
	struct vmctx *ctx;
	size_t memsize;
	const char *value, *vmname;
#ifdef BHYVE_SNAPSHOT
	struct restore_state rstate;
#endif

	bhyve_init_config();
	bhyve_optparse(argc, argv);
	argc -= optind;
	argv += optind;

	if (argc > 1)
		bhyve_usage(1);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		error = load_restore_file(restore_file, &rstate);
		if (error) {
			fprintf(stderr, "Failed to read checkpoint info from "
			    "file: '%s'.\n", restore_file);
			exit(1);
		}
		vmname = lookup_vmname(&rstate);
		if (vmname != NULL)
			set_config_value("name", vmname);
	}
#endif

	if (argc == 1)
		set_config_value("name", argv[0]);

	vmname = get_config_value("name");
	if (vmname == NULL)
		bhyve_usage(1);

	if (get_config_bool_default("config.dump", false)) {
		dump_config();
		exit(1);
	}

	calc_topology();
	build_vcpumaps();

	value = get_config_value("memory.size");
	error = vm_parse_memsize(value, &memsize);
	if (error)
		errx(EX_USAGE, "invalid memsize '%s'", value);

	ctx = do_open(vmname);

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		guest_ncpus = lookup_guest_ncpus(&rstate);
		memflags = lookup_memflags(&rstate);
		memsize = lookup_memsize(&rstate);
	}

	if (guest_ncpus < 1) {
		fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
		exit(1);
	}
#endif

	bsp = vm_vcpu_open(ctx, BSP);
	max_vcpus = num_vcpus_allowed(ctx, bsp);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

	bhyve_init_vcpu(bsp);

	/* Allocate per-VCPU resources. */
	vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info));
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) {
		vcpu_info[vcpuid].ctx = ctx;
		vcpu_info[vcpuid].vcpuid = vcpuid;
		if (vcpuid == BSP)
			vcpu_info[vcpuid].vcpu = bsp;
		else
			vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
	}

	calc_mem_affinity(memsize);
	memflags = 0;
	if (get_config_bool_default("memory.wired", false))
		memflags |= VM_MEM_F_WIRED;
	if (get_config_bool_default("memory.guest_in_core", false))
		memflags |= VM_MEM_F_INCORE;
	vm_set_memflags(ctx, memflags);
	error = vm_setup_memory_domains(ctx, VM_MMAP_ALL, guest_domains,
	    guest_ndomains);
	if (error) {
		fprintf(stderr, "Unable to setup memory (%d)\n", errno);
		exit(4);
	}

	set_vcpu_affinities();
	init_mem(guest_ncpus);
	init_bootrom(ctx);
	if (bhyve_init_platform(ctx, bsp) != 0)
		exit(4);

	if (qemu_fwcfg_init(ctx) != 0) {
		fprintf(stderr, "qemu fwcfg initialization error\n");
		exit(4);
	}

	if (qemu_fwcfg_add_file("opt/bhyve/hw.ncpu", sizeof(guest_ncpus),
	    &guest_ncpus) != 0) {
		fprintf(stderr, "Could not add qemu fwcfg opt/bhyve/hw.ncpu\n");
		exit(4);
	}

	/*
	 * Exit if a device emulation finds an error in its initialization.
	 */
	if (init_pci(ctx) != 0) {
		EPRINTLN("Device emulation initialization error: %s",
		    strerror(errno));
		exit(4);
	}
	if (init_tpm(ctx) != 0) {
		EPRINTLN("Failed to init TPM device");
		exit(4);
	}

	/*
	 * Initialize after PCI, to allow a bootrom file to reserve the high
	 * region.
	 */
	if (get_config_bool("acpi_tables"))
		vmgenc_init(ctx);

#ifdef BHYVE_GDB
	init_gdb(ctx);
#endif

	/*
	 * Add all vCPUs.
	 */
	for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
		bhyve_start_vcpu(vcpu_info[vcpuid].vcpu, vcpuid == BSP);
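
	/*
	 * When restoring from a checkpoint, the block below pauses devices,
	 * loads guest memory and device state, restores kernel structs, and
	 * only then resumes devices.
	 */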
#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		FPRINTLN(stdout, "Pausing pci devs...");
		if (vm_pause_devices() != 0) {
			EPRINTLN("Failed to pause PCI device state.");
			exit(1);
		}

		FPRINTLN(stdout, "Restoring vm mem...");
		if (restore_vm_mem(ctx, &rstate) != 0) {
			EPRINTLN("Failed to restore VM memory.");
			exit(1);
		}

		FPRINTLN(stdout, "Restoring pci devs...");
		if (vm_restore_devices(&rstate) != 0) {
			EPRINTLN("Failed to restore PCI device state.");
			exit(1);
		}

		FPRINTLN(stdout, "Restoring kernel structs...");
		if (vm_restore_kern_structs(ctx, &rstate) != 0) {
			EPRINTLN("Failed to restore kernel structs.");
			exit(1);
		}

		FPRINTLN(stdout, "Resuming pci devs...");
		if (vm_resume_devices() != 0) {
			EPRINTLN("Failed to resume PCI device state.");
			exit(1);
		}
	}
#endif

	if (bhyve_init_platform_late(ctx, bsp) != 0)
		exit(4);

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

#ifdef BHYVE_SNAPSHOT
	/*
	 * Checkpointing thread for communication with bhyvectl.
	 */
	if (init_checkpoint_thread(ctx) != 0)
		errx(EX_OSERR, "Failed to start checkpoint thread");
#endif

#ifndef WITHOUT_CAPSICUM
	caph_cache_catpages();

	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	if (caph_enter() == -1)
		errx(EX_OSERR, "cap_enter() failed");
#endif

#ifdef BHYVE_SNAPSHOT
	if (restore_file != NULL) {
		destroy_restore_state(&rstate);
		if (vm_restore_time(ctx) < 0)
			err(EX_OSERR, "Unable to restore time");

		for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
			vm_resume_cpu(vcpu_info[vcpuid].vcpu);
	} else
#endif
		vm_resume_cpu(bsp);

	/*
	 * Head off to the main event dispatch loop.
	 */
	mevent_dispatch();

	exit(4);
}