1 /*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/types.h> 33 #ifndef WITHOUT_CAPSICUM 34 #include <sys/capsicum.h> 35 #endif 36 #include <sys/mman.h> 37 #include <sys/time.h> 38 39 #include <machine/atomic.h> 40 #include <machine/segments.h> 41 42 #ifndef WITHOUT_CAPSICUM 43 #include <capsicum_helpers.h> 44 #endif 45 #include <stdio.h> 46 #include <stdlib.h> 47 #include <string.h> 48 #include <err.h> 49 #include <errno.h> 50 #include <libgen.h> 51 #include <unistd.h> 52 #include <assert.h> 53 #include <errno.h> 54 #include <pthread.h> 55 #include <pthread_np.h> 56 #include <sysexits.h> 57 #include <stdbool.h> 58 59 #include <machine/vmm.h> 60 #ifndef WITHOUT_CAPSICUM 61 #include <machine/vmm_dev.h> 62 #endif 63 #include <vmmapi.h> 64 65 #include "bhyverun.h" 66 #include "acpi.h" 67 #include "atkbdc.h" 68 #include "inout.h" 69 #include "dbgport.h" 70 #include "fwctl.h" 71 #include "ioapic.h" 72 #include "mem.h" 73 #include "mevent.h" 74 #include "mptbl.h" 75 #include "pci_emul.h" 76 #include "pci_irq.h" 77 #include "pci_lpc.h" 78 #include "smbiostbl.h" 79 #include "xmsr.h" 80 #include "spinup_ap.h" 81 #include "rtc.h" 82 83 #define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */ 84 85 #define MB (1024UL * 1024) 86 #define GB (1024UL * MB) 87 88 typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu); 89 extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu); 90 91 char *vmname; 92 93 int guest_ncpus; 94 char *guest_uuid_str; 95 96 static int guest_vmexit_on_hlt, guest_vmexit_on_pause; 97 static int virtio_msix = 1; 98 static int x2apic_mode = 0; /* default is xAPIC */ 99 100 static int strictio; 101 static int strictmsr = 1; 102 103 static int acpi; 104 105 static char *progname; 106 static const int BSP = 0; 107 108 static cpuset_t cpumask; 109 110 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip); 111 112 static struct vm_exit vmexit[VM_MAXCPU]; 113 114 struct bhyvestats { 115 uint64_t vmexit_bogus; 116 uint64_t vmexit_reqidle; 117 uint64_t vmexit_hlt; 118 uint64_t vmexit_pause; 119 uint64_t vmexit_mtrap; 120 uint64_t vmexit_inst_emul; 121 uint64_t cpu_switch_rotate; 122 uint64_t cpu_switch_direct; 123 } stats; 124 125 struct mt_vmm_info { 126 pthread_t mt_thr; 127 struct vmctx *mt_ctx; 128 int mt_vcpu; 129 } mt_vmm_info[VM_MAXCPU]; 130 131 static cpuset_t *vcpumap[VM_MAXCPU] = { NULL }; 132 133 static void 134 usage(int code) 135 { 136 137 fprintf(stderr, 138 "Usage: %s [-abehuwxACHPSWY] [-c vcpus] [-g <gdb port>] [-l <lpc>]\n" 139 " %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] <vm>\n" 140 " -a: local apic is in xAPIC mode (deprecated)\n" 141 " -A: create ACPI tables\n" 142 " -c: # cpus (default 1)\n" 143 " -C: include guest memory in core file\n" 144 " -e: exit on unhandled I/O access\n" 145 " -g: gdb port\n" 146 " -h: help\n" 147 " -H: vmexit from the guest on hlt\n" 148 " -l: LPC device configuration\n" 149 " -m: memory size in MB\n" 150 " -p: pin 'vcpu' to 'hostcpu'\n" 151 " -P: vmexit from the guest on pause\n" 152 " -s: <slot,driver,configinfo> PCI slot config\n" 153 " -S: guest memory cannot be swapped\n" 154 " -u: RTC keeps UTC time\n" 155 " -U: uuid\n" 156 " -w: ignore unimplemented MSRs\n" 157 " -W: force virtio to use single-vector MSI\n" 158 " -x: local apic is in x2APIC mode\n" 159 " -Y: disable MPtable generation\n", 160 progname, (int)strlen(progname), ""); 161 162 exit(code); 163 } 164 165 static int 166 pincpu_parse(const char *opt) 167 { 168 int vcpu, pcpu; 169 170 if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) { 171 fprintf(stderr, "invalid format: %s\n", opt); 172 return (-1); 173 } 174 175 if (vcpu < 0 || vcpu >= VM_MAXCPU) { 176 fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n", 177 vcpu, VM_MAXCPU - 1); 178 return (-1); 179 } 180 181 if (pcpu < 0 || pcpu >= CPU_SETSIZE) { 182 fprintf(stderr, "hostcpu '%d' outside valid range from " 183 "0 to %d\n", pcpu, CPU_SETSIZE - 1); 184 return (-1); 185 } 186 187 if (vcpumap[vcpu] == NULL) { 188 if ((vcpumap[vcpu] = malloc(sizeof(cpuset_t))) == NULL) { 189 perror("malloc"); 190 return (-1); 191 } 192 CPU_ZERO(vcpumap[vcpu]); 193 } 194 CPU_SET(pcpu, vcpumap[vcpu]); 195 return (0); 196 } 197 198 void 199 vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid, 200 int errcode) 201 { 202 struct vmctx *ctx; 203 int error, restart_instruction; 204 205 ctx = arg; 206 restart_instruction = 1; 207 208 error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode, 209 restart_instruction); 210 assert(error == 0); 211 } 212 213 void * 214 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len) 215 { 216 217 return (vm_map_gpa(ctx, gaddr, len)); 218 } 219 220 int 221 fbsdrun_vmexit_on_pause(void) 222 { 223 224 return (guest_vmexit_on_pause); 225 } 226 227 int 228 fbsdrun_vmexit_on_hlt(void) 229 { 230 231 return (guest_vmexit_on_hlt); 232 } 233 234 int 235 fbsdrun_virtio_msix(void) 236 { 237 238 return (virtio_msix); 239 } 240 241 static void * 242 fbsdrun_start_thread(void *param) 243 { 244 char tname[MAXCOMLEN + 1]; 245 struct mt_vmm_info *mtp; 246 int vcpu; 247 248 mtp = param; 249 vcpu = mtp->mt_vcpu; 250 251 snprintf(tname, sizeof(tname), "vcpu %d", vcpu); 252 pthread_set_name_np(mtp->mt_thr, tname); 253 254 vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip); 255 256 /* not reached */ 257 exit(1); 258 return (NULL); 259 } 260 261 void 262 fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip) 263 { 264 int error; 265 266 assert(fromcpu == BSP); 267 268 /* 269 * The 'newcpu' must be activated in the context of 'fromcpu'. If 270 * vm_activate_cpu() is delayed until newcpu's pthread starts running 271 * then vmm.ko is out-of-sync with bhyve and this can create a race 272 * with vm_suspend(). 273 */ 274 error = vm_activate_cpu(ctx, newcpu); 275 if (error != 0) 276 err(EX_OSERR, "could not activate CPU %d", newcpu); 277 278 CPU_SET_ATOMIC(newcpu, &cpumask); 279 280 /* 281 * Set up the vmexit struct to allow execution to start 282 * at the given RIP 283 */ 284 vmexit[newcpu].rip = rip; 285 vmexit[newcpu].inst_length = 0; 286 287 mt_vmm_info[newcpu].mt_ctx = ctx; 288 mt_vmm_info[newcpu].mt_vcpu = newcpu; 289 290 error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL, 291 fbsdrun_start_thread, &mt_vmm_info[newcpu]); 292 assert(error == 0); 293 } 294 295 static int 296 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu) 297 { 298 299 if (!CPU_ISSET(vcpu, &cpumask)) { 300 fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu); 301 exit(1); 302 } 303 304 CPU_CLR_ATOMIC(vcpu, &cpumask); 305 return (CPU_EMPTY(&cpumask)); 306 } 307 308 static int 309 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu, 310 uint32_t eax) 311 { 312 #if BHYVE_DEBUG 313 /* 314 * put guest-driven debug here 315 */ 316 #endif 317 return (VMEXIT_CONTINUE); 318 } 319 320 static int 321 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 322 { 323 int error; 324 int bytes, port, in, out; 325 int vcpu; 326 327 vcpu = *pvcpu; 328 329 port = vme->u.inout.port; 330 bytes = vme->u.inout.bytes; 331 in = vme->u.inout.in; 332 out = !in; 333 334 /* Extra-special case of host notifications */ 335 if (out && port == GUEST_NIO_PORT) { 336 error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax); 337 return (error); 338 } 339 340 error = emulate_inout(ctx, vcpu, vme, strictio); 341 if (error) { 342 fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n", 343 in ? "in" : "out", 344 bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), 345 port, vmexit->rip); 346 return (VMEXIT_ABORT); 347 } else { 348 return (VMEXIT_CONTINUE); 349 } 350 } 351 352 static int 353 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 354 { 355 uint64_t val; 356 uint32_t eax, edx; 357 int error; 358 359 val = 0; 360 error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val); 361 if (error != 0) { 362 fprintf(stderr, "rdmsr to register %#x on vcpu %d\n", 363 vme->u.msr.code, *pvcpu); 364 if (strictmsr) { 365 vm_inject_gp(ctx, *pvcpu); 366 return (VMEXIT_CONTINUE); 367 } 368 } 369 370 eax = val; 371 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax); 372 assert(error == 0); 373 374 edx = val >> 32; 375 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx); 376 assert(error == 0); 377 378 return (VMEXIT_CONTINUE); 379 } 380 381 static int 382 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 383 { 384 int error; 385 386 error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval); 387 if (error != 0) { 388 fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n", 389 vme->u.msr.code, vme->u.msr.wval, *pvcpu); 390 if (strictmsr) { 391 vm_inject_gp(ctx, *pvcpu); 392 return (VMEXIT_CONTINUE); 393 } 394 } 395 return (VMEXIT_CONTINUE); 396 } 397 398 static int 399 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu) 400 { 401 402 (void)spinup_ap(ctx, *pvcpu, 403 vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip); 404 405 return (VMEXIT_CONTINUE); 406 } 407 408 #define DEBUG_EPT_MISCONFIG 409 #ifdef DEBUG_EPT_MISCONFIG 410 #define EXIT_REASON_EPT_MISCONFIG 49 411 #define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400 412 #define VMCS_IDENT(x) ((x) | 0x80000000) 413 414 static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4]; 415 static int ept_misconfig_ptenum; 416 #endif 417 418 static int 419 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 420 { 421 422 fprintf(stderr, "vm exit[%d]\n", *pvcpu); 423 fprintf(stderr, "\treason\t\tVMX\n"); 424 fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip); 425 fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length); 426 fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status); 427 fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason); 428 fprintf(stderr, "\tqualification\t0x%016lx\n", 429 vmexit->u.vmx.exit_qualification); 430 fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type); 431 fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error); 432 #ifdef DEBUG_EPT_MISCONFIG 433 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) { 434 vm_get_register(ctx, *pvcpu, 435 VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS), 436 &ept_misconfig_gpa); 437 vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte, 438 &ept_misconfig_ptenum); 439 fprintf(stderr, "\tEPT misconfiguration:\n"); 440 fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa); 441 fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n", 442 ept_misconfig_ptenum, ept_misconfig_pte[0], 443 ept_misconfig_pte[1], ept_misconfig_pte[2], 444 ept_misconfig_pte[3]); 445 } 446 #endif /* DEBUG_EPT_MISCONFIG */ 447 return (VMEXIT_ABORT); 448 } 449 450 static int 451 vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 452 { 453 454 fprintf(stderr, "vm exit[%d]\n", *pvcpu); 455 fprintf(stderr, "\treason\t\tSVM\n"); 456 fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip); 457 fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length); 458 fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode); 459 fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1); 460 fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2); 461 return (VMEXIT_ABORT); 462 } 463 464 static int 465 vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 466 { 467 468 assert(vmexit->inst_length == 0); 469 470 stats.vmexit_bogus++; 471 472 return (VMEXIT_CONTINUE); 473 } 474 475 static int 476 vmexit_reqidle(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 477 { 478 479 assert(vmexit->inst_length == 0); 480 481 stats.vmexit_reqidle++; 482 483 return (VMEXIT_CONTINUE); 484 } 485 486 static int 487 vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 488 { 489 490 stats.vmexit_hlt++; 491 492 /* 493 * Just continue execution with the next instruction. We use 494 * the HLT VM exit as a way to be friendly with the host 495 * scheduler. 496 */ 497 return (VMEXIT_CONTINUE); 498 } 499 500 static int 501 vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 502 { 503 504 stats.vmexit_pause++; 505 506 return (VMEXIT_CONTINUE); 507 } 508 509 static int 510 vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 511 { 512 513 assert(vmexit->inst_length == 0); 514 515 stats.vmexit_mtrap++; 516 517 return (VMEXIT_CONTINUE); 518 } 519 520 static int 521 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 522 { 523 int err, i; 524 struct vie *vie; 525 526 stats.vmexit_inst_emul++; 527 528 vie = &vmexit->u.inst_emul.vie; 529 err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa, 530 vie, &vmexit->u.inst_emul.paging); 531 532 if (err) { 533 if (err == ESRCH) { 534 fprintf(stderr, "Unhandled memory access to 0x%lx\n", 535 vmexit->u.inst_emul.gpa); 536 } 537 538 fprintf(stderr, "Failed to emulate instruction ["); 539 for (i = 0; i < vie->num_valid; i++) { 540 fprintf(stderr, "0x%02x%s", vie->inst[i], 541 i != (vie->num_valid - 1) ? " " : ""); 542 } 543 fprintf(stderr, "] at 0x%lx\n", vmexit->rip); 544 return (VMEXIT_ABORT); 545 } 546 547 return (VMEXIT_CONTINUE); 548 } 549 550 static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER; 551 static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER; 552 553 static int 554 vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) 555 { 556 enum vm_suspend_how how; 557 558 how = vmexit->u.suspended.how; 559 560 fbsdrun_deletecpu(ctx, *pvcpu); 561 562 if (*pvcpu != BSP) { 563 pthread_mutex_lock(&resetcpu_mtx); 564 pthread_cond_signal(&resetcpu_cond); 565 pthread_mutex_unlock(&resetcpu_mtx); 566 pthread_exit(NULL); 567 } 568 569 pthread_mutex_lock(&resetcpu_mtx); 570 while (!CPU_EMPTY(&cpumask)) { 571 pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx); 572 } 573 pthread_mutex_unlock(&resetcpu_mtx); 574 575 switch (how) { 576 case VM_SUSPEND_RESET: 577 exit(0); 578 case VM_SUSPEND_POWEROFF: 579 exit(1); 580 case VM_SUSPEND_HALT: 581 exit(2); 582 case VM_SUSPEND_TRIPLEFAULT: 583 exit(3); 584 default: 585 fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how); 586 exit(100); 587 } 588 return (0); /* NOTREACHED */ 589 } 590 591 static vmexit_handler_t handler[VM_EXITCODE_MAX] = { 592 [VM_EXITCODE_INOUT] = vmexit_inout, 593 [VM_EXITCODE_INOUT_STR] = vmexit_inout, 594 [VM_EXITCODE_VMX] = vmexit_vmx, 595 [VM_EXITCODE_SVM] = vmexit_svm, 596 [VM_EXITCODE_BOGUS] = vmexit_bogus, 597 [VM_EXITCODE_REQIDLE] = vmexit_reqidle, 598 [VM_EXITCODE_RDMSR] = vmexit_rdmsr, 599 [VM_EXITCODE_WRMSR] = vmexit_wrmsr, 600 [VM_EXITCODE_MTRAP] = vmexit_mtrap, 601 [VM_EXITCODE_INST_EMUL] = vmexit_inst_emul, 602 [VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap, 603 [VM_EXITCODE_SUSPENDED] = vmexit_suspend, 604 [VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch, 605 }; 606 607 static void 608 vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip) 609 { 610 int error, rc; 611 enum vm_exitcode exitcode; 612 cpuset_t active_cpus; 613 614 if (vcpumap[vcpu] != NULL) { 615 error = pthread_setaffinity_np(pthread_self(), 616 sizeof(cpuset_t), vcpumap[vcpu]); 617 assert(error == 0); 618 } 619 620 error = vm_active_cpus(ctx, &active_cpus); 621 assert(CPU_ISSET(vcpu, &active_cpus)); 622 623 error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip); 624 assert(error == 0); 625 626 while (1) { 627 error = vm_run(ctx, vcpu, &vmexit[vcpu]); 628 if (error != 0) 629 break; 630 631 exitcode = vmexit[vcpu].exitcode; 632 if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) { 633 fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n", 634 exitcode); 635 exit(1); 636 } 637 638 rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu); 639 640 switch (rc) { 641 case VMEXIT_CONTINUE: 642 break; 643 case VMEXIT_ABORT: 644 abort(); 645 default: 646 exit(1); 647 } 648 } 649 fprintf(stderr, "vm_run error %d, errno %d\n", error, errno); 650 } 651 652 static int 653 num_vcpus_allowed(struct vmctx *ctx) 654 { 655 int tmp, error; 656 657 error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp); 658 659 /* 660 * The guest is allowed to spinup more than one processor only if the 661 * UNRESTRICTED_GUEST capability is available. 662 */ 663 if (error == 0) 664 return (VM_MAXCPU); 665 else 666 return (1); 667 } 668 669 void 670 fbsdrun_set_capabilities(struct vmctx *ctx, int cpu) 671 { 672 int err, tmp; 673 674 if (fbsdrun_vmexit_on_hlt()) { 675 err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp); 676 if (err < 0) { 677 fprintf(stderr, "VM exit on HLT not supported\n"); 678 exit(1); 679 } 680 vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1); 681 if (cpu == BSP) 682 handler[VM_EXITCODE_HLT] = vmexit_hlt; 683 } 684 685 if (fbsdrun_vmexit_on_pause()) { 686 /* 687 * pause exit support required for this mode 688 */ 689 err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp); 690 if (err < 0) { 691 fprintf(stderr, 692 "SMP mux requested, no pause support\n"); 693 exit(1); 694 } 695 vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1); 696 if (cpu == BSP) 697 handler[VM_EXITCODE_PAUSE] = vmexit_pause; 698 } 699 700 if (x2apic_mode) 701 err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED); 702 else 703 err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED); 704 705 if (err) { 706 fprintf(stderr, "Unable to set x2apic state (%d)\n", err); 707 exit(1); 708 } 709 710 vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1); 711 } 712 713 static struct vmctx * 714 do_open(const char *vmname) 715 { 716 struct vmctx *ctx; 717 int error; 718 bool reinit, romboot; 719 #ifndef WITHOUT_CAPSICUM 720 cap_rights_t rights; 721 const cap_ioctl_t *cmds; 722 size_t ncmds; 723 #endif 724 725 reinit = romboot = false; 726 727 if (lpc_bootrom()) 728 romboot = true; 729 730 error = vm_create(vmname); 731 if (error) { 732 if (errno == EEXIST) { 733 if (romboot) { 734 reinit = true; 735 } else { 736 /* 737 * The virtual machine has been setup by the 738 * userspace bootloader. 739 */ 740 } 741 } else { 742 perror("vm_create"); 743 exit(1); 744 } 745 } else { 746 if (!romboot) { 747 /* 748 * If the virtual machine was just created then a 749 * bootrom must be configured to boot it. 750 */ 751 fprintf(stderr, "virtual machine cannot be booted\n"); 752 exit(1); 753 } 754 } 755 756 ctx = vm_open(vmname); 757 if (ctx == NULL) { 758 perror("vm_open"); 759 exit(1); 760 } 761 762 #ifndef WITHOUT_CAPSICUM 763 cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW); 764 if (cap_rights_limit(vm_get_device_fd(ctx), &rights) == -1 && 765 errno != ENOSYS) 766 errx(EX_OSERR, "Unable to apply rights for sandbox"); 767 vm_get_ioctls(&ncmds); 768 cmds = vm_get_ioctls(NULL); 769 if (cmds == NULL) 770 errx(EX_OSERR, "out of memory"); 771 if (cap_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1 && 772 errno != ENOSYS) 773 errx(EX_OSERR, "Unable to apply rights for sandbox"); 774 free((cap_ioctl_t *)cmds); 775 #endif 776 777 if (reinit) { 778 error = vm_reinit(ctx); 779 if (error) { 780 perror("vm_reinit"); 781 exit(1); 782 } 783 } 784 return (ctx); 785 } 786 787 int 788 main(int argc, char *argv[]) 789 { 790 int c, error, gdb_port, err, bvmcons; 791 int max_vcpus, mptgen, memflags; 792 int rtc_localtime; 793 struct vmctx *ctx; 794 uint64_t rip; 795 size_t memsize; 796 char *optstr; 797 798 bvmcons = 0; 799 progname = basename(argv[0]); 800 gdb_port = 0; 801 guest_ncpus = 1; 802 memsize = 256 * MB; 803 mptgen = 1; 804 rtc_localtime = 1; 805 memflags = 0; 806 807 optstr = "abehuwxACHIPSWYp:g:c:s:m:l:U:"; 808 while ((c = getopt(argc, argv, optstr)) != -1) { 809 switch (c) { 810 case 'a': 811 x2apic_mode = 0; 812 break; 813 case 'A': 814 acpi = 1; 815 break; 816 case 'b': 817 bvmcons = 1; 818 break; 819 case 'p': 820 if (pincpu_parse(optarg) != 0) { 821 errx(EX_USAGE, "invalid vcpu pinning " 822 "configuration '%s'", optarg); 823 } 824 break; 825 case 'c': 826 guest_ncpus = atoi(optarg); 827 break; 828 case 'C': 829 memflags |= VM_MEM_F_INCORE; 830 break; 831 case 'g': 832 gdb_port = atoi(optarg); 833 break; 834 case 'l': 835 if (lpc_device_parse(optarg) != 0) { 836 errx(EX_USAGE, "invalid lpc device " 837 "configuration '%s'", optarg); 838 } 839 break; 840 case 's': 841 if (pci_parse_slot(optarg) != 0) 842 exit(1); 843 else 844 break; 845 case 'S': 846 memflags |= VM_MEM_F_WIRED; 847 break; 848 case 'm': 849 error = vm_parse_memsize(optarg, &memsize); 850 if (error) 851 errx(EX_USAGE, "invalid memsize '%s'", optarg); 852 break; 853 case 'H': 854 guest_vmexit_on_hlt = 1; 855 break; 856 case 'I': 857 /* 858 * The "-I" option was used to add an ioapic to the 859 * virtual machine. 860 * 861 * An ioapic is now provided unconditionally for each 862 * virtual machine and this option is now deprecated. 863 */ 864 break; 865 case 'P': 866 guest_vmexit_on_pause = 1; 867 break; 868 case 'e': 869 strictio = 1; 870 break; 871 case 'u': 872 rtc_localtime = 0; 873 break; 874 case 'U': 875 guest_uuid_str = optarg; 876 break; 877 case 'w': 878 strictmsr = 0; 879 break; 880 case 'W': 881 virtio_msix = 0; 882 break; 883 case 'x': 884 x2apic_mode = 1; 885 break; 886 case 'Y': 887 mptgen = 0; 888 break; 889 case 'h': 890 usage(0); 891 default: 892 usage(1); 893 } 894 } 895 argc -= optind; 896 argv += optind; 897 898 if (argc != 1) 899 usage(1); 900 901 vmname = argv[0]; 902 ctx = do_open(vmname); 903 904 if (guest_ncpus < 1) { 905 fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus); 906 exit(1); 907 } 908 909 max_vcpus = num_vcpus_allowed(ctx); 910 if (guest_ncpus > max_vcpus) { 911 fprintf(stderr, "%d vCPUs requested but only %d available\n", 912 guest_ncpus, max_vcpus); 913 exit(1); 914 } 915 916 fbsdrun_set_capabilities(ctx, BSP); 917 918 vm_set_memflags(ctx, memflags); 919 err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL); 920 if (err) { 921 fprintf(stderr, "Unable to setup memory (%d)\n", errno); 922 exit(1); 923 } 924 925 error = init_msr(); 926 if (error) { 927 fprintf(stderr, "init_msr error %d", error); 928 exit(1); 929 } 930 931 init_mem(); 932 init_inout(); 933 atkbdc_init(ctx); 934 pci_irq_init(ctx); 935 ioapic_init(ctx); 936 937 rtc_init(ctx, rtc_localtime); 938 sci_init(ctx); 939 940 /* 941 * Exit if a device emulation finds an error in its initilization 942 */ 943 if (init_pci(ctx) != 0) 944 exit(1); 945 946 if (gdb_port != 0) 947 init_dbgport(gdb_port); 948 949 if (bvmcons) 950 init_bvmcons(); 951 952 if (lpc_bootrom()) { 953 if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) { 954 fprintf(stderr, "ROM boot failed: unrestricted guest " 955 "capability not available\n"); 956 exit(1); 957 } 958 error = vcpu_reset(ctx, BSP); 959 assert(error == 0); 960 } 961 962 error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip); 963 assert(error == 0); 964 965 /* 966 * build the guest tables, MP etc. 967 */ 968 if (mptgen) { 969 error = mptable_build(ctx, guest_ncpus); 970 if (error) 971 exit(1); 972 } 973 974 error = smbios_build(ctx); 975 assert(error == 0); 976 977 if (acpi) { 978 error = acpi_build(ctx, guest_ncpus); 979 assert(error == 0); 980 } 981 982 if (lpc_bootrom()) 983 fwctl_init(); 984 985 #ifndef WITHOUT_CAPSICUM 986 caph_cache_catpages(); 987 988 if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1) 989 errx(EX_OSERR, "Unable to apply rights for sandbox"); 990 991 if (cap_enter() == -1 && errno != ENOSYS) 992 errx(EX_OSERR, "cap_enter() failed"); 993 #endif 994 995 /* 996 * Change the proc title to include the VM name. 997 */ 998 setproctitle("%s", vmname); 999 1000 /* 1001 * Add CPU 0 1002 */ 1003 fbsdrun_addcpu(ctx, BSP, BSP, rip); 1004 1005 /* 1006 * Head off to the main event dispatch loop 1007 */ 1008 mevent_dispatch(); 1009 1010 exit(1); 1011 } 1012