/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/mman.h>
#include <sys/time.h>

#include <amd64/vmm/intel/vmcs.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <stdbool.h>
#include <stdint.h>

#include <machine/vmm.h>
#ifndef WITHOUT_CAPSICUM
#include <machine/vmm_dev.h>
#endif
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "atkbdc.h"
#include "inout.h"
#include "dbgport.h"
#include "fwctl.h"
#include "gdb.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#define	GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	MB		(1024UL * 1024)
#define	GB		(1024UL * MB)

static const char * const vmx_exit_reason_desc[] = {
	[EXIT_REASON_EXCEPTION] = "Exception or non-maskable interrupt (NMI)",
	[EXIT_REASON_EXT_INTR] = "External interrupt",
	[EXIT_REASON_TRIPLE_FAULT] = "Triple fault",
	[EXIT_REASON_INIT] = "INIT signal",
	[EXIT_REASON_SIPI] = "Start-up IPI (SIPI)",
	[EXIT_REASON_IO_SMI] = "I/O system-management interrupt (SMI)",
	[EXIT_REASON_SMI] = "Other SMI",
	[EXIT_REASON_INTR_WINDOW] = "Interrupt window",
	[EXIT_REASON_NMI_WINDOW] = "NMI window",
	[EXIT_REASON_TASK_SWITCH] = "Task switch",
	[EXIT_REASON_CPUID] = "CPUID",
	[EXIT_REASON_GETSEC] = "GETSEC",
	[EXIT_REASON_HLT] = "HLT",
	[EXIT_REASON_INVD] = "INVD",
	[EXIT_REASON_INVLPG] = "INVLPG",
	[EXIT_REASON_RDPMC] = "RDPMC",
	[EXIT_REASON_RDTSC] = "RDTSC",
	[EXIT_REASON_RSM] = "RSM",
	[EXIT_REASON_VMCALL] = "VMCALL",
	[EXIT_REASON_VMCLEAR] = "VMCLEAR",
	[EXIT_REASON_VMLAUNCH] = "VMLAUNCH",
	[EXIT_REASON_VMPTRLD] = "VMPTRLD",
	[EXIT_REASON_VMPTRST] = "VMPTRST",
	[EXIT_REASON_VMREAD] = "VMREAD",
	[EXIT_REASON_VMRESUME] = "VMRESUME",
	[EXIT_REASON_VMWRITE] = "VMWRITE",
	[EXIT_REASON_VMXOFF] = "VMXOFF",
	[EXIT_REASON_VMXON] = "VMXON",
	[EXIT_REASON_CR_ACCESS] = "Control-register accesses",
	[EXIT_REASON_DR_ACCESS] = "MOV DR",
	[EXIT_REASON_INOUT] = "I/O instruction",
	[EXIT_REASON_RDMSR] = "RDMSR",
	[EXIT_REASON_WRMSR] = "WRMSR",
	[EXIT_REASON_INVAL_VMCS] =
	    "VM-entry failure due to invalid guest state",
	[EXIT_REASON_INVAL_MSR] = "VM-entry failure due to MSR loading",
	[EXIT_REASON_MWAIT] = "MWAIT",
	[EXIT_REASON_MTF] = "Monitor trap flag",
	[EXIT_REASON_MONITOR] = "MONITOR",
	[EXIT_REASON_PAUSE] = "PAUSE",
	[EXIT_REASON_MCE_DURING_ENTRY] =
	    "VM-entry failure due to machine-check event",
	[EXIT_REASON_TPR] = "TPR below threshold",
	[EXIT_REASON_APIC_ACCESS] = "APIC access",
	[EXIT_REASON_VIRTUALIZED_EOI] = "Virtualized EOI",
	[EXIT_REASON_GDTR_IDTR] = "Access to GDTR or IDTR",
	[EXIT_REASON_LDTR_TR] = "Access to LDTR or TR",
	[EXIT_REASON_EPT_FAULT] = "EPT violation",
	[EXIT_REASON_EPT_MISCONFIG] = "EPT misconfiguration",
	[EXIT_REASON_INVEPT] = "INVEPT",
	[EXIT_REASON_RDTSCP] = "RDTSCP",
	[EXIT_REASON_VMX_PREEMPT] = "VMX-preemption timer expired",
	[EXIT_REASON_INVVPID] = "INVVPID",
	[EXIT_REASON_WBINVD] = "WBINVD",
	[EXIT_REASON_XSETBV] = "XSETBV",
	[EXIT_REASON_APIC_WRITE] = "APIC write",
	[EXIT_REASON_RDRAND] = "RDRAND",
	[EXIT_REASON_INVPCID] = "INVPCID",
	[EXIT_REASON_VMFUNC] = "VMFUNC",
	[EXIT_REASON_ENCLS] = "ENCLS",
	[EXIT_REASON_RDSEED] = "RDSEED",
	[EXIT_REASON_PM_LOG_FULL] = "Page-modification log full",
	[EXIT_REASON_XSAVES] = "XSAVES",
	[EXIT_REASON_XRSTORS] = "XRSTORS"
};

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);

char *vmname;

int guest_ncpus;
uint16_t cores, maxcpus, sockets, threads;

char *guest_uuid_str;

static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
static int virtio_msix = 1;
static int x2apic_mode = 0;	/* default is xAPIC */

static int strictio;
static int strictmsr = 1;

static int acpi;

static char *progname;
static const int BSP = 0;

static cpuset_t cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

static struct vm_exit vmexit[VM_MAXCPU];

struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_reqidle;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
} stats;

struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static cpuset_t *vcpumap[VM_MAXCPU] = { NULL };

static void
usage(int code)
{

	fprintf(stderr,
	    "Usage: %s [-abehuwxACHPSWY]\n"
	    "       %*s [-c [[cpus=]numcpus][,sockets=n][,cores=n][,threads=n]]\n"
	    "       %*s [-g <gdb port>] [-l <lpc>]\n"
	    "       %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] <vm>\n"
	    "       -a: local apic is in xAPIC mode (deprecated)\n"
	    "       -A: create ACPI tables\n"
	    "       -c: number of cpus and/or topology specification\n"
	    "       -C: include guest memory in core file\n"
	    "       -e: exit on unhandled I/O access\n"
	    "       -g: gdb port\n"
	    "       -h: help\n"
	    "       -H: vmexit from the guest on hlt\n"
	    "       -l: LPC device configuration\n"
	    "       -m: memory size in MB\n"
	    "       -p: pin 'vcpu' to 'hostcpu'\n"
	    "       -P: vmexit from the guest on pause\n"
	    "       -s: <slot,driver,configinfo> PCI slot config\n"
	    "       -S: guest memory cannot be swapped\n"
	    "       -u: RTC keeps UTC time\n"
	    "       -U: uuid\n"
	    "       -w: ignore unimplemented MSRs\n"
	    "       -W: force virtio to use single-vector MSI\n"
	    "       -x: local apic is in x2APIC mode\n"
	    "       -Y: disable MPtable generation\n",
	    progname, (int)strlen(progname), "", (int)strlen(progname), "",
	    (int)strlen(progname), "");

	exit(code);
}

/*
 * XXX This parser is known to have the following issues:
 * 1. It accepts null key=value tokens ",,".
 * 2. It accepts whitespace after = and before value.
 * 3. Values out of range of INT are silently wrapped.
 * 4. It doesn't check non-final values.
 * 5. The apparently bogus limits of UINT16_MAX are for future expansion.
 *
 * The acceptance of a null specification ('-c ""') is by design to match the
 * manual page syntax specification; this results in a topology of 1 vCPU.
 */
static int
topology_parse(const char *opt)
{
	uint64_t ncpus;
	int c, chk, n, s, t, tmp;
	char *cp, *str;
	bool ns, scts;

	c = 1, n = 1, s = 1, t = 1;
	ns = false, scts = false;
	str = strdup(opt);
	if (str == NULL)
		goto out;

	while ((cp = strsep(&str, ",")) != NULL) {
		if (sscanf(cp, "%i%n", &tmp, &chk) == 1) {
			n = tmp;
			ns = true;
		} else if (sscanf(cp, "cpus=%i%n", &tmp, &chk) == 1) {
			n = tmp;
			ns = true;
		} else if (sscanf(cp, "sockets=%i%n", &tmp, &chk) == 1) {
			s = tmp;
			scts = true;
		} else if (sscanf(cp, "cores=%i%n", &tmp, &chk) == 1) {
			c = tmp;
			scts = true;
		} else if (sscanf(cp, "threads=%i%n", &tmp, &chk) == 1) {
			t = tmp;
			scts = true;
#ifdef notyet	/* Do not expose this until vmm.ko implements it */
		} else if (sscanf(cp, "maxcpus=%i%n", &tmp, &chk) == 1) {
			m = tmp;
#endif
		/* Skip the empty argument case from -c "" */
		} else if (cp[0] == '\0')
			continue;
		else
			goto out;
		/* Any trailing garbage causes an error */
		if (cp[chk] != '\0')
			goto out;
	}
	free(str);
	str = NULL;

	/*
	 * Range check 1 <= n <= UINT16_MAX for all values
	 */
	if (n < 1 || s < 1 || c < 1 || t < 1 ||
	    n > UINT16_MAX || s > UINT16_MAX || c > UINT16_MAX ||
	    t > UINT16_MAX)
		return (-1);

	/* If only the cpu count was specified, use it as the socket count */
	if (!scts)
		s = n;
	/*
	 * Compute sockets * cores * threads, avoiding overflow.
	 * The range check above ensures these are 16 bit values.
	 * If n was specified, check it against the computed ncpus.
	 */
	ncpus = (uint64_t)s * c * t;
	if (ncpus > UINT16_MAX || (ns && n != ncpus))
		return (-1);

	guest_ncpus = ncpus;
	sockets = s;
	cores = c;
	threads = t;
	return (0);

out:
	free(str);
	return (-1);
}

static int
pincpu_parse(const char *opt)
{
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return (-1);
	}

	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
		fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
		    vcpu, VM_MAXCPU - 1);
		return (-1);
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr, "hostcpu '%d' outside valid range from "
		    "0 to %d\n", pcpu, CPU_SETSIZE - 1);
		return (-1);
	}

	if (vcpumap[vcpu] == NULL) {
		if ((vcpumap[vcpu] = malloc(sizeof(cpuset_t))) == NULL) {
			perror("malloc");
			return (-1);
		}
		CPU_ZERO(vcpumap[vcpu]);
	}
	CPU_SET(pcpu, vcpumap[vcpu]);
	return (0);
}

void
vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
    int errcode)
{
	struct vmctx *ctx;
	int error, restart_instruction;

	ctx = arg;
	restart_instruction = 1;

	error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
	    restart_instruction);
	assert(error == 0);
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

int
fbsdrun_virtio_msix(void)
{

	return (virtio_msix);
}

static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

	gdb_cpu_add(vcpu);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

void
fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
{
	int error;

	assert(fromcpu == BSP);

	/*
	 * The 'newcpu' must be activated in the context of 'fromcpu'. If
	 * vm_activate_cpu() is delayed until newcpu's pthread starts running
	 * then vmm.ko is out-of-sync with bhyve and this can create a race
	 * with vm_suspend().
	 */
	error = vm_activate_cpu(ctx, newcpu);
	if (error != 0)
		err(EX_OSERR, "could not activate CPU %d", newcpu);

	CPU_SET_ATOMIC(newcpu, &cpumask);

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[newcpu].rip = rip;
	vmexit[newcpu].inst_length = 0;

	mt_vmm_info[newcpu].mt_ctx = ctx;
	mt_vmm_info[newcpu].mt_vcpu = newcpu;

	error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
	    fbsdrun_start_thread, &mt_vmm_info[newcpu]);
	assert(error == 0);
}

static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{

	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(4);
	}

	CPU_CLR_ATOMIC(vcpu, &cpumask);
	return (CPU_EMPTY(&cpumask));
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
    uint32_t eax)
{
#if BHYVE_DEBUG
	/*
	 * put guest-driven debug here
	 */
#endif
	return (VMEXIT_CONTINUE);
}

static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	in = vme->u.inout.in;
	out = !in;

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT) {
		error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
		return (error);
	}

	error = emulate_inout(ctx, vcpu, vme, strictio);
	if (error) {
		fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
		    in ? "in" : "out",
		    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
		    port, vme->rip);
		return (VMEXIT_ABORT);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	uint64_t val;
	uint32_t eax, edx;
	int error;

	val = 0;
	error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
	if (error != 0) {
		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
		    vme->u.msr.code, *pvcpu);
		if (strictmsr) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}

	eax = val;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
	assert(error == 0);

	edx = val >> 32;
	error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
	assert(error == 0);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;

	error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
	if (error != 0) {
		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
		if (strictmsr) {
			vm_inject_gp(ctx, *pvcpu);
			return (VMEXIT_CONTINUE);
		}
	}
	return (VMEXIT_CONTINUE);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{

	(void)spinup_ap(ctx, *pvcpu,
	    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	return (VMEXIT_CONTINUE);
}

#define	DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define	VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400

static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif

static const char *
vmexit_vmx_desc(uint32_t exit_reason)
{

	if (exit_reason >= nitems(vmx_exit_reason_desc) ||
	    vmx_exit_reason_desc[exit_reason] == NULL)
		return ("Unknown");
	return (vmx_exit_reason_desc[exit_reason]);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
	fprintf(stderr, "\texit_reason\t%u (%s)\n", vmexit->u.vmx.exit_reason,
	    vmexit_vmx_desc(vmexit->u.vmx.exit_reason));
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);
	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
		vm_get_register(ctx, *pvcpu,
		    VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
		    &ept_misconfig_gpa);
		vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
		    &ept_misconfig_ptenum);
		fprintf(stderr, "\tEPT misconfiguration:\n");
		fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
		fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
		    ept_misconfig_ptenum, ept_misconfig_pte[0],
		    ept_misconfig_pte[1], ept_misconfig_pte[2],
		    ept_misconfig_pte[3]);
	}
#endif	/* DEBUG_EPT_MISCONFIG */
	return (VMEXIT_ABORT);
}

static int
vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tSVM\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode);
	fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1);
	fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2);
	return (VMEXIT_ABORT);
}

static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_bogus++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_reqidle(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_reqidle++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_hlt++;

	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
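	 * (This handler is only installed when the -H option causes
	 * fbsdrun_set_capabilities() to enable VM_CAP_HALT_EXIT.)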
	 */
	return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	stats.vmexit_pause++;

	return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	assert(vmexit->inst_length == 0);

	stats.vmexit_mtrap++;

	gdb_cpu_mtrap(*pvcpu);

	return (VMEXIT_CONTINUE);
}

static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err, i;
	struct vie *vie;

	stats.vmexit_inst_emul++;

	vie = &vmexit->u.inst_emul.vie;
	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
	    vie, &vmexit->u.inst_emul.paging);

	if (err) {
		if (err == ESRCH) {
			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
			    vmexit->u.inst_emul.gpa);
		}

		fprintf(stderr, "Failed to emulate instruction [");
		for (i = 0; i < vie->num_valid; i++) {
			fprintf(stderr, "0x%02x%s", vie->inst[i],
			    i != (vie->num_valid - 1) ? " " : "");
		}
		fprintf(stderr, "] at 0x%lx\n", vmexit->rip);
		return (VMEXIT_ABORT);
	}

	return (VMEXIT_CONTINUE);
}

static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;

static int
vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	enum vm_suspend_how how;

	how = vmexit->u.suspended.how;

	fbsdrun_deletecpu(ctx, *pvcpu);

	if (*pvcpu != BSP) {
		pthread_mutex_lock(&resetcpu_mtx);
		pthread_cond_signal(&resetcpu_cond);
		pthread_mutex_unlock(&resetcpu_mtx);
		pthread_exit(NULL);
	}

	pthread_mutex_lock(&resetcpu_mtx);
	while (!CPU_EMPTY(&cpumask)) {
		pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
	}
	pthread_mutex_unlock(&resetcpu_mtx);

	switch (how) {
	case VM_SUSPEND_RESET:
		exit(0);
	case VM_SUSPEND_POWEROFF:
		exit(1);
	case VM_SUSPEND_HALT:
		exit(2);
	case VM_SUSPEND_TRIPLEFAULT:
		exit(3);
	default:
		fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
		exit(100);
	}
	return (0);	/* NOTREACHED */
}

static int
vmexit_debug(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	gdb_cpu_suspend(*pvcpu);
	return (VMEXIT_CONTINUE);
}

static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT] = vmexit_inout,
	[VM_EXITCODE_INOUT_STR] = vmexit_inout,
	[VM_EXITCODE_VMX] = vmexit_vmx,
	[VM_EXITCODE_SVM] = vmexit_svm,
	[VM_EXITCODE_BOGUS] = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP] = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
	[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
	[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
	[VM_EXITCODE_DEBUG] = vmexit_debug,
};

static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
	int error, rc;
	enum vm_exitcode exitcode;
	cpuset_t active_cpus;

	if (vcpumap[vcpu] != NULL) {
		error = pthread_setaffinity_np(pthread_self(),
		    sizeof(cpuset_t), vcpumap[vcpu]);
		assert(error == 0);
	}

	error = vm_active_cpus(ctx, &active_cpus);
	assert(CPU_ISSET(vcpu, &active_cpus));

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
	assert(error == 0);

	while (1) {
		error = vm_run(ctx, vcpu, &vmexit[vcpu]);
		if (error != 0)
			break;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(4);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			break;
		case VMEXIT_ABORT:
			abort();
		default:
			exit(4);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spin up more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(4);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (x2apic_mode)
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(4);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;
	int error;
	bool reinit, romboot;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	const cap_ioctl_t *cmds;
	size_t ncmds;
#endif

	reinit = romboot = false;

	if (lpc_bootrom())
		romboot = true;

	error = vm_create(vmname);
	if (error) {
		if (errno == EEXIST) {
			if (romboot) {
				reinit = true;
			} else {
				/*
				 * The virtual machine has been set up by the
				 * userspace bootloader.
				 */
			}
		} else {
			perror("vm_create");
			exit(4);
		}
	} else {
		if (!romboot) {
			/*
			 * If the virtual machine was just created then a
			 * bootrom must be configured to boot it.
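			 * (A boot ROM is configured through the -l option,
			 * e.g. "-l bootrom,/path/to/uefi-rom.fd"; the firmware
			 * path shown here is purely illustrative.)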
			 */
			fprintf(stderr, "virtual machine cannot be booted\n");
			exit(4);
		}
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(4);
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(vm_get_device_fd(ctx), &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	vm_get_ioctls(&ncmds);
	cmds = vm_get_ioctls(NULL);
	if (cmds == NULL)
		errx(EX_OSERR, "out of memory");
	if (caph_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	free((cap_ioctl_t *)cmds);
#endif

	if (reinit) {
		error = vm_reinit(ctx);
		if (error) {
			perror("vm_reinit");
			exit(4);
		}
	}
	error = vm_set_topology(ctx, sockets, cores, threads, maxcpus);
	if (error)
		errx(EX_OSERR, "vm_set_topology");
	return (ctx);
}

int
main(int argc, char *argv[])
{
	int c, error, dbg_port, gdb_port, err, bvmcons;
	int max_vcpus, mptgen, memflags;
	int rtc_localtime;
	bool gdb_stop;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;
	char *optstr;

	bvmcons = 0;
	progname = basename(argv[0]);
	dbg_port = 0;
	gdb_port = 0;
	gdb_stop = false;
	guest_ncpus = 1;
	sockets = cores = threads = 1;
	maxcpus = 0;
	memsize = 256 * MB;
	mptgen = 1;
	rtc_localtime = 1;
	memflags = 0;

	optstr = "abehuwxACHIPSWYp:g:G:c:s:m:l:U:";
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			x2apic_mode = 0;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid vcpu pinning "
				    "configuration '%s'", optarg);
			}
			break;
		case 'c':
			if (topology_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid cpu topology "
				    "'%s'", optarg);
			}
			break;
		case 'C':
			memflags |= VM_MEM_F_INCORE;
			break;
		case 'g':
			dbg_port = atoi(optarg);
			break;
		case 'G':
			if (optarg[0] == 'w') {
				gdb_stop = true;
				optarg++;
			}
			gdb_port = atoi(optarg);
			break;
		case 'l':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				lpc_print_supported_devices();
				exit(0);
			} else if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
		case 's':
			if (strncmp(optarg, "help", strlen(optarg)) == 0) {
				pci_print_supported_devices();
				exit(0);
			} else if (pci_parse_slot(optarg) != 0)
				exit(4);
			else
				break;
		case 'S':
			memflags |= VM_MEM_F_WIRED;
			break;
		case 'm':
			error = vm_parse_memsize(optarg, &memsize);
			if (error)
				errx(EX_USAGE, "invalid memsize '%s'", optarg);
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'u':
			rtc_localtime = 0;
			break;
		case 'U':
			guest_uuid_str = optarg;
			break;
		case 'w':
			strictmsr = 0;
			break;
		case 'W':
			virtio_msix = 0;
			break;
		case 'x':
			x2apic_mode = 1;
			break;
		case 'Y':
			mptgen = 0;
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 1)
		usage(1);

	vmname = argv[0];
	ctx = do_open(vmname);

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
		    guest_ncpus, max_vcpus);
		exit(4);
	}

	fbsdrun_set_capabilities(ctx, BSP);

	vm_set_memflags(ctx, memflags);
	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to set up memory (%d)\n", errno);
		exit(4);
	}

	error = init_msr();
	if (error) {
		fprintf(stderr, "init_msr error %d\n", error);
		exit(4);
	}

	init_mem();
	init_inout();
	atkbdc_init(ctx);
	pci_irq_init(ctx);
	ioapic_init(ctx);

	rtc_init(ctx, rtc_localtime);
	sci_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0) {
		perror("device emulation initialization error");
		exit(4);
	}

	if (dbg_port != 0)
		init_dbgport(dbg_port);

	if (gdb_port != 0)
		init_gdb(ctx, gdb_port, gdb_stop);

	if (bvmcons)
		init_bvmcons();

	if (lpc_bootrom()) {
		if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
			fprintf(stderr, "ROM boot failed: unrestricted guest "
			    "capability not available\n");
			exit(4);
		}
		error = vcpu_reset(ctx, BSP);
		assert(error == 0);
	}

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	if (mptgen) {
		error = mptable_build(ctx, guest_ncpus);
		if (error) {
			perror("failed to build the guest tables");
			exit(4);
		}
	}

	error = smbios_build(ctx);
	assert(error == 0);

	if (acpi) {
		error = acpi_build(ctx, guest_ncpus);
		assert(error == 0);
	}

	if (lpc_bootrom())
		fwctl_init();

	/*
	 * Change the proc title to include the VM name.
	 */
	setproctitle("%s", vmname);

#ifndef WITHOUT_CAPSICUM
	caph_cache_catpages();

	if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	if (caph_enter() == -1)
		errx(EX_OSERR, "cap_enter() failed");
#endif

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, BSP, rip);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(4);
}
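
/*
 * Example invocation (illustrative only; the disk image path and the slot
 * numbers are hypothetical and not taken from this file):
 *
 *	bhyve -c 2,sockets=1,cores=2 -m 1024 -A -H -P \
 *	      -s 0,hostbridge -s 3,virtio-blk,/path/to/disk.img \
 *	      -s 31,lpc -l com1,stdio guestvm
 *
 * This exercises the option handling above: topology parsing (-c), memory
 * sizing (-m), ACPI table generation (-A), HLT/PAUSE exits (-H/-P), PCI
 * slot configuration (-s) and LPC device configuration (-l).
 */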