/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_kernel.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"
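
/*
 * A note on the conventions below: the *_ONE_SETTING macros name the
 * control bits which must be set (1) for bhyve to function, while the
 * *_ZERO_SETTING macros name those which must be clear (0).  During
 * vmx_init(), each requirement is checked via vmx_set_ctlreg() against the
 * VMX capability MSRs, which report which control bits the CPU allows to
 * be 0 and which it allows to be 1.
 */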

#define	PINBASED_CTLS_ONE_SETTING \
	(PINBASED_EXTINT_EXITING | \
	PINBASED_NMI_EXITING | \
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING \
	(PROCBASED_INT_WINDOW_EXITING | \
	PROCBASED_NMI_WINDOW_EXITING)

/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING \
	(PROCBASED_SECONDARY_CONTROLS | \
	PROCBASED_TSC_OFFSET | \
	PROCBASED_MWAIT_EXITING | \
	PROCBASED_MONITOR_EXITING | \
	PROCBASED_IO_EXITING | \
	PROCBASED_MSR_BITMAPS | \
	PROCBASED_CTLS_WINDOW_SETTING | \
	PROCBASED_CR8_LOAD_EXITING | \
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING \
	(PROCBASED_CR3_LOAD_EXITING | \
	PROCBASED_CR3_STORE_EXITING | \
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING \
	(PROCBASED2_ENABLE_EPT | \
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING \
	(VM_EXIT_SAVE_DEBUG_CONTROLS | \
	VM_EXIT_HOST_LMA | \
	VM_EXIT_LOAD_PAT | \
	VM_EXIT_SAVE_EFER | \
	VM_EXIT_LOAD_EFER | \
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING \
	(VM_ENTRY_LOAD_DEBUG_CONTROLS | \
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING \
	(VM_ENTRY_INTO_SMM | \
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define	EPT_CAPS_REQUIRED \
	(IA32_VMX_EPT_VPID_PWL4 | \
	IA32_VMX_EPT_VPID_TYPE_WB | \
	IA32_VMX_EPT_VPID_INVEPT | \
	IA32_VMX_EPT_VPID_INVEPT_SINGLE | \
	IA32_VMX_EPT_VPID_INVEPT_ALL | \
	IA32_VMX_EPT_VPID_INVVPID | \
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define	HANDLED		1
#define	UNHANDLED	0

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/* Do not flush RSB upon vmexit */
static int no_flush_rsb;

/*
 * Optional capabilities
 */

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}
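
/*
 * An illustrative example of the unshadow math above (the mask values here
 * are hypothetical): with cr0_ones_mask = CR0_NE and cr0_zeros_mask =
 * CR0_CD, the mask-covered bits are taken from the shadow (the last value
 * the guest wrote) while all other bits come from the live register, so
 * the guest observes its own writes to NE and CD even though the live cr0
 * has NE forced on and CD forced off.
 */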

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "secondary processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for
				 * requesting and using a PIR IPI vector are
				 * present, use them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and
	 * the hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx",
		    ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */
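
	/*
	 * A note on the CR fixed-bit MSRs consumed below (per the SDM
	 * section on VMX fixed bits): a CR bit which must be 1 in VMX
	 * operation reads as 1 in both FIXED0 and FIXED1, a bit which must
	 * be 0 reads as 0 in both, and a bit is freely settable when it is
	 * 0 in FIXED0 but 1 in FIXED1.  Hence the ones-mask is
	 * (fixed0 & fixed1) and the zeros-mask is (~fixed0 & ~fixed1).
	 */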

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP);
	VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0);

	vmx->vm = vm;
	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings.  So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use.  Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		if (no_flush_rsb) {
			vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
		} else {
			vmcs_write(VMCS_HOST_RIP,
			    (uint64_t)vmx_exit_guest_flush_rsb);
		}

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled;

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}

/*
 * Invalidate guest mappings identified by its VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance).  Actions which modify the EPT
 * structures for the instance (such as unmapping GPAs) would require an
 * 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may
	 * be stale TLB entries for this VPID on the target, or when emulated
	 * actions on the guest vCPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest.  The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}
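
/*
 * For reference (per the SDM): the INVEPT descriptor below holds the EPTP
 * naming the context to invalidate, with the second quadword reserved as
 * zero.  Type 1 (single-context) flushes mappings tagged with that EP4TA,
 * while type 2 (all-context) flushes mappings for every EP4TA.
 */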

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		/* Enable interrupt window exiting */
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));

	/* Disable interrupt window exiting */
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}
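
/*
 * The window-exiting helpers above follow a common pattern: when an event
 * is pending but cannot be injected (the guest is not interruptible, or an
 * earlier event already occupies the entry-interruption slot), the
 * relevant window-exiting control is set so the CPU exits as soon as
 * injection becomes possible, at which point the control is cleared again.
 */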

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset, since vCPUs inherit the TSC offset of whatever physical CPU they
 * have migrated onto.  Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR);
CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI);
CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP);
CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR);
CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5);
CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6);
CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE);
CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE);

static uint64_t
vmx_idtvec_to_intinfo(uint32_t info)
{
	ASSERT(info & VMCS_IDT_VEC_VALID);

	const uint32_t type = info & VMCS_INTR_T_MASK;
	const uint8_t vec = info & 0xff;

	switch (type) {
	case VMCS_INTR_T_HWINTR:
	case VMCS_INTR_T_NMI:
	case VMCS_INTR_T_HWEXCEPTION:
	case VMCS_INTR_T_SWINTR:
	case VMCS_INTR_T_PRIV_SWEXCEPTION:
	case VMCS_INTR_T_SWEXCEPTION:
		break;
	default:
		panic("unexpected event type 0x%03x", type);
	}

	uint64_t intinfo = VM_INTINFO_VALID | type | vec;
	if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
		const uint32_t errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR);
		intinfo |= (uint64_t)errcode << 32;
	}

	return (intinfo);
}

static void
vmx_inject_intinfo(uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));
	ASSERT0(info & VM_INTINFO_MASK_RSVD);

	/*
	 * The bhyve format matches that of the VMCS, which is ensured by the
	 * CTASSERTs above.
	 */
	uint32_t inject = info;
	switch (VM_INTINFO_VECTOR(info)) {
	case IDT_BP:
	case IDT_OF:
		/*
		 * VT-x requires #BP and #OF to be injected as software
		 * exceptions.
		 */
		inject &= ~VMCS_INTR_T_MASK;
		inject |= VMCS_INTR_T_SWEXCEPTION;
		break;
	default:
		break;
	}

	if (VM_INTINFO_HAS_ERRCODE(info)) {
		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
		    VM_INTINFO_ERRCODE(info));
	}
	vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}
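
/*
 * Event injection overview: vmx_inject_events() runs first (with host
 * interrupts enabled) to handle pending intinfo, NMIs, and ExtINTs;
 * vmx_inject_vlapic() then runs with host interrupts disabled to handle
 * vLAPIC interrupts; finally vmx_inject_recheck() closes the race window
 * for any notification which arrived in between.
 */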

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		vmx_inject_intinfo(entryinfo);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on
		 * a CPU without such a restriction it will immediately exit
		 * and the NMI will be injected in the "NMI window exiting"
		 * handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv.  It does not preclude
		 * other event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.
 * In this case the hypervisor needs to restore virtual-NMI blocking before
 * resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}
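
/*
 * The 'ident' used by the register accessors below follows the standard
 * x86 register encoding found in VM-exit qualifications and
 * instruction-info fields: 0 is %rax, 1 is %rcx, 2 is %rdx, 3 is %rbx,
 * 4 is %rsp (which lives in the VMCS rather than the vmxctx), continuing
 * through 15 for %r15.
 */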

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}
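
/*
 * For the MMIO and in/out exits built below, inst_length is zeroed so that
 * %rip is not advanced at VM-entry; the instruction-emulation path is
 * responsible for stepping the guest past the faulting instruction once
 * emulation completes.
 */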

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of
		 * the ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ?
1 : 0); 1922 } 1923 1924 static int 1925 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1926 uint64_t qual) 1927 { 1928 const uint_t offset = APIC_WRITE_OFFSET(qual); 1929 1930 if (!apic_access_virtualization(vmx, vcpuid)) { 1931 /* 1932 * In general there should not be any APIC write VM-exits 1933 * unless APIC-access virtualization is enabled. 1934 * 1935 * However self-IPI virtualization can legitimately trigger 1936 * an APIC-write VM-exit so treat it specially. 1937 */ 1938 if (x2apic_virtualization(vmx, vcpuid) && 1939 offset == APIC_OFFSET_SELF_IPI) { 1940 const uint32_t *apic_regs = 1941 (uint32_t *)(vlapic->apic_page); 1942 const uint32_t vector = 1943 apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1944 1945 vlapic_self_ipi_handler(vlapic, vector); 1946 return (HANDLED); 1947 } else 1948 return (UNHANDLED); 1949 } 1950 1951 switch (offset) { 1952 case APIC_OFFSET_ID: 1953 vlapic_id_write_handler(vlapic); 1954 break; 1955 case APIC_OFFSET_LDR: 1956 vlapic_ldr_write_handler(vlapic); 1957 break; 1958 case APIC_OFFSET_DFR: 1959 vlapic_dfr_write_handler(vlapic); 1960 break; 1961 case APIC_OFFSET_SVR: 1962 vlapic_svr_write_handler(vlapic); 1963 break; 1964 case APIC_OFFSET_ESR: 1965 vlapic_esr_write_handler(vlapic); 1966 break; 1967 case APIC_OFFSET_ICR_LOW: 1968 vlapic_icrlo_write_handler(vlapic); 1969 break; 1970 case APIC_OFFSET_CMCI_LVT: 1971 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1972 vlapic_lvt_write_handler(vlapic, offset); 1973 break; 1974 case APIC_OFFSET_TIMER_ICR: 1975 vlapic_icrtmr_write_handler(vlapic); 1976 break; 1977 case APIC_OFFSET_TIMER_DCR: 1978 vlapic_dcr_write_handler(vlapic); 1979 break; 1980 default: 1981 return (UNHANDLED); 1982 } 1983 return (HANDLED); 1984 } 1985 1986 static bool 1987 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1988 { 1989 1990 if (apic_access_virtualization(vmx, vcpuid) && 1991 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1992 return (true); 1993 else 1994 return (false); 1995 } 1996 1997 static int 1998 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 1999 { 2000 uint64_t qual; 2001 int access_type, offset, allowed; 2002 struct vie *vie; 2003 2004 if (!apic_access_virtualization(vmx, vcpuid)) 2005 return (UNHANDLED); 2006 2007 qual = vmexit->u.vmx.exit_qualification; 2008 access_type = APIC_ACCESS_TYPE(qual); 2009 offset = APIC_ACCESS_OFFSET(qual); 2010 2011 allowed = 0; 2012 if (access_type == 0) { 2013 /* 2014 * Read data access to the following registers is expected. 2015 */ 2016 switch (offset) { 2017 case APIC_OFFSET_APR: 2018 case APIC_OFFSET_PPR: 2019 case APIC_OFFSET_RRR: 2020 case APIC_OFFSET_CMCI_LVT: 2021 case APIC_OFFSET_TIMER_CCR: 2022 allowed = 1; 2023 break; 2024 default: 2025 break; 2026 } 2027 } else if (access_type == 1) { 2028 /* 2029 * Write data access to the following registers is expected. 2030 */ 2031 switch (offset) { 2032 case APIC_OFFSET_VER: 2033 case APIC_OFFSET_APR: 2034 case APIC_OFFSET_PPR: 2035 case APIC_OFFSET_RRR: 2036 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2037 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2038 case APIC_OFFSET_IRR0 ... 
APIC_OFFSET_IRR7: 2039 case APIC_OFFSET_CMCI_LVT: 2040 case APIC_OFFSET_TIMER_CCR: 2041 allowed = 1; 2042 break; 2043 default: 2044 break; 2045 } 2046 } 2047 2048 if (allowed) { 2049 vie = vm_vie_ctx(vmx->vm, vcpuid); 2050 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2051 VIE_INVALID_GLA); 2052 } 2053 2054 /* 2055 * Regardless of whether the APIC-access is allowed this handler 2056 * always returns UNHANDLED: 2057 * - if the access is allowed then it is handled by emulating the 2058 * instruction that caused the VM-exit (outside the critical section) 2059 * - if the access is not allowed then it will be converted to an 2060 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2061 */ 2062 return (UNHANDLED); 2063 } 2064 2065 static enum task_switch_reason 2066 vmx_task_switch_reason(uint64_t qual) 2067 { 2068 int reason; 2069 2070 reason = (qual >> 30) & 0x3; 2071 switch (reason) { 2072 case 0: 2073 return (TSR_CALL); 2074 case 1: 2075 return (TSR_IRET); 2076 case 2: 2077 return (TSR_JMP); 2078 case 3: 2079 return (TSR_IDT_GATE); 2080 default: 2081 panic("%s: invalid reason %d", __func__, reason); 2082 } 2083 } 2084 2085 static int 2086 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2087 bool is_wrmsr) 2088 { 2089 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2090 const uint32_t ecx = vmxctx->guest_rcx; 2091 vm_msr_result_t res; 2092 uint64_t val = 0; 2093 2094 if (is_wrmsr) { 2095 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2096 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2097 2098 if (vlapic_owned_msr(ecx)) { 2099 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2100 2101 res = vlapic_wrmsr(vlapic, ecx, val); 2102 } else { 2103 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2104 } 2105 } else { 2106 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2107 2108 if (vlapic_owned_msr(ecx)) { 2109 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2110 2111 res = vlapic_rdmsr(vlapic, ecx, &val); 2112 } else { 2113 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2114 } 2115 } 2116 2117 switch (res) { 2118 case VMR_OK: 2119 /* Store rdmsr result in the appropriate registers */ 2120 if (!is_wrmsr) { 2121 vmxctx->guest_rax = (uint32_t)val; 2122 vmxctx->guest_rdx = val >> 32; 2123 } 2124 return (HANDLED); 2125 case VMR_GP: 2126 vm_inject_gp(vmx->vm, vcpuid); 2127 return (HANDLED); 2128 case VMR_UNHANLDED: 2129 vmexit->exitcode = is_wrmsr ? 2130 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2131 vmexit->u.msr.code = ecx; 2132 vmexit->u.msr.wval = val; 2133 return (UNHANDLED); 2134 default: 2135 panic("unexpected msr result %u\n", res); 2136 } 2137 } 2138 2139 static int 2140 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2141 { 2142 int error, errcode, errcode_valid, handled; 2143 struct vmxctx *vmxctx; 2144 struct vie *vie; 2145 struct vlapic *vlapic; 2146 struct vm_task_switch *ts; 2147 uint32_t idtvec_info, intr_info; 2148 uint32_t intr_type, intr_vec, reason; 2149 uint64_t qual, gpa; 2150 2151 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2152 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2153 2154 handled = UNHANDLED; 2155 vmxctx = &vmx->ctx[vcpu]; 2156 2157 qual = vmexit->u.vmx.exit_qualification; 2158 reason = vmexit->u.vmx.exit_reason; 2159 vmexit->exitcode = VM_EXITCODE_BOGUS; 2160 2161 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2162 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2163 2164 /* 2165 * VM-entry failures during or after loading guest state. 
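 * (Per the SDM, the raw exit reason for such failures carries bit 31,
 * the "VM-entry failure" flag; only the basic reason, masked off in
 * vmx_run(), is compared here.)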
2166 * 2167 * These VM-exits are uncommon but must be handled specially 2168 * as most VM-exit fields are not populated as usual. 2169 */ 2170 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2171 vmm_call_trap(T_MCE); 2172 return (1); 2173 } 2174 2175 /* 2176 * VM exits that can be triggered during event delivery need to 2177 * be handled specially by re-injecting the event if the IDT 2178 * vectoring information field's valid bit is set. 2179 * 2180 * See "Information for VM Exits During Event Delivery" in Intel SDM 2181 * for details. 2182 */ 2183 idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO); 2184 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2185 /* Record exit intinfo */ 2186 VERIFY0(vm_exit_intinfo(vmx->vm, vcpu, 2187 vmx_idtvec_to_intinfo(idtvec_info))); 2188 2189 /* 2190 * If 'virtual NMIs' are being used and the VM-exit 2191 * happened while injecting an NMI during the previous 2192 * VM-entry, then clear "blocking by NMI" in the 2193 * Guest Interruptibility-State so the NMI can be 2194 * reinjected on the subsequent VM-entry. 2195 * 2196 * However, if the NMI was being delivered through a task 2197 * gate, then the new task must start execution with NMIs 2198 * blocked so don't clear NMI blocking in this case. 2199 */ 2200 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2201 if (intr_type == VMCS_INTR_T_NMI) { 2202 if (reason != EXIT_REASON_TASK_SWITCH) 2203 vmx_clear_nmi_blocking(vmx, vcpu); 2204 else 2205 vmx_assert_nmi_blocking(vmx, vcpu); 2206 } 2207 2208 /* 2209 * Update VM-entry instruction length if the event being 2210 * delivered was a software interrupt or software exception. 2211 */ 2212 if (intr_type == VMCS_INTR_T_SWINTR || 2213 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2214 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2215 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2216 } 2217 } 2218 2219 switch (reason) { 2220 case EXIT_REASON_TRIPLE_FAULT: 2221 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2222 handled = HANDLED; 2223 break; 2224 case EXIT_REASON_TASK_SWITCH: 2225 ts = &vmexit->u.task_switch; 2226 ts->tsssel = qual & 0xffff; 2227 ts->reason = vmx_task_switch_reason(qual); 2228 ts->ext = 0; 2229 ts->errcode_valid = 0; 2230 vmx_paging_info(&ts->paging); 2231 /* 2232 * If the task switch was due to a CALL, JMP, IRET, software 2233 * interrupt (INT n) or software exception (INT3, INTO), 2234 * then the saved %rip references the instruction that caused 2235 * the task switch. The instruction length field in the VMCS 2236 * is valid in this case. 2237 * 2238 * In all other cases (e.g., NMI, hardware exception) the 2239 * saved %rip is one that would have been saved in the old TSS 2240 * had the task switch completed normally so the instruction 2241 * length field is not needed in this case and is explicitly 2242 * set to 0. 
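 * (Exit-qualification layout for task switches, per the SDM: bits 15:0
 * carry the new TSS selector and bits 31:30 the switch source, which is
 * exactly what ts->tsssel and vmx_task_switch_reason() consume above.)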
2243 */ 2244 if (ts->reason == TSR_IDT_GATE) { 2245 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2246 ("invalid idtvec_info %x for IDT task switch", 2247 idtvec_info)); 2248 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2249 if (intr_type != VMCS_INTR_T_SWINTR && 2250 intr_type != VMCS_INTR_T_SWEXCEPTION && 2251 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2252 /* Task switch triggered by external event */ 2253 ts->ext = 1; 2254 vmexit->inst_length = 0; 2255 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2256 ts->errcode_valid = 1; 2257 ts->errcode = 2258 vmcs_read(VMCS_IDT_VECTORING_ERROR); 2259 } 2260 } 2261 } 2262 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2263 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2264 break; 2265 case EXIT_REASON_CR_ACCESS: 2266 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2267 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2268 switch (qual & 0xf) { 2269 case 0: 2270 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2271 break; 2272 case 4: 2273 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2274 break; 2275 case 8: 2276 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2277 break; 2278 } 2279 break; 2280 case EXIT_REASON_RDMSR: 2281 case EXIT_REASON_WRMSR: 2282 handled = vmx_handle_msr(vmx, vcpu, vmexit, 2283 reason == EXIT_REASON_WRMSR); 2284 break; 2285 case EXIT_REASON_HLT: 2286 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2287 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2288 vmexit->exitcode = VM_EXITCODE_HLT; 2289 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2290 break; 2291 case EXIT_REASON_MTF: 2292 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2293 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2294 vmexit->exitcode = VM_EXITCODE_MTRAP; 2295 vmexit->inst_length = 0; 2296 break; 2297 case EXIT_REASON_PAUSE: 2298 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2299 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2300 vmexit->exitcode = VM_EXITCODE_PAUSE; 2301 break; 2302 case EXIT_REASON_INTR_WINDOW: 2303 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2304 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2305 vmx_clear_int_window_exiting(vmx, vcpu); 2306 return (1); 2307 case EXIT_REASON_EXT_INTR: 2308 /* 2309 * External interrupts serve only to cause VM exits and allow 2310 * the host interrupt handler to run. 2311 * 2312 * If this external interrupt triggers a virtual interrupt 2313 * to a VM, then that state will be recorded by the 2314 * host interrupt handler in the VM's softc. We will inject 2315 * this virtual interrupt during the subsequent VM enter. 2316 */ 2317 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2318 SDT_PROBE4(vmm, vmx, exit, interrupt, 2319 vmx, vcpu, vmexit, intr_info); 2320 2321 /* 2322 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2323 * This appears to be a bug in VMware Fusion? 2324 */ 2325 if (!(intr_info & VMCS_INTR_VALID)) 2326 return (1); 2327 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2328 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2329 ("VM exit interruption info invalid: %x", intr_info)); 2330 vmx_trigger_hostintr(intr_info & 0xff); 2331 2332 /* 2333 * This is special. We want to treat this as a 'handled' 2334 * VM-exit but not increment the instruction pointer. 
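 * (The interrupted guest instruction never executed: the exit exists
 * solely so the host can service its own interrupt, so the guest must
 * resume at the same %rip.)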
2335 */ 2336 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2337 return (1); 2338 case EXIT_REASON_NMI_WINDOW: 2339 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2340 /* Exit to allow the pending virtual NMI to be injected */ 2341 if (vm_nmi_pending(vmx->vm, vcpu)) 2342 vmx_inject_nmi(vmx, vcpu); 2343 vmx_clear_nmi_window_exiting(vmx, vcpu); 2344 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2345 return (1); 2346 case EXIT_REASON_INOUT: 2347 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2348 vie = vm_vie_ctx(vmx->vm, vcpu); 2349 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2350 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2351 break; 2352 case EXIT_REASON_CPUID: 2353 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2354 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2355 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2356 break; 2357 case EXIT_REASON_EXCEPTION: 2358 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2359 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2360 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2361 ("VM exit interruption info invalid: %x", intr_info)); 2362 2363 intr_vec = intr_info & 0xff; 2364 intr_type = intr_info & VMCS_INTR_T_MASK; 2365 2366 /* 2367 * If Virtual NMIs control is 1 and the VM-exit is due to a 2368 * fault encountered during the execution of IRET then we must 2369 * restore the state of "virtual-NMI blocking" before resuming 2370 * the guest. 2371 * 2372 * See "Resuming Guest Software after Handling an Exception". 2373 * See "Information for VM Exits Due to Vectored Events". 2374 */ 2375 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2376 (intr_vec != IDT_DF) && 2377 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2378 vmx_restore_nmi_blocking(vmx, vcpu); 2379 2380 /* 2381 * The NMI has already been handled in vmx_exit_handle_nmi(). 2382 */ 2383 if (intr_type == VMCS_INTR_T_NMI) 2384 return (1); 2385 2386 /* 2387 * Call the machine check handler by hand. Also don't reflect 2388 * the machine check back into the guest. 2389 */ 2390 if (intr_vec == IDT_MC) { 2391 vmm_call_trap(T_MCE); 2392 return (1); 2393 } 2394 2395 /* 2396 * If the hypervisor has requested user exits for 2397 * debug exceptions, bounce them out to userland. 2398 */ 2399 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2400 intr_vec == IDT_BP && 2401 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2402 vmexit->exitcode = VM_EXITCODE_BPT; 2403 vmexit->u.bpt.inst_length = vmexit->inst_length; 2404 vmexit->inst_length = 0; 2405 break; 2406 } 2407 2408 if (intr_vec == IDT_PF) { 2409 vmxctx->guest_cr2 = qual; 2410 } 2411 2412 /* 2413 * Software exceptions exhibit trap-like behavior. This in 2414 * turn requires populating the VM-entry instruction length 2415 * so that the %rip in the trap frame is past the INT3/INTO 2416 * instruction. 
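 * (For example, a one-byte INT3 at guest %rip X must produce a trap
 * frame holding X + 1; populating VMCS_ENTRY_INST_LENGTH lets the CPU
 * apply that adjustment while injecting the event on VM-entry.)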
2417 */ 2418 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2419 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2420 2421 /* Reflect all other exceptions back into the guest */ 2422 errcode_valid = errcode = 0; 2423 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2424 errcode_valid = 1; 2425 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2426 } 2427 SDT_PROBE5(vmm, vmx, exit, exception, 2428 vmx, vcpu, vmexit, intr_vec, errcode); 2429 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2430 errcode_valid, errcode, 0); 2431 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2432 __func__, error)); 2433 return (1); 2434 2435 case EXIT_REASON_EPT_FAULT: 2436 /* 2437 * If 'gpa' lies within the address space allocated to 2438 * memory then this must be a nested page fault otherwise 2439 * this must be an instruction that accesses MMIO space. 2440 */ 2441 gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS); 2442 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2443 apic_access_fault(vmx, vcpu, gpa)) { 2444 vmexit->exitcode = VM_EXITCODE_PAGING; 2445 vmexit->inst_length = 0; 2446 vmexit->u.paging.gpa = gpa; 2447 vmexit->u.paging.fault_type = ept_fault_type(qual); 2448 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2449 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2450 vmx, vcpu, vmexit, gpa, qual); 2451 } else if (ept_emulation_fault(qual)) { 2452 vie = vm_vie_ctx(vmx->vm, vcpu); 2453 vmexit_mmio_emul(vmexit, vie, gpa, 2454 vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)); 2455 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2456 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2457 vmx, vcpu, vmexit, gpa); 2458 } 2459 /* 2460 * If Virtual NMIs control is 1 and the VM-exit is due to an 2461 * EPT fault during the execution of IRET then we must restore 2462 * the state of "virtual-NMI blocking" before resuming. 2463 * 2464 * See description of "NMI unblocking due to IRET" in 2465 * "Exit Qualification for EPT Violations". 2466 */ 2467 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2468 (qual & EXIT_QUAL_NMIUDTI) != 0) 2469 vmx_restore_nmi_blocking(vmx, vcpu); 2470 break; 2471 case EXIT_REASON_VIRTUALIZED_EOI: 2472 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2473 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2474 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2475 vmexit->inst_length = 0; /* trap-like */ 2476 break; 2477 case EXIT_REASON_APIC_ACCESS: 2478 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2479 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2480 break; 2481 case EXIT_REASON_APIC_WRITE: 2482 /* 2483 * APIC-write VM exit is trap-like so the %rip is already 2484 * pointing to the next instruction. 
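 * (Zeroing inst_length here keeps the common "handled" path from
 * advancing %rip a second time.)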
2485 */ 2486 vmexit->inst_length = 0; 2487 vlapic = vm_lapic(vmx->vm, vcpu); 2488 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2489 vmx, vcpu, vmexit, vlapic); 2490 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2491 break; 2492 case EXIT_REASON_XSETBV: 2493 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2494 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2495 break; 2496 case EXIT_REASON_MONITOR: 2497 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2498 vmexit->exitcode = VM_EXITCODE_MONITOR; 2499 break; 2500 case EXIT_REASON_MWAIT: 2501 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2502 vmexit->exitcode = VM_EXITCODE_MWAIT; 2503 break; 2504 case EXIT_REASON_TPR: 2505 vlapic = vm_lapic(vmx->vm, vcpu); 2506 vlapic_sync_tpr(vlapic); 2507 vmexit->inst_length = 0; 2508 handled = HANDLED; 2509 break; 2510 case EXIT_REASON_VMCALL: 2511 case EXIT_REASON_VMCLEAR: 2512 case EXIT_REASON_VMLAUNCH: 2513 case EXIT_REASON_VMPTRLD: 2514 case EXIT_REASON_VMPTRST: 2515 case EXIT_REASON_VMREAD: 2516 case EXIT_REASON_VMRESUME: 2517 case EXIT_REASON_VMWRITE: 2518 case EXIT_REASON_VMXOFF: 2519 case EXIT_REASON_VMXON: 2520 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2521 vmexit->exitcode = VM_EXITCODE_VMINSN; 2522 break; 2523 default: 2524 SDT_PROBE4(vmm, vmx, exit, unknown, 2525 vmx, vcpu, vmexit, reason); 2526 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2527 break; 2528 } 2529 2530 if (handled) { 2531 /* 2532 * It is possible that control is returned to userland 2533 * even though we were able to handle the VM exit in the 2534 * kernel. 2535 * 2536 * In such a case we want to make sure that the userland 2537 * restarts guest execution at the instruction *after* 2538 * the one we just processed. Therefore we update the 2539 * guest rip in the VMCS and in 'vmexit'. 2540 */ 2541 vmexit->rip += vmexit->inst_length; 2542 vmexit->inst_length = 0; 2543 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2544 } else { 2545 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2546 /* 2547 * If this VM exit was not claimed by anybody then 2548 * treat it as a generic VMX exit. 2549 */ 2550 vmexit->exitcode = VM_EXITCODE_VMX; 2551 vmexit->u.vmx.status = VM_SUCCESS; 2552 vmexit->u.vmx.inst_type = 0; 2553 vmexit->u.vmx.inst_error = 0; 2554 } else { 2555 /* 2556 * The exitcode and collateral have been populated. 2557 * The VM exit will be processed further in userland. 2558 */ 2559 } 2560 } 2561 2562 SDT_PROBE4(vmm, vmx, exit, return, 2563 vmx, vcpu, vmexit, handled); 2564 return (handled); 2565 } 2566 2567 static void 2568 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2569 { 2570 2571 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2572 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2573 vmxctx->inst_fail_status)); 2574 2575 vmexit->inst_length = 0; 2576 vmexit->exitcode = VM_EXITCODE_VMX; 2577 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2578 vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR); 2579 vmexit->u.vmx.exit_reason = ~0; 2580 vmexit->u.vmx.exit_qualification = ~0; 2581 2582 switch (rc) { 2583 case VMX_VMRESUME_ERROR: 2584 case VMX_VMLAUNCH_ERROR: 2585 case VMX_INVEPT_ERROR: 2586 case VMX_VMWRITE_ERROR: 2587 vmexit->u.vmx.inst_type = rc; 2588 break; 2589 default: 2590 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2591 } 2592 } 2593 2594 /* 2595 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2596 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2597 * sufficient to simply vector to the NMI handler via a software interrupt. 2598 * However, this must be done before maskable interrupts are enabled 2599 * otherwise the "iret" issued by an interrupt handler will incorrectly 2600 * clear NMI blocking. 2601 */ 2602 static __inline void 2603 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2604 { 2605 ASSERT(!interrupts_enabled()); 2606 2607 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2608 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2609 ASSERT(intr_info & VMCS_INTR_VALID); 2610 2611 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2612 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2613 vmm_call_trap(T_NMIFLT); 2614 } 2615 } 2616 } 2617 2618 static __inline void 2619 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2620 { 2621 uint64_t rflags; 2622 2623 /* Save host control debug registers. */ 2624 vmxctx->host_dr7 = rdr7(); 2625 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2626 2627 /* 2628 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2629 * exceptions in the host based on the guest DRx values. The 2630 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2631 */ 2632 load_dr7(0); 2633 wrmsr(MSR_DEBUGCTLMSR, 0); 2634 2635 /* 2636 * Disable single stepping the kernel to avoid corrupting the 2637 * guest DR6. A debugger might still be able to corrupt the 2638 * guest DR6 by setting a breakpoint after this point and then 2639 * single stepping. 2640 */ 2641 rflags = read_rflags(); 2642 vmxctx->host_tf = rflags & PSL_T; 2643 write_rflags(rflags & ~PSL_T); 2644 2645 /* Save host debug registers. */ 2646 vmxctx->host_dr0 = rdr0(); 2647 vmxctx->host_dr1 = rdr1(); 2648 vmxctx->host_dr2 = rdr2(); 2649 vmxctx->host_dr3 = rdr3(); 2650 vmxctx->host_dr6 = rdr6(); 2651 2652 /* Restore guest debug registers. */ 2653 load_dr0(vmxctx->guest_dr0); 2654 load_dr1(vmxctx->guest_dr1); 2655 load_dr2(vmxctx->guest_dr2); 2656 load_dr3(vmxctx->guest_dr3); 2657 load_dr6(vmxctx->guest_dr6); 2658 } 2659 2660 static __inline void 2661 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2662 { 2663 2664 /* Save guest debug registers. */ 2665 vmxctx->guest_dr0 = rdr0(); 2666 vmxctx->guest_dr1 = rdr1(); 2667 vmxctx->guest_dr2 = rdr2(); 2668 vmxctx->guest_dr3 = rdr3(); 2669 vmxctx->guest_dr6 = rdr6(); 2670 2671 /* 2672 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2673 * PSL_T last. 
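 * (The host's DR0-DR3 contents must be back in place before DR7 can
 * safely re-arm breakpoints, and PSL_T is restored last so that no
 * single-step trap fires while the restore sequence itself runs.)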
2674 */ 2675 load_dr0(vmxctx->host_dr0); 2676 load_dr1(vmxctx->host_dr1); 2677 load_dr2(vmxctx->host_dr2); 2678 load_dr3(vmxctx->host_dr3); 2679 load_dr6(vmxctx->host_dr6); 2680 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2681 load_dr7(vmxctx->host_dr7); 2682 write_rflags(read_rflags() | vmxctx->host_tf); 2683 } 2684 2685 static int 2686 vmx_run(void *arg, int vcpu, uint64_t rip) 2687 { 2688 int rc, handled, launched; 2689 struct vmx *vmx; 2690 struct vm *vm; 2691 struct vmxctx *vmxctx; 2692 uintptr_t vmcs_pa; 2693 struct vm_exit *vmexit; 2694 struct vlapic *vlapic; 2695 uint32_t exit_reason; 2696 bool tpr_shadow_active; 2697 vm_client_t *vmc; 2698 2699 vmx = arg; 2700 vm = vmx->vm; 2701 vmcs_pa = vmx->vmcs_pa[vcpu]; 2702 vmxctx = &vmx->ctx[vcpu]; 2703 vlapic = vm_lapic(vm, vcpu); 2704 vmexit = vm_exitinfo(vm, vcpu); 2705 vmc = vm_get_vmclient(vm, vcpu); 2706 launched = 0; 2707 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2708 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2709 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2710 2711 vmx_msr_guest_enter(vmx, vcpu); 2712 2713 vmcs_load(vmcs_pa); 2714 2715 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2716 vmx->vmcs_state[vcpu] = VS_LOADED; 2717 2718 /* 2719 * XXX 2720 * We do this every time because we may setup the virtual machine 2721 * from a different process than the one that actually runs it. 2722 * 2723 * If the life of a virtual machine was spent entirely in the context 2724 * of a single process we could do this once in vmx_vminit(). 2725 */ 2726 vmcs_write(VMCS_HOST_CR3, rcr3()); 2727 2728 vmcs_write(VMCS_GUEST_RIP, rip); 2729 vmx_set_pcpu_defaults(vmx, vcpu); 2730 do { 2731 enum event_inject_state inject_state; 2732 uint64_t eptgen; 2733 2734 ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip); 2735 2736 handled = UNHANDLED; 2737 2738 /* 2739 * Perform initial event/exception/interrupt injection before 2740 * host CPU interrupts are disabled. 2741 */ 2742 inject_state = vmx_inject_events(vmx, vcpu, rip); 2743 2744 /* 2745 * Interrupts are disabled from this point on until the 2746 * guest starts executing. This is done for the following 2747 * reasons: 2748 * 2749 * If an AST is asserted on this thread after the check below, 2750 * then the IPI_AST notification will not be lost, because it 2751 * will cause a VM exit due to external interrupt as soon as 2752 * the guest state is loaded. 2753 * 2754 * A posted interrupt after vmx_inject_vlapic() will not be 2755 * "lost" because it will be held pending in the host APIC 2756 * because interrupts are disabled. The pending interrupt will 2757 * be recognized as soon as the guest state is loaded. 2758 * 2759 * The same reasoning applies to the IPI generated by vmspace 2760 * invalidation. 2761 */ 2762 disable_intr(); 2763 2764 /* 2765 * If not precluded by existing events, inject any interrupt 2766 * pending on the vLAPIC. As a lock-less operation, it is safe 2767 * (and prudent) to perform with host CPU interrupts disabled. 2768 */ 2769 if (inject_state == EIS_CAN_INJECT) { 2770 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2771 } 2772 2773 /* 2774 * Check for vCPU bail-out conditions. This must be done after 2775 * vmx_inject_events() to detect a triple-fault condition. 
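 * (Bail-out covers conditions such as an instance-wide suspend -- e.g.
 * the triple fault noted in vmx_exit_process() -- which require a
 * return to userspace instead of another VM-entry.)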
2776 */ 2777 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2778 enable_intr(); 2779 break; 2780 } 2781 2782 if (vcpu_run_state_pending(vm, vcpu)) { 2783 enable_intr(); 2784 vm_exit_run_state(vmx->vm, vcpu, rip); 2785 break; 2786 } 2787 2788 /* 2789 * If subsequent activity queued events which require injection 2790 * handling, take another lap to handle them. 2791 */ 2792 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2793 enable_intr(); 2794 handled = HANDLED; 2795 continue; 2796 } 2797 2798 if ((rc = smt_acquire()) != 1) { 2799 enable_intr(); 2800 vmexit->rip = rip; 2801 vmexit->inst_length = 0; 2802 if (rc == -1) { 2803 vmexit->exitcode = VM_EXITCODE_HT; 2804 } else { 2805 vmexit->exitcode = VM_EXITCODE_BOGUS; 2806 handled = HANDLED; 2807 } 2808 break; 2809 } 2810 2811 /* 2812 * If this thread has gone off-cpu due to mutex operations 2813 * during vmx_run, the VMCS will have been unloaded, forcing a 2814 * re-VMLAUNCH as opposed to VMRESUME. 2815 */ 2816 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2817 /* 2818 * Restoration of the GDT limit is taken care of by 2819 * vmx_savectx(). Since the maximum practical index for the 2820 * IDT is 255, restoring its limits from the post-VMX-exit 2821 * default of 0xffff is not a concern. 2822 * 2823 * Only 64-bit hypervisor callers are allowed, which forgoes 2824 * the need to restore any LDT descriptor. Toss an error to 2825 * anyone attempting to break that rule. 2826 */ 2827 if (curproc->p_model != DATAMODEL_LP64) { 2828 smt_release(); 2829 enable_intr(); 2830 bzero(vmexit, sizeof (*vmexit)); 2831 vmexit->rip = rip; 2832 vmexit->exitcode = VM_EXITCODE_VMX; 2833 vmexit->u.vmx.status = VM_FAIL_INVALID; 2834 handled = UNHANDLED; 2835 break; 2836 } 2837 2838 if (tpr_shadow_active) { 2839 vmx_tpr_shadow_enter(vlapic); 2840 } 2841 2842 /* 2843 * Indicate activation of vmspace (EPT) table just prior to VMX 2844 * entry, checking for the necessity of an invept invalidation. 2845 */ 2846 eptgen = vmc_table_enter(vmc); 2847 if (vmx->eptgen[curcpu] != eptgen) { 2848 /* 2849 * VMspace generation does not match what was previously 2850 * used on this host CPU, so all mappings associated 2851 * with this EP4TA must be invalidated. 
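 * (The '1' passed to invept() below selects single-context
 * invalidation, flushing only translations tagged with this vmspace's
 * EP4TA rather than every EPT context on the CPU.)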
2852 */ 2853 invept(1, vmx->eptp); 2854 vmx->eptgen[curcpu] = eptgen; 2855 } 2856 2857 vcpu_ustate_change(vm, vcpu, VU_RUN); 2858 vmx_dr_enter_guest(vmxctx); 2859 2860 /* Perform VMX entry */ 2861 rc = vmx_enter_guest(vmxctx, vmx, launched); 2862 2863 vmx_dr_leave_guest(vmxctx); 2864 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2865 2866 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2867 smt_release(); 2868 2869 if (tpr_shadow_active) { 2870 vmx_tpr_shadow_exit(vlapic); 2871 } 2872 2873 /* Collect some information for VM exit processing */ 2874 vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP); 2875 vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH); 2876 vmexit->u.vmx.exit_reason = exit_reason = 2877 (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK); 2878 vmexit->u.vmx.exit_qualification = 2879 vmcs_read(VMCS_EXIT_QUALIFICATION); 2880 /* Update 'nextrip' */ 2881 vmx->state[vcpu].nextrip = rip; 2882 2883 if (rc == VMX_GUEST_VMEXIT) { 2884 vmx_exit_handle_possible_nmi(vmexit); 2885 } 2886 enable_intr(); 2887 vmc_table_exit(vmc); 2888 2889 if (rc == VMX_GUEST_VMEXIT) { 2890 handled = vmx_exit_process(vmx, vcpu, vmexit); 2891 } else { 2892 vmx_exit_inst_error(vmxctx, rc, vmexit); 2893 } 2894 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2895 uint32_t, exit_reason); 2896 rip = vmexit->rip; 2897 } while (handled); 2898 2899 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2900 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2901 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2902 vmexit->exitcode); 2903 } 2904 2905 vmcs_clear(vmcs_pa); 2906 vmx_msr_guest_exit(vmx, vcpu); 2907 2908 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 2909 vmx->vmcs_state[vcpu] = VS_NONE; 2910 2911 return (0); 2912 } 2913 2914 static void 2915 vmx_vmcleanup(void *arg) 2916 { 2917 int i; 2918 struct vmx *vmx = arg; 2919 uint16_t maxcpus; 2920 2921 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2922 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2923 kmem_free(vmx->apic_access_page, PAGESIZE); 2924 } else { 2925 VERIFY3P(vmx->apic_access_page, ==, NULL); 2926 } 2927 2928 vmx_msr_bitmap_destroy(vmx); 2929 2930 maxcpus = vm_get_maxcpus(vmx->vm); 2931 for (i = 0; i < maxcpus; i++) 2932 vpid_free(vmx->state[i].vpid); 2933 2934 kmem_free(vmx, sizeof (*vmx)); 2935 } 2936 2937 static uint64_t * 2938 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2939 { 2940 switch (reg) { 2941 case VM_REG_GUEST_RAX: 2942 return (&vmxctx->guest_rax); 2943 case VM_REG_GUEST_RBX: 2944 return (&vmxctx->guest_rbx); 2945 case VM_REG_GUEST_RCX: 2946 return (&vmxctx->guest_rcx); 2947 case VM_REG_GUEST_RDX: 2948 return (&vmxctx->guest_rdx); 2949 case VM_REG_GUEST_RSI: 2950 return (&vmxctx->guest_rsi); 2951 case VM_REG_GUEST_RDI: 2952 return (&vmxctx->guest_rdi); 2953 case VM_REG_GUEST_RBP: 2954 return (&vmxctx->guest_rbp); 2955 case VM_REG_GUEST_R8: 2956 return (&vmxctx->guest_r8); 2957 case VM_REG_GUEST_R9: 2958 return (&vmxctx->guest_r9); 2959 case VM_REG_GUEST_R10: 2960 return (&vmxctx->guest_r10); 2961 case VM_REG_GUEST_R11: 2962 return (&vmxctx->guest_r11); 2963 case VM_REG_GUEST_R12: 2964 return (&vmxctx->guest_r12); 2965 case VM_REG_GUEST_R13: 2966 return (&vmxctx->guest_r13); 2967 case VM_REG_GUEST_R14: 2968 return (&vmxctx->guest_r14); 2969 case VM_REG_GUEST_R15: 2970 return (&vmxctx->guest_r15); 2971 case VM_REG_GUEST_CR2: 2972 return (&vmxctx->guest_cr2); 2973 case VM_REG_GUEST_DR0: 2974 return (&vmxctx->guest_dr0); 2975 case VM_REG_GUEST_DR1: 2976 return 
(&vmxctx->guest_dr1); 2977 case VM_REG_GUEST_DR2: 2978 return (&vmxctx->guest_dr2); 2979 case VM_REG_GUEST_DR3: 2980 return (&vmxctx->guest_dr3); 2981 case VM_REG_GUEST_DR6: 2982 return (&vmxctx->guest_dr6); 2983 default: 2984 break; 2985 } 2986 return (NULL); 2987 } 2988 2989 static int 2990 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2991 { 2992 int running, hostcpu, err; 2993 struct vmx *vmx = arg; 2994 uint64_t *regp; 2995 2996 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2997 if (running && hostcpu != curcpu) 2998 panic("vmx_getreg: %d is running", vcpu); 2999 3000 /* VMCS access not required for ctx reads */ 3001 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3002 *retval = *regp; 3003 return (0); 3004 } 3005 3006 if (!running) { 3007 vmcs_load(vmx->vmcs_pa[vcpu]); 3008 } 3009 3010 err = 0; 3011 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3012 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3013 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3014 } else { 3015 uint32_t encoding; 3016 3017 encoding = vmcs_field_encoding(reg); 3018 switch (encoding) { 3019 case VMCS_GUEST_CR0: 3020 /* Take the shadow bits into account */ 3021 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3022 vmcs_read(VMCS_CR0_SHADOW)); 3023 break; 3024 case VMCS_GUEST_CR4: 3025 /* Take the shadow bits into account */ 3026 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3027 vmcs_read(VMCS_CR4_SHADOW)); 3028 break; 3029 case VMCS_INVALID_ENCODING: 3030 err = EINVAL; 3031 break; 3032 default: 3033 *retval = vmcs_read(encoding); 3034 break; 3035 } 3036 } 3037 3038 if (!running) { 3039 vmcs_clear(vmx->vmcs_pa[vcpu]); 3040 } 3041 3042 return (err); 3043 } 3044 3045 static int 3046 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3047 { 3048 int running, hostcpu, error; 3049 struct vmx *vmx = arg; 3050 uint64_t *regp; 3051 3052 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3053 if (running && hostcpu != curcpu) 3054 panic("vmx_setreg: %d is running", vcpu); 3055 3056 /* VMCS access not required for ctx writes */ 3057 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3058 *regp = val; 3059 return (0); 3060 } 3061 3062 if (!running) { 3063 vmcs_load(vmx->vmcs_pa[vcpu]); 3064 } 3065 3066 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3067 if (val != 0) { 3068 /* 3069 * Forcing the vcpu into an interrupt shadow is not 3070 * presently supported. 3071 */ 3072 error = EINVAL; 3073 } else { 3074 uint64_t gi; 3075 3076 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3077 gi &= ~HWINTR_BLOCKING; 3078 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3079 error = 0; 3080 } 3081 } else { 3082 uint32_t encoding; 3083 3084 error = 0; 3085 encoding = vmcs_field_encoding(reg); 3086 switch (encoding) { 3087 case VMCS_GUEST_IA32_EFER: 3088 /* 3089 * If the "load EFER" VM-entry control is 1 then the 3090 * value of EFER.LMA must be identical to "IA-32e mode 3091 * guest" bit in the VM-entry control. 3092 */ 3093 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) { 3094 uint64_t ctls; 3095 3096 ctls = vmcs_read(VMCS_ENTRY_CTLS); 3097 if (val & EFER_LMA) { 3098 ctls |= VM_ENTRY_GUEST_LMA; 3099 } else { 3100 ctls &= ~VM_ENTRY_GUEST_LMA; 3101 } 3102 vmcs_write(VMCS_ENTRY_CTLS, ctls); 3103 } 3104 vmcs_write(encoding, val); 3105 break; 3106 case VMCS_GUEST_CR0: 3107 /* 3108 * The guest is not allowed to modify certain bits in 3109 * %cr0 and %cr4. 
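 * (CR0.NE, for instance, must remain set for as long as the CPU is in
 * VMX operation, regardless of what the guest writes.)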
To maintain the illusion of full 3110 * control, they have shadow versions which contain the 3111 * guest-perceived (via reads from the register) values 3112 * as opposed to the guest-effective values. 3113 * 3114 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3115 */ 3116 vmcs_write(VMCS_CR0_SHADOW, val); 3117 vmcs_write(encoding, vmx_fix_cr0(val)); 3118 break; 3119 case VMCS_GUEST_CR4: 3120 /* See above for detail on %cr4 shadowing */ 3121 vmcs_write(VMCS_CR4_SHADOW, val); 3122 vmcs_write(encoding, vmx_fix_cr4(val)); 3123 break; 3124 case VMCS_GUEST_CR3: 3125 vmcs_write(encoding, val); 3126 /* 3127 * Invalidate the guest vcpu's TLB mappings to emulate 3128 * the behavior of updating %cr3. 3129 * 3130 * XXX the processor retains global mappings when %cr3 3131 * is updated but vmx_invvpid() does not. 3132 */ 3133 vmx_invvpid(vmx, vcpu, running); 3134 break; 3135 case VMCS_INVALID_ENCODING: 3136 error = EINVAL; 3137 break; 3138 default: 3139 vmcs_write(encoding, val); 3140 break; 3141 } 3142 } 3143 3144 if (!running) { 3145 vmcs_clear(vmx->vmcs_pa[vcpu]); 3146 } 3147 3148 return (error); 3149 } 3150 3151 static int 3152 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3153 { 3154 int hostcpu, running; 3155 struct vmx *vmx = arg; 3156 uint32_t base, limit, access; 3157 3158 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3159 if (running && hostcpu != curcpu) 3160 panic("vmx_getdesc: %d is running", vcpu); 3161 3162 if (!running) { 3163 vmcs_load(vmx->vmcs_pa[vcpu]); 3164 } 3165 3166 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3167 desc->base = vmcs_read(base); 3168 desc->limit = vmcs_read(limit); 3169 if (access != VMCS_INVALID_ENCODING) { 3170 desc->access = vmcs_read(access); 3171 } else { 3172 desc->access = 0; 3173 } 3174 3175 if (!running) { 3176 vmcs_clear(vmx->vmcs_pa[vcpu]); 3177 } 3178 return (0); 3179 } 3180 3181 static int 3182 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3183 { 3184 int hostcpu, running; 3185 struct vmx *vmx = arg; 3186 uint32_t base, limit, access; 3187 3188 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3189 if (running && hostcpu != curcpu) 3190 panic("vmx_setdesc: %d is running", vcpu); 3191 3192 if (!running) { 3193 vmcs_load(vmx->vmcs_pa[vcpu]); 3194 } 3195 3196 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3197 vmcs_write(base, desc->base); 3198 vmcs_write(limit, desc->limit); 3199 if (access != VMCS_INVALID_ENCODING) { 3200 vmcs_write(access, desc->access); 3201 } 3202 3203 if (!running) { 3204 vmcs_clear(vmx->vmcs_pa[vcpu]); 3205 } 3206 return (0); 3207 } 3208 3209 static int 3210 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3211 { 3212 struct vmx *vmx = arg; 3213 int vcap; 3214 int ret; 3215 3216 ret = ENOENT; 3217 3218 vcap = vmx->cap[vcpu].set; 3219 3220 switch (type) { 3221 case VM_CAP_HALT_EXIT: 3222 if (cap_halt_exit) 3223 ret = 0; 3224 break; 3225 case VM_CAP_PAUSE_EXIT: 3226 if (cap_pause_exit) 3227 ret = 0; 3228 break; 3229 case VM_CAP_MTRAP_EXIT: 3230 if (cap_monitor_trap) 3231 ret = 0; 3232 break; 3233 case VM_CAP_ENABLE_INVPCID: 3234 if (cap_invpcid) 3235 ret = 0; 3236 break; 3237 case VM_CAP_BPT_EXIT: 3238 ret = 0; 3239 break; 3240 default: 3241 break; 3242 } 3243 3244 if (ret == 0) 3245 *retval = (vcap & (1 << type)) ? 
1 : 0; 3246 3247 return (ret); 3248 } 3249 3250 static int 3251 vmx_setcap(void *arg, int vcpu, int type, int val) 3252 { 3253 struct vmx *vmx = arg; 3254 uint32_t baseval, reg, flag; 3255 uint32_t *pptr; 3256 int error; 3257 3258 error = ENOENT; 3259 pptr = NULL; 3260 3261 switch (type) { 3262 case VM_CAP_HALT_EXIT: 3263 if (cap_halt_exit) { 3264 error = 0; 3265 pptr = &vmx->cap[vcpu].proc_ctls; 3266 baseval = *pptr; 3267 flag = PROCBASED_HLT_EXITING; 3268 reg = VMCS_PRI_PROC_BASED_CTLS; 3269 } 3270 break; 3271 case VM_CAP_MTRAP_EXIT: 3272 if (cap_monitor_trap) { 3273 error = 0; 3274 pptr = &vmx->cap[vcpu].proc_ctls; 3275 baseval = *pptr; 3276 flag = PROCBASED_MTF; 3277 reg = VMCS_PRI_PROC_BASED_CTLS; 3278 } 3279 break; 3280 case VM_CAP_PAUSE_EXIT: 3281 if (cap_pause_exit) { 3282 error = 0; 3283 pptr = &vmx->cap[vcpu].proc_ctls; 3284 baseval = *pptr; 3285 flag = PROCBASED_PAUSE_EXITING; 3286 reg = VMCS_PRI_PROC_BASED_CTLS; 3287 } 3288 break; 3289 case VM_CAP_ENABLE_INVPCID: 3290 if (cap_invpcid) { 3291 error = 0; 3292 pptr = &vmx->cap[vcpu].proc_ctls2; 3293 baseval = *pptr; 3294 flag = PROCBASED2_ENABLE_INVPCID; 3295 reg = VMCS_SEC_PROC_BASED_CTLS; 3296 } 3297 break; 3298 case VM_CAP_BPT_EXIT: 3299 error = 0; 3300 3301 /* Don't change the bitmap if we are tracing all exceptions. */ 3302 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3303 pptr = &vmx->cap[vcpu].exc_bitmap; 3304 baseval = *pptr; 3305 flag = (1 << IDT_BP); 3306 reg = VMCS_EXCEPTION_BITMAP; 3307 } 3308 break; 3309 default: 3310 break; 3311 } 3312 3313 if (error != 0) { 3314 return (error); 3315 } 3316 3317 if (pptr != NULL) { 3318 if (val) { 3319 baseval |= flag; 3320 } else { 3321 baseval &= ~flag; 3322 } 3323 vmcs_load(vmx->vmcs_pa[vcpu]); 3324 vmcs_write(reg, baseval); 3325 vmcs_clear(vmx->vmcs_pa[vcpu]); 3326 3327 /* 3328 * Update optional stored flags, and record 3329 * setting 3330 */ 3331 *pptr = baseval; 3332 } 3333 3334 if (val) { 3335 vmx->cap[vcpu].set |= (1 << type); 3336 } else { 3337 vmx->cap[vcpu].set &= ~(1 << type); 3338 } 3339 3340 return (0); 3341 } 3342 3343 struct vlapic_vtx { 3344 struct vlapic vlapic; 3345 3346 /* Align to the nearest cacheline */ 3347 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3348 3349 /* TMR handling state for posted interrupts */ 3350 uint32_t tmr_active[8]; 3351 uint32_t pending_level[8]; 3352 uint32_t pending_edge[8]; 3353 3354 struct pir_desc *pir_desc; 3355 struct vmx *vmx; 3356 uint_t pending_prio; 3357 boolean_t tmr_sync; 3358 }; 3359 3360 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3361 3362 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3363 3364 static vcpu_notify_t 3365 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3366 { 3367 struct vlapic_vtx *vlapic_vtx; 3368 struct pir_desc *pir_desc; 3369 uint32_t mask, tmrval; 3370 int idx; 3371 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3372 3373 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3374 pir_desc = vlapic_vtx->pir_desc; 3375 idx = vector / 32; 3376 mask = 1UL << (vector % 32); 3377 3378 /* 3379 * If the currently asserted TMRs do not match the state requested by 3380 * the incoming interrupt, an exit will be required to reconcile those 3381 * bits in the APIC page. This will keep the vLAPIC behavior in line 3382 * with the architecturally defined expectations. 3383 * 3384 * If actors of mixed types (edge and level) are racing against the same 3385 * vector (toggling its TMR bit back and forth), the results could 3386 * inconsistent. 
Such circumstances are considered a rare edge case and 3387 * are never expected to be found in the wild. 3388 */ 3389 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3390 if (!level) { 3391 if ((tmrval & mask) != 0) { 3392 /* Edge-triggered interrupt needs TMR de-asserted */ 3393 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3394 atomic_store_rel_long(&pir_desc->pending, 1); 3395 return (VCPU_NOTIFY_EXIT); 3396 } 3397 } else { 3398 if ((tmrval & mask) == 0) { 3399 /* Level-triggered interrupt needs TMR asserted */ 3400 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3401 atomic_store_rel_long(&pir_desc->pending, 1); 3402 return (VCPU_NOTIFY_EXIT); 3403 } 3404 } 3405 3406 /* 3407 * If the interrupt request does not require manipulation of the TMRs 3408 * for delivery, set it in PIR descriptor. It cannot be inserted into 3409 * the APIC page while the vCPU might be running. 3410 */ 3411 atomic_set_int(&pir_desc->pir[idx], mask); 3412 3413 /* 3414 * A notification is required whenever the 'pending' bit makes a 3415 * transition from 0->1. 3416 * 3417 * Even if the 'pending' bit is already asserted, notification about 3418 * the incoming interrupt may still be necessary. For example, if a 3419 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3420 * the 0->1 'pending' transition with a notification, but the vCPU 3421 * would ignore the interrupt for the time being. The same vCPU would 3422 * need to then be notified if a high-priority interrupt arrived which 3423 * satisfied the PPR. 3424 * 3425 * The priorities of interrupts injected while 'pending' is asserted 3426 * are tracked in a custom bitfield 'pending_prio'. Should the 3427 * to-be-injected interrupt exceed the priorities already present, the 3428 * notification is sent. The priorities recorded in 'pending_prio' are 3429 * cleared whenever the 'pending' bit makes another 0->1 transition. 3430 */ 3431 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3432 notify = VCPU_NOTIFY_APIC; 3433 vlapic_vtx->pending_prio = 0; 3434 } else { 3435 const uint_t old_prio = vlapic_vtx->pending_prio; 3436 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3437 3438 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3439 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3440 notify = VCPU_NOTIFY_APIC; 3441 } 3442 } 3443 3444 return (notify); 3445 } 3446 3447 static void 3448 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3449 { 3450 /* 3451 * When APICv is enabled for an instance, the traditional interrupt 3452 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3453 * used and the CPU does the heavy lifting of virtual interrupt 3454 * delivery. For that reason vmx_intr_accepted() should never be called 3455 * when APICv is enabled. 
3456 */ 3457 panic("vmx_intr_accepted: not expected to be called"); 3458 } 3459 3460 static void 3461 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3462 { 3463 struct vlapic_vtx *vlapic_vtx; 3464 const uint32_t *tmrs; 3465 3466 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3467 tmrs = &vlapic_vtx->tmr_active[0]; 3468 3469 if (!vlapic_vtx->tmr_sync) { 3470 return; 3471 } 3472 3473 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3474 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3475 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3476 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3477 vlapic_vtx->tmr_sync = B_FALSE; 3478 } 3479 3480 static void 3481 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3482 { 3483 struct vmx *vmx; 3484 uint32_t proc_ctls; 3485 int vcpuid; 3486 3487 vcpuid = vlapic->vcpuid; 3488 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3489 3490 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3491 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3492 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3493 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3494 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3495 3496 vmcs_load(vmx->vmcs_pa[vcpuid]); 3497 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3498 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3499 } 3500 3501 static void 3502 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3503 { 3504 struct vmx *vmx; 3505 uint32_t proc_ctls2; 3506 int vcpuid; 3507 3508 vcpuid = vlapic->vcpuid; 3509 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3510 3511 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3512 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3513 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3514 3515 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3516 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3517 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3518 3519 vmcs_load(vmx->vmcs_pa[vcpuid]); 3520 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3521 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3522 3523 vmx_allow_x2apic_msrs(vmx, vcpuid); 3524 } 3525 3526 static void 3527 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3528 { 3529 psm_send_pir_ipi(hostcpu); 3530 } 3531 3532 static void 3533 vmx_apicv_sync(struct vlapic *vlapic) 3534 { 3535 struct vlapic_vtx *vlapic_vtx; 3536 struct pir_desc *pir_desc; 3537 struct LAPIC *lapic; 3538 uint_t i; 3539 3540 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3541 pir_desc = vlapic_vtx->pir_desc; 3542 lapic = vlapic->apic_page; 3543 3544 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3545 return; 3546 } 3547 3548 vlapic_vtx->pending_prio = 0; 3549 3550 /* Make sure the invalid (0-15) vectors are not set */ 3551 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3552 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3553 ASSERT0(pir_desc->pir[0] & 0xffff); 3554 3555 for (i = 0; i <= 7; i++) { 3556 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3557 uint32_t *irrp = &lapic->irr0 + (i * 4); 3558 3559 const uint32_t pending_level = 3560 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3561 const uint32_t pending_edge = 3562 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3563 const uint32_t pending_inject = 3564 atomic_readandclear_int(&pir_desc->pir[i]); 3565 3566 if (pending_level != 0) { 3567 /* 3568 * Level-triggered interrupts assert their corresponding 3569 * bit in the TMR when queued in IRR. 
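 * (A worked example: level-triggered vector 0x50 yields idx = 2 and
 * mask = 1 << 16 in vmx_apicv_set_ready(), so it is bit 16 of TMR2 and
 * IRR2 that is asserted here.)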
3570 */ 3571 *tmrp |= pending_level; 3572 *irrp |= pending_level; 3573 } 3574 if (pending_edge != 0) { 3575 /* 3576 * When queuing an edge-triggered interrupt in IRR, the 3577 * corresponding bit in the TMR is cleared. 3578 */ 3579 *tmrp &= ~pending_edge; 3580 *irrp |= pending_edge; 3581 } 3582 if (pending_inject != 0) { 3583 /* 3584 * Interrupts which do not require a change to the TMR 3585 * (because it already matches the necessary state) can 3586 * simply be queued in IRR. 3587 */ 3588 *irrp |= pending_inject; 3589 } 3590 3591 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3592 /* Check if VMX EOI triggers require updating. */ 3593 vlapic_vtx->tmr_active[i] = *tmrp; 3594 vlapic_vtx->tmr_sync = B_TRUE; 3595 } 3596 } 3597 } 3598 3599 static void 3600 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3601 { 3602 /* 3603 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3604 * TPR falls below a threshold priority. That threshold is set to the 3605 * current TPR priority, since guest interrupt status should be 3606 * re-evaluated if its TPR is set lower. 3607 */ 3608 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3609 } 3610 3611 static void 3612 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3613 { 3614 /* 3615 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3616 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3617 * the PPR is updated to reflect any change in the TPR here. 3618 */ 3619 vlapic_sync_tpr(vlapic); 3620 } 3621 3622 static struct vlapic * 3623 vmx_vlapic_init(void *arg, int vcpuid) 3624 { 3625 struct vmx *vmx = arg; 3626 struct vlapic_vtx *vlapic_vtx; 3627 struct vlapic *vlapic; 3628 3629 vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP); 3630 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3631 vlapic_vtx->vmx = vmx; 3632 3633 vlapic = &vlapic_vtx->vlapic; 3634 vlapic->vm = vmx->vm; 3635 vlapic->vcpuid = vcpuid; 3636 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3637 3638 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3639 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3640 } 3641 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3642 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3643 vlapic->ops.sync_state = vmx_apicv_sync; 3644 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3645 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3646 3647 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3648 vlapic->ops.post_intr = vmx_apicv_notify; 3649 } 3650 } 3651 3652 vlapic_init(vlapic); 3653 3654 return (vlapic); 3655 } 3656 3657 static void 3658 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3659 { 3660 vlapic_cleanup(vlapic); 3661 kmem_free(vlapic, sizeof (struct vlapic_vtx)); 3662 } 3663 3664 static void 3665 vmx_savectx(void *arg, int vcpu) 3666 { 3667 struct vmx *vmx = arg; 3668 3669 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3670 vmcs_clear(vmx->vmcs_pa[vcpu]); 3671 vmx_msr_guest_exit(vmx, vcpu); 3672 /* 3673 * Having VMCLEARed the VMCS, it can no longer be re-entered 3674 * with VMRESUME, but must be VMLAUNCHed again. 
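 * (vmx_run() consults VS_LAUNCHED when choosing between VMLAUNCH and
 * VMRESUME, so clearing the flag here is what forces the re-launch.)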
3675 */ 3676 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3677 } 3678 3679 reset_gdtr_limit(); 3680 } 3681 3682 static void 3683 vmx_restorectx(void *arg, int vcpu) 3684 { 3685 struct vmx *vmx = arg; 3686 3687 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3688 3689 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3690 vmx_msr_guest_enter(vmx, vcpu); 3691 vmcs_load(vmx->vmcs_pa[vcpu]); 3692 } 3693 } 3694 3695 struct vmm_ops vmm_ops_intel = { 3696 .init = vmx_init, 3697 .cleanup = vmx_cleanup, 3698 .resume = vmx_restore, 3699 3700 .vminit = vmx_vminit, 3701 .vmrun = vmx_run, 3702 .vmcleanup = vmx_vmcleanup, 3703 .vmgetreg = vmx_getreg, 3704 .vmsetreg = vmx_setreg, 3705 .vmgetdesc = vmx_getdesc, 3706 .vmsetdesc = vmx_setdesc, 3707 .vmgetcap = vmx_getcap, 3708 .vmsetcap = vmx_setcap, 3709 .vlapic_init = vmx_vlapic_init, 3710 .vlapic_cleanup = vmx_vlapic_cleanup, 3711 3712 .vmsavectx = vmx_savectx, 3713 .vmrestorectx = vmx_restorectx, 3714 }; 3715 3716 /* Side-effect free HW validation derived from checks in vmx_init. */ 3717 int 3718 vmx_x86_supported(const char **msg) 3719 { 3720 int error; 3721 uint32_t tmp; 3722 3723 ASSERT(msg != NULL); 3724 3725 /* Check support for primary processor-based VM-execution controls */ 3726 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3727 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3728 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3729 if (error) { 3730 *msg = "processor does not support desired primary " 3731 "processor-based controls"; 3732 return (error); 3733 } 3734 3735 /* Check support for secondary processor-based VM-execution controls */ 3736 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3737 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3738 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3739 if (error) { 3740 *msg = "processor does not support desired secondary " 3741 "processor-based controls"; 3742 return (error); 3743 } 3744 3745 /* Check support for pin-based VM-execution controls */ 3746 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3747 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3748 PINBASED_CTLS_ZERO_SETTING, &tmp); 3749 if (error) { 3750 *msg = "processor does not support desired pin-based controls"; 3751 return (error); 3752 } 3753 3754 /* Check support for VM-exit controls */ 3755 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3756 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3757 if (error) { 3758 *msg = "processor does not support desired exit controls"; 3759 return (error); 3760 } 3761 3762 /* Check support for VM-entry controls */ 3763 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3764 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3765 if (error) { 3766 *msg = "processor does not support desired entry controls"; 3767 return (error); 3768 } 3769 3770 /* Unrestricted guest is nominally optional, but not for us. */ 3771 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3772 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3773 if (error) { 3774 *msg = "processor does not support desired unrestricted guest " 3775 "controls"; 3776 return (error); 3777 } 3778 3779 return (0); 3780 } 3781
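/*
 * Illustrative caller of vmx_x86_supported() -- a hypothetical sketch,
 * not part of this file:
 *
 *	const char *msg = NULL;
 *
 *	if (vmx_x86_supported(&msg) != 0) {
 *		cmn_err(CE_WARN, "VMX not usable: %s", msg);
 *		return (ENOTSUP);
 *	}
 */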