/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 * Copyright 2022 MNX Cloud, Inc.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_kernel.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING	\
	(PINBASED_EXTINT_EXITING |	\
	PINBASED_NMI_EXITING |		\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING	\
	(PROCBASED_INT_WINDOW_EXITING |	\
	PROCBASED_NMI_WINDOW_EXITING)

/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING	\
	(PROCBASED_SECONDARY_CONTROLS |	\
	PROCBASED_TSC_OFFSET |		\
	PROCBASED_MWAIT_EXITING |	\
	PROCBASED_MONITOR_EXITING |	\
	PROCBASED_IO_EXITING |		\
	PROCBASED_MSR_BITMAPS |		\
	PROCBASED_CTLS_WINDOW_SETTING |	\
	PROCBASED_CR8_LOAD_EXITING |	\
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
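 * Unrestricted Guest is what permits vCPUs to run with CR0_PE and CR0_PG
 * clear, as a bootrom entered at the reset vector requires.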
 */
#define	PROCBASED_CTLS2_ONE_SETTING	\
	(PROCBASED2_ENABLE_EPT |	\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING	\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |	\
	VM_EXIT_HOST_LMA |		\
	VM_EXIT_LOAD_PAT |		\
	VM_EXIT_SAVE_EFER |		\
	VM_EXIT_LOAD_EFER |		\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING	\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |	\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING	\
	(VM_ENTRY_INTO_SMM |		\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define	EPT_CAPS_REQUIRED			\
	(IA32_VMX_EPT_VPID_PWL4 |		\
	IA32_VMX_EPT_VPID_TYPE_WB |		\
	IA32_VMX_EPT_VPID_INVEPT |		\
	IA32_VMX_EPT_VPID_INVEPT_SINGLE |	\
	IA32_VMX_EPT_VPID_INVEPT_ALL |		\
	IA32_VMX_EPT_VPID_INVVPID |		\
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define	HANDLED		1
#define	UNHANDLED	0

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/*
 * Optional capabilities
 */

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
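 *
 * Each probe carries the vmx softc, the vcpu ID, and the vm_exit structure;
 * several add an exit-specific payload (MSR number, fault addresses, etc.).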
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow read-only access to the following x2APIC MSRs from the
	 * guest.
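	 *
	 * Writes to these registers remain intercepted; they take the normal
	 * MSR-exit path and are handled by the vlapic code.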
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs need not be unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
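		 * (The EP4TA is the EPT root address carried in the EPTP,
		 * which differs for every VM.)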
433 * 434 * It is still sub-optimal because the invvpid will invalidate 435 * combined mappings for a particular VPID across all EP4TAs. 436 */ 437 while (i-- > 0) 438 vpid_free(vpid[i]); 439 440 for (i = 0; i < num; i++) 441 vpid[i] = i + 1; 442 } 443 } 444 445 static int 446 vmx_cleanup(void) 447 { 448 /* This is taken care of by the hma registration */ 449 return (0); 450 } 451 452 static void 453 vmx_restore(void) 454 { 455 /* No-op on illumos */ 456 } 457 458 static int 459 vmx_init(void) 460 { 461 int error; 462 uint64_t fixed0, fixed1; 463 uint32_t tmp; 464 enum vmx_caps avail_caps = VMX_CAP_NONE; 465 466 /* Check support for primary processor-based VM-execution controls */ 467 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 468 MSR_VMX_TRUE_PROCBASED_CTLS, 469 PROCBASED_CTLS_ONE_SETTING, 470 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 471 if (error) { 472 printf("vmx_init: processor does not support desired primary " 473 "processor-based controls\n"); 474 return (error); 475 } 476 477 /* Clear the processor-based ctl bits that are set on demand */ 478 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 479 480 /* Check support for secondary processor-based VM-execution controls */ 481 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 482 MSR_VMX_PROCBASED_CTLS2, 483 PROCBASED_CTLS2_ONE_SETTING, 484 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 485 if (error) { 486 printf("vmx_init: processor does not support desired secondary " 487 "processor-based controls\n"); 488 return (error); 489 } 490 491 /* Check support for VPID */ 492 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 493 MSR_VMX_PROCBASED_CTLS2, 494 PROCBASED2_ENABLE_VPID, 495 0, &tmp); 496 if (error == 0) 497 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 498 499 /* Check support for pin-based VM-execution controls */ 500 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 501 MSR_VMX_TRUE_PINBASED_CTLS, 502 PINBASED_CTLS_ONE_SETTING, 503 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 504 if (error) { 505 printf("vmx_init: processor does not support desired " 506 "pin-based controls\n"); 507 return (error); 508 } 509 510 /* Check support for VM-exit controls */ 511 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 512 VM_EXIT_CTLS_ONE_SETTING, 513 VM_EXIT_CTLS_ZERO_SETTING, 514 &exit_ctls); 515 if (error) { 516 printf("vmx_init: processor does not support desired " 517 "exit controls\n"); 518 return (error); 519 } 520 521 /* Check support for VM-entry controls */ 522 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 523 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 524 &entry_ctls); 525 if (error) { 526 printf("vmx_init: processor does not support desired " 527 "entry controls\n"); 528 return (error); 529 } 530 531 /* 532 * Check support for optional features by testing them 533 * as individual bits 534 */ 535 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 536 MSR_VMX_TRUE_PROCBASED_CTLS, 537 PROCBASED_HLT_EXITING, 0, 538 &tmp) == 0); 539 540 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 541 MSR_VMX_PROCBASED_CTLS, 542 PROCBASED_MTF, 0, 543 &tmp) == 0); 544 545 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 546 MSR_VMX_TRUE_PROCBASED_CTLS, 547 PROCBASED_PAUSE_EXITING, 0, 548 &tmp) == 0); 549 550 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 551 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 552 &tmp) == 0); 553 554 /* 555 * Check for APIC virtualization capabilities: 556 * - TPR shadowing 557 * - Full APICv (with or without x2APIC support) 558 
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for
				 * requesting and using a PIR IPI vector are
				 * present, use them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and
	 * the hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
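	 *
	 * With CR0_NW and CR0_CD included in the CR0 guest/host mask, guest
	 * writes to them land only in the read shadow while the running CR0
	 * keeps caching enabled.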
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP);
	VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0);

	vmx->vm = vm;
	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings.  So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use.  Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_SYSENTER_ESP_MSR value is loaded
		 * in vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled;

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}

/*
 * Invalidate guest mappings identified by its VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance).  Actions which modify the EPT
 * structures for the instance (such as unmapping GPAs) would require an
 * 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may
	 * be stale TLB entries for this VPID on the target, or when emulated
	 * actions on the guest CPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest.  The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
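		 * That INVEPT is issued prior to VM-entry once the eptgen
		 * snapshot for this host CPU is found to be stale.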
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for
	 * every trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT, TSS, GDT, and GS-base state */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		/* Enable interrupt window exiting */
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));

	/* Disable interrupt window exiting */
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset since vCPUs inherit the TSC offset of whatever physical CPU they
 * have migrated onto.  Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
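 *
 * For example, a vCPU migrating from a host CPU whose TSC reads 1000 to one
 * reading 900 would otherwise observe its TSC step backwards by 100 ticks.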
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR);
CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI);
CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP);
CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR);
CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5);
CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6);
CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE);
CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE);

static uint64_t
vmx_idtvec_to_intinfo(uint32_t info)
{
	ASSERT(info & VMCS_IDT_VEC_VALID);

	const uint32_t type = info & VMCS_INTR_T_MASK;
	const uint8_t vec = info & 0xff;

	switch (type) {
	case VMCS_INTR_T_HWINTR:
	case VMCS_INTR_T_NMI:
	case VMCS_INTR_T_HWEXCEPTION:
	case VMCS_INTR_T_SWINTR:
	case VMCS_INTR_T_PRIV_SWEXCEPTION:
	case VMCS_INTR_T_SWEXCEPTION:
		break;
	default:
		panic("unexpected event type 0x%03x", type);
	}

	uint64_t intinfo = VM_INTINFO_VALID | type | vec;
	if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
		const uint32_t errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR);
		intinfo |= (uint64_t)errcode << 32;
	}

	return (intinfo);
}

static void
vmx_inject_intinfo(uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));
	ASSERT0(info & VM_INTINFO_MASK_RSVD);

	/*
	 * The bhyve format matches that of the VMCS, which is ensured by the
	 * CTASSERTs above.
	 */
	uint32_t inject = info;
	switch (VM_INTINFO_VECTOR(info)) {
	case IDT_BP:
	case IDT_OF:
		/*
		 * VT-x requires #BP and #OF to be injected as software
		 * exceptions.
		 */
		inject &= ~VMCS_INTR_T_MASK;
		inject |= VMCS_INTR_T_SWEXCEPTION;
		break;
	default:
		break;
	}

	if (VM_INTINFO_HAS_ERRCODE(info)) {
		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
		    VM_INTINFO_ERRCODE(info));
	}
	vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		vmx_inject_intinfo(entryinfo);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on
		 * a CPU without such a restriction it will immediately exit
		 * and the NMI will be injected in the "NMI window exiting"
		 * handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv.  It does not preclude
		 * other event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.
 * In this case the hypervisor needs to restore virtual-NMI blocking before
 * resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a #GP fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer)
{
	uint64_t ctrl;

	/*
	 * If the "load EFER" VM-entry control is 1 (which we require) then
	 * the value of EFER.LMA must be identical to the "IA-32e mode guest"
	 * bit in the VM-entry control.
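	 *
	 * VM-entry consistency checks enforce this, so the control must be
	 * kept in sync whenever the guest's EFER value changes.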
	 */
	ctrl = vmcs_read(VMCS_ENTRY_CTLS);
	if ((efer & EFER_LMA) != 0) {
		ctrl |= VM_ENTRY_GUEST_LMA;
	} else {
		ctrl &= ~VM_ENTRY_GUEST_LMA;
	}
	vmcs_write(VMCS_ENTRY_CTLS, ctrl);
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer;

		/* Keep EFER.LMA properly updated if paging is enabled */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			vmx_sync_efer_state(vmx, vcpu, efer);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/*
	 * We only handle mov %cr8 to/from a register at this time.
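	 *
	 * Exit qualification bit 4 distinguishes a read (mov from %cr8)
	 * from a write, and bits 11:8 identify the GPR involved.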
	 */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of
		 * the ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
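		 * An encoded value of 1, for example, yields 2 << 1 = 4,
		 * i.e. 32-bit addressing.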
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 17:15 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	const uint_t offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
1942 */ 1943 if (x2apic_virtualization(vmx, vcpuid) && 1944 offset == APIC_OFFSET_SELF_IPI) { 1945 const uint32_t *apic_regs = 1946 (uint32_t *)(vlapic->apic_page); 1947 const uint32_t vector = 1948 apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1949 1950 vlapic_self_ipi_handler(vlapic, vector); 1951 return (HANDLED); 1952 } else 1953 return (UNHANDLED); 1954 } 1955 1956 switch (offset) { 1957 case APIC_OFFSET_ID: 1958 vlapic_id_write_handler(vlapic); 1959 break; 1960 case APIC_OFFSET_LDR: 1961 vlapic_ldr_write_handler(vlapic); 1962 break; 1963 case APIC_OFFSET_DFR: 1964 vlapic_dfr_write_handler(vlapic); 1965 break; 1966 case APIC_OFFSET_SVR: 1967 vlapic_svr_write_handler(vlapic); 1968 break; 1969 case APIC_OFFSET_ESR: 1970 vlapic_esr_write_handler(vlapic); 1971 break; 1972 case APIC_OFFSET_ICR_LOW: 1973 vlapic_icrlo_write_handler(vlapic); 1974 break; 1975 case APIC_OFFSET_CMCI_LVT: 1976 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1977 vlapic_lvt_write_handler(vlapic, offset); 1978 break; 1979 case APIC_OFFSET_TIMER_ICR: 1980 vlapic_icrtmr_write_handler(vlapic); 1981 break; 1982 case APIC_OFFSET_TIMER_DCR: 1983 vlapic_dcr_write_handler(vlapic); 1984 break; 1985 default: 1986 return (UNHANDLED); 1987 } 1988 return (HANDLED); 1989 } 1990 1991 static bool 1992 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1993 { 1994 1995 if (apic_access_virtualization(vmx, vcpuid) && 1996 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1997 return (true); 1998 else 1999 return (false); 2000 } 2001 2002 static int 2003 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2004 { 2005 uint64_t qual; 2006 int access_type, offset, allowed; 2007 struct vie *vie; 2008 2009 if (!apic_access_virtualization(vmx, vcpuid)) 2010 return (UNHANDLED); 2011 2012 qual = vmexit->u.vmx.exit_qualification; 2013 access_type = APIC_ACCESS_TYPE(qual); 2014 offset = APIC_ACCESS_OFFSET(qual); 2015 2016 allowed = 0; 2017 if (access_type == 0) { 2018 /* 2019 * Read data access to the following registers is expected. 2020 */ 2021 switch (offset) { 2022 case APIC_OFFSET_APR: 2023 case APIC_OFFSET_PPR: 2024 case APIC_OFFSET_RRR: 2025 case APIC_OFFSET_CMCI_LVT: 2026 case APIC_OFFSET_TIMER_CCR: 2027 allowed = 1; 2028 break; 2029 default: 2030 break; 2031 } 2032 } else if (access_type == 1) { 2033 /* 2034 * Write data access to the following registers is expected. 2035 */ 2036 switch (offset) { 2037 case APIC_OFFSET_VER: 2038 case APIC_OFFSET_APR: 2039 case APIC_OFFSET_PPR: 2040 case APIC_OFFSET_RRR: 2041 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2042 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2043 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2044 case APIC_OFFSET_CMCI_LVT: 2045 case APIC_OFFSET_TIMER_CCR: 2046 allowed = 1; 2047 break; 2048 default: 2049 break; 2050 } 2051 } 2052 2053 if (allowed) { 2054 vie = vm_vie_ctx(vmx->vm, vcpuid); 2055 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2056 VIE_INVALID_GLA); 2057 } 2058 2059 /* 2060 * Regardless of whether the APIC-access is allowed this handler 2061 * always returns UNHANDLED: 2062 * - if the access is allowed then it is handled by emulating the 2063 * instruction that caused the VM-exit (outside the critical section) 2064 * - if the access is not allowed then it will be converted to an 2065 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 
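 * (That conversion is performed by the VM_EXITCODE_BOGUS fallback
 * at the bottom of vmx_exit_process().)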
2066 */ 2067 return (UNHANDLED); 2068 } 2069 2070 static enum task_switch_reason 2071 vmx_task_switch_reason(uint64_t qual) 2072 { 2073 int reason; 2074 2075 reason = (qual >> 30) & 0x3; 2076 switch (reason) { 2077 case 0: 2078 return (TSR_CALL); 2079 case 1: 2080 return (TSR_IRET); 2081 case 2: 2082 return (TSR_JMP); 2083 case 3: 2084 return (TSR_IDT_GATE); 2085 default: 2086 panic("%s: invalid reason %d", __func__, reason); 2087 } 2088 } 2089 2090 static int 2091 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2092 bool is_wrmsr) 2093 { 2094 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2095 const uint32_t ecx = vmxctx->guest_rcx; 2096 vm_msr_result_t res; 2097 uint64_t val = 0; 2098 2099 if (is_wrmsr) { 2100 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2101 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2102 2103 if (vlapic_owned_msr(ecx)) { 2104 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2105 2106 res = vlapic_wrmsr(vlapic, ecx, val); 2107 } else { 2108 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2109 } 2110 } else { 2111 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2112 2113 if (vlapic_owned_msr(ecx)) { 2114 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2115 2116 res = vlapic_rdmsr(vlapic, ecx, &val); 2117 } else { 2118 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2119 } 2120 } 2121 2122 switch (res) { 2123 case VMR_OK: 2124 /* Store rdmsr result in the appropriate registers */ 2125 if (!is_wrmsr) { 2126 vmxctx->guest_rax = (uint32_t)val; 2127 vmxctx->guest_rdx = val >> 32; 2128 } 2129 return (HANDLED); 2130 case VMR_GP: 2131 vm_inject_gp(vmx->vm, vcpuid); 2132 return (HANDLED); 2133 case VMR_UNHANLDED: 2134 vmexit->exitcode = is_wrmsr ? 2135 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2136 vmexit->u.msr.code = ecx; 2137 vmexit->u.msr.wval = val; 2138 return (UNHANDLED); 2139 default: 2140 panic("unexpected msr result %u\n", res); 2141 } 2142 } 2143 2144 static int 2145 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2146 { 2147 int error, errcode, errcode_valid, handled; 2148 struct vmxctx *vmxctx; 2149 struct vie *vie; 2150 struct vlapic *vlapic; 2151 struct vm_task_switch *ts; 2152 uint32_t idtvec_info, intr_info; 2153 uint32_t intr_type, intr_vec, reason; 2154 uint64_t qual, gpa; 2155 2156 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2157 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2158 2159 handled = UNHANDLED; 2160 vmxctx = &vmx->ctx[vcpu]; 2161 2162 qual = vmexit->u.vmx.exit_qualification; 2163 reason = vmexit->u.vmx.exit_reason; 2164 vmexit->exitcode = VM_EXITCODE_BOGUS; 2165 2166 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2167 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2168 2169 /* 2170 * VM-entry failures during or after loading guest state. 2171 * 2172 * These VM-exits are uncommon but must be handled specially 2173 * as most VM-exit fields are not populated as usual. 2174 */ 2175 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2176 vmm_call_trap(T_MCE); 2177 return (1); 2178 } 2179 2180 /* 2181 * VM exits that can be triggered during event delivery need to 2182 * be handled specially by re-injecting the event if the IDT 2183 * vectoring information field's valid bit is set. 2184 * 2185 * See "Information for VM Exits During Event Delivery" in Intel SDM 2186 * for details. 
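 * (The vectoring info captured below is recorded via
 * vm_exit_intinfo() so the interrupted event is re-injected on a
 * subsequent VM-entry.)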
2187 */ 2188 idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO); 2189 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2190 /* Record exit intinfo */ 2191 VERIFY0(vm_exit_intinfo(vmx->vm, vcpu, 2192 vmx_idtvec_to_intinfo(idtvec_info))); 2193 2194 /* 2195 * If 'virtual NMIs' are being used and the VM-exit 2196 * happened while injecting an NMI during the previous 2197 * VM-entry, then clear "blocking by NMI" in the 2198 * Guest Interruptibility-State so the NMI can be 2199 * reinjected on the subsequent VM-entry. 2200 * 2201 * However, if the NMI was being delivered through a task 2202 * gate, then the new task must start execution with NMIs 2203 * blocked so don't clear NMI blocking in this case. 2204 */ 2205 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2206 if (intr_type == VMCS_INTR_T_NMI) { 2207 if (reason != EXIT_REASON_TASK_SWITCH) 2208 vmx_clear_nmi_blocking(vmx, vcpu); 2209 else 2210 vmx_assert_nmi_blocking(vmx, vcpu); 2211 } 2212 2213 /* 2214 * Update VM-entry instruction length if the event being 2215 * delivered was a software interrupt or software exception. 2216 */ 2217 if (intr_type == VMCS_INTR_T_SWINTR || 2218 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2219 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2220 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2221 } 2222 } 2223 2224 switch (reason) { 2225 case EXIT_REASON_TRIPLE_FAULT: 2226 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2227 handled = HANDLED; 2228 break; 2229 case EXIT_REASON_TASK_SWITCH: 2230 ts = &vmexit->u.task_switch; 2231 ts->tsssel = qual & 0xffff; 2232 ts->reason = vmx_task_switch_reason(qual); 2233 ts->ext = 0; 2234 ts->errcode_valid = 0; 2235 vmx_paging_info(&ts->paging); 2236 /* 2237 * If the task switch was due to a CALL, JMP, IRET, software 2238 * interrupt (INT n) or software exception (INT3, INTO), 2239 * then the saved %rip references the instruction that caused 2240 * the task switch. The instruction length field in the VMCS 2241 * is valid in this case. 2242 * 2243 * In all other cases (e.g., NMI, hardware exception) the 2244 * saved %rip is one that would have been saved in the old TSS 2245 * had the task switch completed normally so the instruction 2246 * length field is not needed in this case and is explicitly 2247 * set to 0. 
2248 */
2249 if (ts->reason == TSR_IDT_GATE) {
2250 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2251 ("invalid idtvec_info %x for IDT task switch",
2252 idtvec_info));
2253 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2254 if (intr_type != VMCS_INTR_T_SWINTR &&
2255 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2256 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2257 /* Task switch triggered by external event */
2258 ts->ext = 1;
2259 vmexit->inst_length = 0;
2260 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2261 ts->errcode_valid = 1;
2262 ts->errcode =
2263 vmcs_read(VMCS_IDT_VECTORING_ERROR);
2264 }
2265 }
2266 }
2267 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2268 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
2269 break;
2270 case EXIT_REASON_CR_ACCESS:
2271 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2272 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
2273 switch (qual & 0xf) {
2274 case 0:
2275 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2276 break;
2277 case 4:
2278 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2279 break;
2280 case 8:
2281 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2282 break;
2283 }
2284 break;
2285 case EXIT_REASON_RDMSR:
2286 case EXIT_REASON_WRMSR:
2287 handled = vmx_handle_msr(vmx, vcpu, vmexit,
2288 reason == EXIT_REASON_WRMSR);
2289 break;
2290 case EXIT_REASON_HLT:
2291 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2292 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
2293 vmexit->exitcode = VM_EXITCODE_HLT;
2294 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2295 break;
2296 case EXIT_REASON_MTF:
2297 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2298 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
2299 vmexit->exitcode = VM_EXITCODE_MTRAP;
2300 vmexit->inst_length = 0;
2301 break;
2302 case EXIT_REASON_PAUSE:
2303 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2304 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
2305 vmexit->exitcode = VM_EXITCODE_PAUSE;
2306 break;
2307 case EXIT_REASON_INTR_WINDOW:
2308 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2309 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
2310 vmx_clear_int_window_exiting(vmx, vcpu);
2311 return (1);
2312 case EXIT_REASON_EXT_INTR:
2313 /*
2314 * External interrupts serve only to cause VM exits and allow
2315 * the host interrupt handler to run.
2316 *
2317 * If this external interrupt triggers a virtual interrupt
2318 * to a VM, then that state will be recorded by the
2319 * host interrupt handler in the VM's softc. We will inject
2320 * this virtual interrupt during the subsequent VM enter.
2321 */
2322 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2323 SDT_PROBE4(vmm, vmx, exit, interrupt,
2324 vmx, vcpu, vmexit, intr_info);
2325
2326 /*
2327 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2328 * This appears to be a bug in VMware Fusion?
2329 */
2330 if (!(intr_info & VMCS_INTR_VALID))
2331 return (1);
2332 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2333 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2334 ("VM exit interruption info invalid: %x", intr_info));
2335 vmx_trigger_hostintr(intr_info & 0xff);
2336
2337 /*
2338 * This is special. We want to treat this as a 'handled'
2339 * VM-exit but not increment the instruction pointer.
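 * Returning 1 here, rather than setting 'handled' and breaking
 * out of the switch, bypasses the %rip adjustment applied to
 * handled exits at the bottom of this function.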
2340 */ 2341 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2342 return (1); 2343 case EXIT_REASON_NMI_WINDOW: 2344 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2345 /* Exit to allow the pending virtual NMI to be injected */ 2346 if (vm_nmi_pending(vmx->vm, vcpu)) 2347 vmx_inject_nmi(vmx, vcpu); 2348 vmx_clear_nmi_window_exiting(vmx, vcpu); 2349 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2350 return (1); 2351 case EXIT_REASON_INOUT: 2352 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2353 vie = vm_vie_ctx(vmx->vm, vcpu); 2354 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2355 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2356 break; 2357 case EXIT_REASON_CPUID: 2358 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2359 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2360 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2361 break; 2362 case EXIT_REASON_EXCEPTION: 2363 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2364 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2365 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2366 ("VM exit interruption info invalid: %x", intr_info)); 2367 2368 intr_vec = intr_info & 0xff; 2369 intr_type = intr_info & VMCS_INTR_T_MASK; 2370 2371 /* 2372 * If Virtual NMIs control is 1 and the VM-exit is due to a 2373 * fault encountered during the execution of IRET then we must 2374 * restore the state of "virtual-NMI blocking" before resuming 2375 * the guest. 2376 * 2377 * See "Resuming Guest Software after Handling an Exception". 2378 * See "Information for VM Exits Due to Vectored Events". 2379 */ 2380 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2381 (intr_vec != IDT_DF) && 2382 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2383 vmx_restore_nmi_blocking(vmx, vcpu); 2384 2385 /* 2386 * The NMI has already been handled in vmx_exit_handle_nmi(). 2387 */ 2388 if (intr_type == VMCS_INTR_T_NMI) 2389 return (1); 2390 2391 /* 2392 * Call the machine check handler by hand. Also don't reflect 2393 * the machine check back into the guest. 2394 */ 2395 if (intr_vec == IDT_MC) { 2396 vmm_call_trap(T_MCE); 2397 return (1); 2398 } 2399 2400 /* 2401 * If the hypervisor has requested user exits for 2402 * debug exceptions, bounce them out to userland. 2403 */ 2404 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2405 intr_vec == IDT_BP && 2406 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2407 vmexit->exitcode = VM_EXITCODE_BPT; 2408 vmexit->u.bpt.inst_length = vmexit->inst_length; 2409 vmexit->inst_length = 0; 2410 break; 2411 } 2412 2413 if (intr_vec == IDT_PF) { 2414 vmxctx->guest_cr2 = qual; 2415 } 2416 2417 /* 2418 * Software exceptions exhibit trap-like behavior. This in 2419 * turn requires populating the VM-entry instruction length 2420 * so that the %rip in the trap frame is past the INT3/INTO 2421 * instruction. 
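 * For example, a #BP reflected into the guest must resume at the
 * byte following the INT3 opcode (0xCC), not re-execute it.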
2422 */ 2423 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2424 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2425 2426 /* Reflect all other exceptions back into the guest */ 2427 errcode_valid = errcode = 0; 2428 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2429 errcode_valid = 1; 2430 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2431 } 2432 SDT_PROBE5(vmm, vmx, exit, exception, 2433 vmx, vcpu, vmexit, intr_vec, errcode); 2434 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2435 errcode_valid, errcode, 0); 2436 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2437 __func__, error)); 2438 return (1); 2439 2440 case EXIT_REASON_EPT_FAULT: 2441 /* 2442 * If 'gpa' lies within the address space allocated to 2443 * memory then this must be a nested page fault otherwise 2444 * this must be an instruction that accesses MMIO space. 2445 */ 2446 gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS); 2447 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2448 apic_access_fault(vmx, vcpu, gpa)) { 2449 vmexit->exitcode = VM_EXITCODE_PAGING; 2450 vmexit->inst_length = 0; 2451 vmexit->u.paging.gpa = gpa; 2452 vmexit->u.paging.fault_type = ept_fault_type(qual); 2453 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2454 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2455 vmx, vcpu, vmexit, gpa, qual); 2456 } else if (ept_emulation_fault(qual)) { 2457 vie = vm_vie_ctx(vmx->vm, vcpu); 2458 vmexit_mmio_emul(vmexit, vie, gpa, 2459 vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)); 2460 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2461 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2462 vmx, vcpu, vmexit, gpa); 2463 } 2464 /* 2465 * If Virtual NMIs control is 1 and the VM-exit is due to an 2466 * EPT fault during the execution of IRET then we must restore 2467 * the state of "virtual-NMI blocking" before resuming. 2468 * 2469 * See description of "NMI unblocking due to IRET" in 2470 * "Exit Qualification for EPT Violations". 2471 */ 2472 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2473 (qual & EXIT_QUAL_NMIUDTI) != 0) 2474 vmx_restore_nmi_blocking(vmx, vcpu); 2475 break; 2476 case EXIT_REASON_VIRTUALIZED_EOI: 2477 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2478 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2479 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2480 vmexit->inst_length = 0; /* trap-like */ 2481 break; 2482 case EXIT_REASON_APIC_ACCESS: 2483 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2484 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2485 break; 2486 case EXIT_REASON_APIC_WRITE: 2487 /* 2488 * APIC-write VM exit is trap-like so the %rip is already 2489 * pointing to the next instruction. 
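 * Clearing inst_length below prevents the common handled-exit
 * path from advancing %rip a second time.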
2490 */ 2491 vmexit->inst_length = 0; 2492 vlapic = vm_lapic(vmx->vm, vcpu); 2493 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2494 vmx, vcpu, vmexit, vlapic); 2495 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2496 break; 2497 case EXIT_REASON_XSETBV: 2498 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2499 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2500 break; 2501 case EXIT_REASON_MONITOR: 2502 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2503 vmexit->exitcode = VM_EXITCODE_MONITOR; 2504 break; 2505 case EXIT_REASON_MWAIT: 2506 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2507 vmexit->exitcode = VM_EXITCODE_MWAIT; 2508 break; 2509 case EXIT_REASON_TPR: 2510 vlapic = vm_lapic(vmx->vm, vcpu); 2511 vlapic_sync_tpr(vlapic); 2512 vmexit->inst_length = 0; 2513 handled = HANDLED; 2514 break; 2515 case EXIT_REASON_VMCALL: 2516 case EXIT_REASON_VMCLEAR: 2517 case EXIT_REASON_VMLAUNCH: 2518 case EXIT_REASON_VMPTRLD: 2519 case EXIT_REASON_VMPTRST: 2520 case EXIT_REASON_VMREAD: 2521 case EXIT_REASON_VMRESUME: 2522 case EXIT_REASON_VMWRITE: 2523 case EXIT_REASON_VMXOFF: 2524 case EXIT_REASON_VMXON: 2525 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2526 vmexit->exitcode = VM_EXITCODE_VMINSN; 2527 break; 2528 default: 2529 SDT_PROBE4(vmm, vmx, exit, unknown, 2530 vmx, vcpu, vmexit, reason); 2531 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2532 break; 2533 } 2534 2535 if (handled) { 2536 /* 2537 * It is possible that control is returned to userland 2538 * even though we were able to handle the VM exit in the 2539 * kernel. 2540 * 2541 * In such a case we want to make sure that the userland 2542 * restarts guest execution at the instruction *after* 2543 * the one we just processed. Therefore we update the 2544 * guest rip in the VMCS and in 'vmexit'. 2545 */ 2546 vmexit->rip += vmexit->inst_length; 2547 vmexit->inst_length = 0; 2548 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2549 } else { 2550 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2551 /* 2552 * If this VM exit was not claimed by anybody then 2553 * treat it as a generic VMX exit. 2554 */ 2555 vmexit->exitcode = VM_EXITCODE_VMX; 2556 vmexit->u.vmx.status = VM_SUCCESS; 2557 vmexit->u.vmx.inst_type = 0; 2558 vmexit->u.vmx.inst_error = 0; 2559 } else { 2560 /* 2561 * The exitcode and collateral have been populated. 2562 * The VM exit will be processed further in userland. 2563 */ 2564 } 2565 } 2566 2567 SDT_PROBE4(vmm, vmx, exit, return, 2568 vmx, vcpu, vmexit, handled); 2569 return (handled); 2570 } 2571 2572 static void 2573 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2574 { 2575 2576 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2577 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2578 vmxctx->inst_fail_status)); 2579 2580 vmexit->inst_length = 0; 2581 vmexit->exitcode = VM_EXITCODE_VMX; 2582 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2583 vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR); 2584 vmexit->u.vmx.exit_reason = ~0; 2585 vmexit->u.vmx.exit_qualification = ~0; 2586 2587 switch (rc) { 2588 case VMX_VMRESUME_ERROR: 2589 case VMX_VMLAUNCH_ERROR: 2590 case VMX_INVEPT_ERROR: 2591 case VMX_VMWRITE_ERROR: 2592 vmexit->u.vmx.inst_type = rc; 2593 break; 2594 default: 2595 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2596 } 2597 } 2598 2599 /* 2600 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2601 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2602 * sufficient to simply vector to the NMI handler via a software interrupt. 2603 * However, this must be done before maskable interrupts are enabled 2604 * otherwise the "iret" issued by an interrupt handler will incorrectly 2605 * clear NMI blocking. 2606 */ 2607 static __inline void 2608 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2609 { 2610 ASSERT(!interrupts_enabled()); 2611 2612 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2613 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2614 ASSERT(intr_info & VMCS_INTR_VALID); 2615 2616 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2617 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2618 vmm_call_trap(T_NMIFLT); 2619 } 2620 } 2621 } 2622 2623 static __inline void 2624 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2625 { 2626 uint64_t rflags; 2627 2628 /* Save host control debug registers. */ 2629 vmxctx->host_dr7 = rdr7(); 2630 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2631 2632 /* 2633 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2634 * exceptions in the host based on the guest DRx values. The 2635 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2636 */ 2637 load_dr7(0); 2638 wrmsr(MSR_DEBUGCTLMSR, 0); 2639 2640 /* 2641 * Disable single stepping the kernel to avoid corrupting the 2642 * guest DR6. A debugger might still be able to corrupt the 2643 * guest DR6 by setting a breakpoint after this point and then 2644 * single stepping. 2645 */ 2646 rflags = read_rflags(); 2647 vmxctx->host_tf = rflags & PSL_T; 2648 write_rflags(rflags & ~PSL_T); 2649 2650 /* Save host debug registers. */ 2651 vmxctx->host_dr0 = rdr0(); 2652 vmxctx->host_dr1 = rdr1(); 2653 vmxctx->host_dr2 = rdr2(); 2654 vmxctx->host_dr3 = rdr3(); 2655 vmxctx->host_dr6 = rdr6(); 2656 2657 /* Restore guest debug registers. */ 2658 load_dr0(vmxctx->guest_dr0); 2659 load_dr1(vmxctx->guest_dr1); 2660 load_dr2(vmxctx->guest_dr2); 2661 load_dr3(vmxctx->guest_dr3); 2662 load_dr6(vmxctx->guest_dr6); 2663 } 2664 2665 static __inline void 2666 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2667 { 2668 2669 /* Save guest debug registers. */ 2670 vmxctx->guest_dr0 = rdr0(); 2671 vmxctx->guest_dr1 = rdr1(); 2672 vmxctx->guest_dr2 = rdr2(); 2673 vmxctx->guest_dr3 = rdr3(); 2674 vmxctx->guest_dr6 = rdr6(); 2675 2676 /* 2677 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2678 * PSL_T last. 
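 * While the guest DR0-DR6 values are still loaded, host debug
 * facilities enabled via DR7, DEBUGCTL, or PSL_T could raise
 * spurious debug exceptions based on guest state, so they are
 * re-enabled only after the host DR0-DR6 state is back in place.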
2679 */
2680 load_dr0(vmxctx->host_dr0);
2681 load_dr1(vmxctx->host_dr1);
2682 load_dr2(vmxctx->host_dr2);
2683 load_dr3(vmxctx->host_dr3);
2684 load_dr6(vmxctx->host_dr6);
2685 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2686 load_dr7(vmxctx->host_dr7);
2687 write_rflags(read_rflags() | vmxctx->host_tf);
2688 }
2689
2690 static int
2691 vmx_run(void *arg, int vcpu, uint64_t rip)
2692 {
2693 int rc, handled, launched;
2694 struct vmx *vmx;
2695 struct vm *vm;
2696 struct vmxctx *vmxctx;
2697 uintptr_t vmcs_pa;
2698 struct vm_exit *vmexit;
2699 struct vlapic *vlapic;
2700 uint32_t exit_reason;
2701 bool tpr_shadow_active;
2702 vm_client_t *vmc;
2703
2704 vmx = arg;
2705 vm = vmx->vm;
2706 vmcs_pa = vmx->vmcs_pa[vcpu];
2707 vmxctx = &vmx->ctx[vcpu];
2708 vlapic = vm_lapic(vm, vcpu);
2709 vmexit = vm_exitinfo(vm, vcpu);
2710 vmc = vm_get_vmclient(vm, vcpu);
2711 launched = 0;
2712 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) &&
2713 !vmx_cap_en(vmx, VMX_CAP_APICV) &&
2714 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0;
2715
2716 vmx_msr_guest_enter(vmx, vcpu);
2717
2718 vmcs_load(vmcs_pa);
2719
2720 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
2721 vmx->vmcs_state[vcpu] = VS_LOADED;
2722
2723 /*
2724 * XXX
2725 * We do this every time because we may set up the virtual machine
2726 * from a different process than the one that actually runs it.
2727 *
2728 * If the life of a virtual machine was spent entirely in the context
2729 * of a single process we could do this once in vmx_vminit().
2730 */
2731 vmcs_write(VMCS_HOST_CR3, rcr3());
2732
2733 vmcs_write(VMCS_GUEST_RIP, rip);
2734 vmx_set_pcpu_defaults(vmx, vcpu);
2735 do {
2736 enum event_inject_state inject_state;
2737 uint64_t eptgen;
2738
2739 ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip);
2740
2741 handled = UNHANDLED;
2742
2743 /*
2744 * Perform initial event/exception/interrupt injection before
2745 * host CPU interrupts are disabled.
2746 */
2747 inject_state = vmx_inject_events(vmx, vcpu, rip);
2748
2749 /*
2750 * Interrupts are disabled from this point on until the
2751 * guest starts executing. This is done for the following
2752 * reasons:
2753 *
2754 * If an AST is asserted on this thread after the check below,
2755 * then the IPI_AST notification will not be lost, because it
2756 * will cause a VM exit due to external interrupt as soon as
2757 * the guest state is loaded.
2758 *
2759 * A posted interrupt after vmx_inject_vlapic() will not be
2760 * "lost" because it will be held pending in the host APIC
2761 * because interrupts are disabled. The pending interrupt will
2762 * be recognized as soon as the guest state is loaded.
2763 *
2764 * The same reasoning applies to the IPI generated by vmspace
2765 * invalidation.
2766 */
2767 disable_intr();
2768
2769 /*
2770 * If not precluded by existing events, inject any interrupt
2771 * pending on the vLAPIC. As a lock-less operation, it is safe
2772 * (and prudent) to perform with host CPU interrupts disabled.
2773 */
2774 if (inject_state == EIS_CAN_INJECT) {
2775 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic);
2776 }
2777
2778 /*
2779 * Check for vCPU bail-out conditions. This must be done after
2780 * vmx_inject_events() to detect a triple-fault condition.
2781 */ 2782 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2783 enable_intr(); 2784 break; 2785 } 2786 2787 if (vcpu_run_state_pending(vm, vcpu)) { 2788 enable_intr(); 2789 vm_exit_run_state(vmx->vm, vcpu, rip); 2790 break; 2791 } 2792 2793 /* 2794 * If subsequent activity queued events which require injection 2795 * handling, take another lap to handle them. 2796 */ 2797 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2798 enable_intr(); 2799 handled = HANDLED; 2800 continue; 2801 } 2802 2803 if ((rc = smt_acquire()) != 1) { 2804 enable_intr(); 2805 vmexit->rip = rip; 2806 vmexit->inst_length = 0; 2807 if (rc == -1) { 2808 vmexit->exitcode = VM_EXITCODE_HT; 2809 } else { 2810 vmexit->exitcode = VM_EXITCODE_BOGUS; 2811 handled = HANDLED; 2812 } 2813 break; 2814 } 2815 2816 /* 2817 * If this thread has gone off-cpu due to mutex operations 2818 * during vmx_run, the VMCS will have been unloaded, forcing a 2819 * re-VMLAUNCH as opposed to VMRESUME. 2820 */ 2821 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2822 /* 2823 * Restoration of the GDT limit is taken care of by 2824 * vmx_savectx(). Since the maximum practical index for the 2825 * IDT is 255, restoring its limits from the post-VMX-exit 2826 * default of 0xffff is not a concern. 2827 * 2828 * Only 64-bit hypervisor callers are allowed, which forgoes 2829 * the need to restore any LDT descriptor. Toss an error to 2830 * anyone attempting to break that rule. 2831 */ 2832 if (curproc->p_model != DATAMODEL_LP64) { 2833 smt_release(); 2834 enable_intr(); 2835 bzero(vmexit, sizeof (*vmexit)); 2836 vmexit->rip = rip; 2837 vmexit->exitcode = VM_EXITCODE_VMX; 2838 vmexit->u.vmx.status = VM_FAIL_INVALID; 2839 handled = UNHANDLED; 2840 break; 2841 } 2842 2843 if (tpr_shadow_active) { 2844 vmx_tpr_shadow_enter(vlapic); 2845 } 2846 2847 /* 2848 * Indicate activation of vmspace (EPT) table just prior to VMX 2849 * entry, checking for the necessity of an invept invalidation. 2850 */ 2851 eptgen = vmc_table_enter(vmc); 2852 if (vmx->eptgen[curcpu] != eptgen) { 2853 /* 2854 * VMspace generation does not match what was previously 2855 * used on this host CPU, so all mappings associated 2856 * with this EP4TA must be invalidated. 
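 * The single-context INVEPT (type 1) issued below flushes just
 * the mappings tagged with this EPTP.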
2857 */
2858 invept(1, vmx->eptp);
2859 vmx->eptgen[curcpu] = eptgen;
2860 }
2861
2862 vcpu_ustate_change(vm, vcpu, VU_RUN);
2863 vmx_dr_enter_guest(vmxctx);
2864
2865 /* Perform VMX entry */
2866 rc = vmx_enter_guest(vmxctx, vmx, launched);
2867
2868 vmx_dr_leave_guest(vmxctx);
2869 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
2870
2871 vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
2872 smt_release();
2873
2874 if (tpr_shadow_active) {
2875 vmx_tpr_shadow_exit(vlapic);
2876 }
2877
2878 /* Collect some information for VM exit processing */
2879 vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP);
2880 vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH);
2881 vmexit->u.vmx.exit_reason = exit_reason =
2882 (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK);
2883 vmexit->u.vmx.exit_qualification =
2884 vmcs_read(VMCS_EXIT_QUALIFICATION);
2885 /* Update 'nextrip' */
2886 vmx->state[vcpu].nextrip = rip;
2887
2888 if (rc == VMX_GUEST_VMEXIT) {
2889 vmx_exit_handle_possible_nmi(vmexit);
2890 }
2891 enable_intr();
2892 vmc_table_exit(vmc);
2893
2894 if (rc == VMX_GUEST_VMEXIT) {
2895 handled = vmx_exit_process(vmx, vcpu, vmexit);
2896 } else {
2897 vmx_exit_inst_error(vmxctx, rc, vmexit);
2898 }
2899 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
2900 uint32_t, exit_reason);
2901 rip = vmexit->rip;
2902 } while (handled);
2903
2904 /* If a VM exit has been handled then the exitcode must be BOGUS */
2905 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) {
2906 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit",
2907 vmexit->exitcode);
2908 }
2909
2910 vmcs_clear(vmcs_pa);
2911 vmx_msr_guest_exit(vmx, vcpu);
2912
2913 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0);
2914 vmx->vmcs_state[vcpu] = VS_NONE;
2915
2916 return (0);
2917 }
2918
2919 static void
2920 vmx_vmcleanup(void *arg)
2921 {
2922 int i;
2923 struct vmx *vmx = arg;
2924 uint16_t maxcpus;
2925
2926 if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
2927 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2928 kmem_free(vmx->apic_access_page, PAGESIZE);
2929 } else {
2930 VERIFY3P(vmx->apic_access_page, ==, NULL);
2931 }
2932
2933 vmx_msr_bitmap_destroy(vmx);
2934
2935 maxcpus = vm_get_maxcpus(vmx->vm);
2936 for (i = 0; i < maxcpus; i++)
2937 vpid_free(vmx->state[i].vpid);
2938
2939 kmem_free(vmx, sizeof (*vmx));
2940 }
2941
2942 /*
2943 * Ensure that the VMCS for this vcpu is loaded.
2944 * Returns true if a VMCS load was required.
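 * Callers pair this with vmx_vmcs_access_done(), which VMCLEARs
 * a VMCS that was loaded here.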
2945 */ 2946 static bool 2947 vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu) 2948 { 2949 int hostcpu; 2950 2951 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2952 if (hostcpu != curcpu) { 2953 panic("unexpected vcpu migration %d != %d", 2954 hostcpu, curcpu); 2955 } 2956 /* Earlier logic already took care of the load */ 2957 return (false); 2958 } else { 2959 vmcs_load(vmx->vmcs_pa[vcpu]); 2960 return (true); 2961 } 2962 } 2963 2964 static void 2965 vmx_vmcs_access_done(struct vmx *vmx, int vcpu) 2966 { 2967 int hostcpu; 2968 2969 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2970 if (hostcpu != curcpu) { 2971 panic("unexpected vcpu migration %d != %d", 2972 hostcpu, curcpu); 2973 } 2974 /* Later logic will take care of the unload */ 2975 } else { 2976 vmcs_clear(vmx->vmcs_pa[vcpu]); 2977 } 2978 } 2979 2980 static uint64_t * 2981 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2982 { 2983 switch (reg) { 2984 case VM_REG_GUEST_RAX: 2985 return (&vmxctx->guest_rax); 2986 case VM_REG_GUEST_RBX: 2987 return (&vmxctx->guest_rbx); 2988 case VM_REG_GUEST_RCX: 2989 return (&vmxctx->guest_rcx); 2990 case VM_REG_GUEST_RDX: 2991 return (&vmxctx->guest_rdx); 2992 case VM_REG_GUEST_RSI: 2993 return (&vmxctx->guest_rsi); 2994 case VM_REG_GUEST_RDI: 2995 return (&vmxctx->guest_rdi); 2996 case VM_REG_GUEST_RBP: 2997 return (&vmxctx->guest_rbp); 2998 case VM_REG_GUEST_R8: 2999 return (&vmxctx->guest_r8); 3000 case VM_REG_GUEST_R9: 3001 return (&vmxctx->guest_r9); 3002 case VM_REG_GUEST_R10: 3003 return (&vmxctx->guest_r10); 3004 case VM_REG_GUEST_R11: 3005 return (&vmxctx->guest_r11); 3006 case VM_REG_GUEST_R12: 3007 return (&vmxctx->guest_r12); 3008 case VM_REG_GUEST_R13: 3009 return (&vmxctx->guest_r13); 3010 case VM_REG_GUEST_R14: 3011 return (&vmxctx->guest_r14); 3012 case VM_REG_GUEST_R15: 3013 return (&vmxctx->guest_r15); 3014 case VM_REG_GUEST_CR2: 3015 return (&vmxctx->guest_cr2); 3016 case VM_REG_GUEST_DR0: 3017 return (&vmxctx->guest_dr0); 3018 case VM_REG_GUEST_DR1: 3019 return (&vmxctx->guest_dr1); 3020 case VM_REG_GUEST_DR2: 3021 return (&vmxctx->guest_dr2); 3022 case VM_REG_GUEST_DR3: 3023 return (&vmxctx->guest_dr3); 3024 case VM_REG_GUEST_DR6: 3025 return (&vmxctx->guest_dr6); 3026 default: 3027 break; 3028 } 3029 return (NULL); 3030 } 3031 3032 static int 3033 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3034 { 3035 struct vmx *vmx = arg; 3036 uint64_t *regp; 3037 3038 /* VMCS access not required for ctx reads */ 3039 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3040 *retval = *regp; 3041 return (0); 3042 } 3043 3044 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3045 int err = 0; 3046 3047 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3048 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3049 *retval = (gi & HWINTR_BLOCKING) ? 
1 : 0; 3050 } else { 3051 uint32_t encoding; 3052 3053 encoding = vmcs_field_encoding(reg); 3054 switch (encoding) { 3055 case VMCS_GUEST_CR0: 3056 /* Take the shadow bits into account */ 3057 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3058 vmcs_read(VMCS_CR0_SHADOW)); 3059 break; 3060 case VMCS_GUEST_CR4: 3061 /* Take the shadow bits into account */ 3062 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3063 vmcs_read(VMCS_CR4_SHADOW)); 3064 break; 3065 case VMCS_INVALID_ENCODING: 3066 err = EINVAL; 3067 break; 3068 default: 3069 *retval = vmcs_read(encoding); 3070 break; 3071 } 3072 } 3073 3074 if (vmcs_loaded) { 3075 vmx_vmcs_access_done(vmx, vcpu); 3076 } 3077 return (err); 3078 } 3079 3080 static int 3081 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3082 { 3083 struct vmx *vmx = arg; 3084 uint64_t *regp; 3085 3086 /* VMCS access not required for ctx writes */ 3087 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3088 *regp = val; 3089 return (0); 3090 } 3091 3092 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3093 int err = 0; 3094 3095 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3096 if (val != 0) { 3097 /* 3098 * Forcing the vcpu into an interrupt shadow is not 3099 * presently supported. 3100 */ 3101 err = EINVAL; 3102 } else { 3103 uint64_t gi; 3104 3105 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3106 gi &= ~HWINTR_BLOCKING; 3107 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3108 err = 0; 3109 } 3110 } else { 3111 uint32_t encoding; 3112 3113 err = 0; 3114 encoding = vmcs_field_encoding(reg); 3115 switch (encoding) { 3116 case VMCS_GUEST_IA32_EFER: 3117 vmcs_write(encoding, val); 3118 vmx_sync_efer_state(vmx, vcpu, val); 3119 break; 3120 case VMCS_GUEST_CR0: 3121 /* 3122 * The guest is not allowed to modify certain bits in 3123 * %cr0 and %cr4. To maintain the illusion of full 3124 * control, they have shadow versions which contain the 3125 * guest-perceived (via reads from the register) values 3126 * as opposed to the guest-effective values. 3127 * 3128 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3129 */ 3130 vmcs_write(VMCS_CR0_SHADOW, val); 3131 vmcs_write(encoding, vmx_fix_cr0(val)); 3132 break; 3133 case VMCS_GUEST_CR4: 3134 /* See above for detail on %cr4 shadowing */ 3135 vmcs_write(VMCS_CR4_SHADOW, val); 3136 vmcs_write(encoding, vmx_fix_cr4(val)); 3137 break; 3138 case VMCS_GUEST_CR3: 3139 vmcs_write(encoding, val); 3140 /* 3141 * Invalidate the guest vcpu's TLB mappings to emulate 3142 * the behavior of updating %cr3. 3143 * 3144 * XXX the processor retains global mappings when %cr3 3145 * is updated but vmx_invvpid() does not. 
3146 */ 3147 vmx_invvpid(vmx, vcpu, 3148 vcpu_is_running(vmx->vm, vcpu, NULL)); 3149 break; 3150 case VMCS_INVALID_ENCODING: 3151 err = EINVAL; 3152 break; 3153 default: 3154 vmcs_write(encoding, val); 3155 break; 3156 } 3157 } 3158 3159 if (vmcs_loaded) { 3160 vmx_vmcs_access_done(vmx, vcpu); 3161 } 3162 return (err); 3163 } 3164 3165 static int 3166 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3167 { 3168 struct vmx *vmx = arg; 3169 uint32_t base, limit, access; 3170 3171 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3172 3173 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3174 desc->base = vmcs_read(base); 3175 desc->limit = vmcs_read(limit); 3176 if (access != VMCS_INVALID_ENCODING) { 3177 desc->access = vmcs_read(access); 3178 } else { 3179 desc->access = 0; 3180 } 3181 3182 if (vmcs_loaded) { 3183 vmx_vmcs_access_done(vmx, vcpu); 3184 } 3185 return (0); 3186 } 3187 3188 static int 3189 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3190 { 3191 struct vmx *vmx = arg; 3192 uint32_t base, limit, access; 3193 3194 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3195 3196 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3197 vmcs_write(base, desc->base); 3198 vmcs_write(limit, desc->limit); 3199 if (access != VMCS_INVALID_ENCODING) { 3200 vmcs_write(access, desc->access); 3201 } 3202 3203 if (vmcs_loaded) { 3204 vmx_vmcs_access_done(vmx, vcpu); 3205 } 3206 return (0); 3207 } 3208 3209 static uint64_t * 3210 vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr) 3211 { 3212 uint64_t *guest_msrs = vmx->guest_msrs[vcpu]; 3213 3214 switch (msr) { 3215 case MSR_LSTAR: 3216 return (&guest_msrs[IDX_MSR_LSTAR]); 3217 case MSR_CSTAR: 3218 return (&guest_msrs[IDX_MSR_CSTAR]); 3219 case MSR_STAR: 3220 return (&guest_msrs[IDX_MSR_STAR]); 3221 case MSR_SF_MASK: 3222 return (&guest_msrs[IDX_MSR_SF_MASK]); 3223 case MSR_KGSBASE: 3224 return (&guest_msrs[IDX_MSR_KGSBASE]); 3225 case MSR_PAT: 3226 return (&guest_msrs[IDX_MSR_PAT]); 3227 default: 3228 return (NULL); 3229 } 3230 } 3231 3232 static int 3233 vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp) 3234 { 3235 struct vmx *vmx = arg; 3236 3237 ASSERT(valp != NULL); 3238 3239 const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3240 if (msrp != NULL) { 3241 *valp = *msrp; 3242 return (0); 3243 } 3244 3245 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3246 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3247 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3248 3249 *valp = vmcs_read(vmcs_enc); 3250 3251 if (vmcs_loaded) { 3252 vmx_vmcs_access_done(vmx, vcpu); 3253 } 3254 return (0); 3255 } 3256 3257 return (EINVAL); 3258 } 3259 3260 static int 3261 vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val) 3262 { 3263 struct vmx *vmx = arg; 3264 3265 /* TODO: mask value */ 3266 3267 uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3268 if (msrp != NULL) { 3269 *msrp = val; 3270 return (0); 3271 } 3272 3273 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3274 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3275 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3276 3277 vmcs_write(vmcs_enc, val); 3278 3279 if (msr == MSR_EFER) { 3280 vmx_sync_efer_state(vmx, vcpu, val); 3281 } 3282 3283 if (vmcs_loaded) { 3284 vmx_vmcs_access_done(vmx, vcpu); 3285 } 3286 return (0); 3287 } 3288 return (EINVAL); 3289 } 3290 3291 static int 3292 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3293 { 3294 struct vmx *vmx = arg; 3295 int vcap; 3296 int ret; 3297 3298 ret = ENOENT; 3299 3300 
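/* Capability bits previously recorded for this vcpu by vmx_setcap() */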
vcap = vmx->cap[vcpu].set; 3301 3302 switch (type) { 3303 case VM_CAP_HALT_EXIT: 3304 if (cap_halt_exit) 3305 ret = 0; 3306 break; 3307 case VM_CAP_PAUSE_EXIT: 3308 if (cap_pause_exit) 3309 ret = 0; 3310 break; 3311 case VM_CAP_MTRAP_EXIT: 3312 if (cap_monitor_trap) 3313 ret = 0; 3314 break; 3315 case VM_CAP_ENABLE_INVPCID: 3316 if (cap_invpcid) 3317 ret = 0; 3318 break; 3319 case VM_CAP_BPT_EXIT: 3320 ret = 0; 3321 break; 3322 default: 3323 break; 3324 } 3325 3326 if (ret == 0) 3327 *retval = (vcap & (1 << type)) ? 1 : 0; 3328 3329 return (ret); 3330 } 3331 3332 static int 3333 vmx_setcap(void *arg, int vcpu, int type, int val) 3334 { 3335 struct vmx *vmx = arg; 3336 uint32_t baseval, reg, flag; 3337 uint32_t *pptr; 3338 int error; 3339 3340 error = ENOENT; 3341 pptr = NULL; 3342 3343 switch (type) { 3344 case VM_CAP_HALT_EXIT: 3345 if (cap_halt_exit) { 3346 error = 0; 3347 pptr = &vmx->cap[vcpu].proc_ctls; 3348 baseval = *pptr; 3349 flag = PROCBASED_HLT_EXITING; 3350 reg = VMCS_PRI_PROC_BASED_CTLS; 3351 } 3352 break; 3353 case VM_CAP_MTRAP_EXIT: 3354 if (cap_monitor_trap) { 3355 error = 0; 3356 pptr = &vmx->cap[vcpu].proc_ctls; 3357 baseval = *pptr; 3358 flag = PROCBASED_MTF; 3359 reg = VMCS_PRI_PROC_BASED_CTLS; 3360 } 3361 break; 3362 case VM_CAP_PAUSE_EXIT: 3363 if (cap_pause_exit) { 3364 error = 0; 3365 pptr = &vmx->cap[vcpu].proc_ctls; 3366 baseval = *pptr; 3367 flag = PROCBASED_PAUSE_EXITING; 3368 reg = VMCS_PRI_PROC_BASED_CTLS; 3369 } 3370 break; 3371 case VM_CAP_ENABLE_INVPCID: 3372 if (cap_invpcid) { 3373 error = 0; 3374 pptr = &vmx->cap[vcpu].proc_ctls2; 3375 baseval = *pptr; 3376 flag = PROCBASED2_ENABLE_INVPCID; 3377 reg = VMCS_SEC_PROC_BASED_CTLS; 3378 } 3379 break; 3380 case VM_CAP_BPT_EXIT: 3381 error = 0; 3382 3383 /* Don't change the bitmap if we are tracing all exceptions. 
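(An exc_bitmap of 0xffffffff already exits on every vector, including #BP, so there is nothing to toggle.)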
*/
3384 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) {
3385 pptr = &vmx->cap[vcpu].exc_bitmap;
3386 baseval = *pptr;
3387 flag = (1 << IDT_BP);
3388 reg = VMCS_EXCEPTION_BITMAP;
3389 }
3390 break;
3391 default:
3392 break;
3393 }
3394
3395 if (error != 0) {
3396 return (error);
3397 }
3398
3399 if (pptr != NULL) {
3400 if (val) {
3401 baseval |= flag;
3402 } else {
3403 baseval &= ~flag;
3404 }
3405 vmcs_load(vmx->vmcs_pa[vcpu]);
3406 vmcs_write(reg, baseval);
3407 vmcs_clear(vmx->vmcs_pa[vcpu]);
3408
3409 /*
3410 * Update the stored copy of the flags and record the
3411 * new setting.
3412 */
3413 *pptr = baseval;
3414 }
3415
3416 if (val) {
3417 vmx->cap[vcpu].set |= (1 << type);
3418 } else {
3419 vmx->cap[vcpu].set &= ~(1 << type);
3420 }
3421
3422 return (0);
3423 }
3424
3425 struct vlapic_vtx {
3426 struct vlapic vlapic;
3427
3428 /* Align to the nearest cacheline */
3429 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)];
3430
3431 /* TMR handling state for posted interrupts */
3432 uint32_t tmr_active[8];
3433 uint32_t pending_level[8];
3434 uint32_t pending_edge[8];
3435
3436 struct pir_desc *pir_desc;
3437 struct vmx *vmx;
3438 uint_t pending_prio;
3439 boolean_t tmr_sync;
3440 };
3441
3442 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0);
3443
3444 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4))
3445
3446 static vcpu_notify_t
3447 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level)
3448 {
3449 struct vlapic_vtx *vlapic_vtx;
3450 struct pir_desc *pir_desc;
3451 uint32_t mask, tmrval;
3452 int idx;
3453 vcpu_notify_t notify = VCPU_NOTIFY_NONE;
3454
3455 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3456 pir_desc = vlapic_vtx->pir_desc;
3457 idx = vector / 32;
3458 mask = 1UL << (vector % 32);
3459
3460 /*
3461 * If the currently asserted TMRs do not match the state requested by
3462 * the incoming interrupt, an exit will be required to reconcile those
3463 * bits in the APIC page. This will keep the vLAPIC behavior in line
3464 * with the architecturally defined expectations.
3465 *
3466 * If actors of mixed types (edge and level) are racing against the same
3467 * vector (toggling its TMR bit back and forth), the results could be
3468 * inconsistent. Such circumstances are considered a rare edge case and
3469 * are never expected to be found in the wild.
3470 */
3471 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]);
3472 if (!level) {
3473 if ((tmrval & mask) != 0) {
3474 /* Edge-triggered interrupt needs TMR de-asserted */
3475 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask);
3476 atomic_store_rel_long(&pir_desc->pending, 1);
3477 return (VCPU_NOTIFY_EXIT);
3478 }
3479 } else {
3480 if ((tmrval & mask) == 0) {
3481 /* Level-triggered interrupt needs TMR asserted */
3482 atomic_set_int(&vlapic_vtx->pending_level[idx], mask);
3483 atomic_store_rel_long(&pir_desc->pending, 1);
3484 return (VCPU_NOTIFY_EXIT);
3485 }
3486 }
3487
3488 /*
3489 * If the interrupt request does not require manipulation of the TMRs
3490 * for delivery, set it in the PIR descriptor. It cannot be inserted
3491 * into the APIC page while the vCPU might be running.
3492 */
3493 atomic_set_int(&pir_desc->pir[idx], mask);
3494
3495 /*
3496 * A notification is required whenever the 'pending' bit makes a
3497 * transition from 0->1.
3498 *
3499 * Even if the 'pending' bit is already asserted, notification about
3500 * the incoming interrupt may still be necessary.
For example, if a 3501 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3502 * the 0->1 'pending' transition with a notification, but the vCPU 3503 * would ignore the interrupt for the time being. The same vCPU would 3504 * need to then be notified if a high-priority interrupt arrived which 3505 * satisfied the PPR. 3506 * 3507 * The priorities of interrupts injected while 'pending' is asserted 3508 * are tracked in a custom bitfield 'pending_prio'. Should the 3509 * to-be-injected interrupt exceed the priorities already present, the 3510 * notification is sent. The priorities recorded in 'pending_prio' are 3511 * cleared whenever the 'pending' bit makes another 0->1 transition. 3512 */ 3513 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3514 notify = VCPU_NOTIFY_APIC; 3515 vlapic_vtx->pending_prio = 0; 3516 } else { 3517 const uint_t old_prio = vlapic_vtx->pending_prio; 3518 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3519 3520 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3521 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3522 notify = VCPU_NOTIFY_APIC; 3523 } 3524 } 3525 3526 return (notify); 3527 } 3528 3529 static void 3530 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3531 { 3532 /* 3533 * When APICv is enabled for an instance, the traditional interrupt 3534 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3535 * used and the CPU does the heavy lifting of virtual interrupt 3536 * delivery. For that reason vmx_intr_accepted() should never be called 3537 * when APICv is enabled. 3538 */ 3539 panic("vmx_intr_accepted: not expected to be called"); 3540 } 3541 3542 static void 3543 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3544 { 3545 struct vlapic_vtx *vlapic_vtx; 3546 const uint32_t *tmrs; 3547 3548 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3549 tmrs = &vlapic_vtx->tmr_active[0]; 3550 3551 if (!vlapic_vtx->tmr_sync) { 3552 return; 3553 } 3554 3555 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3556 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3557 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3558 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3559 vlapic_vtx->tmr_sync = B_FALSE; 3560 } 3561 3562 static void 3563 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3564 { 3565 struct vmx *vmx; 3566 uint32_t proc_ctls; 3567 int vcpuid; 3568 3569 vcpuid = vlapic->vcpuid; 3570 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3571 3572 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3573 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3574 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3575 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3576 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3577 3578 vmcs_load(vmx->vmcs_pa[vcpuid]); 3579 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3580 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3581 } 3582 3583 static void 3584 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3585 { 3586 struct vmx *vmx; 3587 uint32_t proc_ctls2; 3588 int vcpuid; 3589 3590 vcpuid = vlapic->vcpuid; 3591 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3592 3593 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3594 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3595 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3596 3597 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3598 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3599 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3600 3601 vmcs_load(vmx->vmcs_pa[vcpuid]); 3602 
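/* Push the updated secondary controls into the now-loaded VMCS. */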
vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3603 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3604 3605 vmx_allow_x2apic_msrs(vmx, vcpuid); 3606 } 3607 3608 static void 3609 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3610 { 3611 psm_send_pir_ipi(hostcpu); 3612 } 3613 3614 static void 3615 vmx_apicv_sync(struct vlapic *vlapic) 3616 { 3617 struct vlapic_vtx *vlapic_vtx; 3618 struct pir_desc *pir_desc; 3619 struct LAPIC *lapic; 3620 uint_t i; 3621 3622 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3623 pir_desc = vlapic_vtx->pir_desc; 3624 lapic = vlapic->apic_page; 3625 3626 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3627 return; 3628 } 3629 3630 vlapic_vtx->pending_prio = 0; 3631 3632 /* Make sure the invalid (0-15) vectors are not set */ 3633 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3634 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3635 ASSERT0(pir_desc->pir[0] & 0xffff); 3636 3637 for (i = 0; i <= 7; i++) { 3638 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3639 uint32_t *irrp = &lapic->irr0 + (i * 4); 3640 3641 const uint32_t pending_level = 3642 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3643 const uint32_t pending_edge = 3644 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3645 const uint32_t pending_inject = 3646 atomic_readandclear_int(&pir_desc->pir[i]); 3647 3648 if (pending_level != 0) { 3649 /* 3650 * Level-triggered interrupts assert their corresponding 3651 * bit in the TMR when queued in IRR. 3652 */ 3653 *tmrp |= pending_level; 3654 *irrp |= pending_level; 3655 } 3656 if (pending_edge != 0) { 3657 /* 3658 * When queuing an edge-triggered interrupt in IRR, the 3659 * corresponding bit in the TMR is cleared. 3660 */ 3661 *tmrp &= ~pending_edge; 3662 *irrp |= pending_edge; 3663 } 3664 if (pending_inject != 0) { 3665 /* 3666 * Interrupts which do not require a change to the TMR 3667 * (because it already matches the necessary state) can 3668 * simply be queued in IRR. 3669 */ 3670 *irrp |= pending_inject; 3671 } 3672 3673 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3674 /* Check if VMX EOI triggers require updating. */ 3675 vlapic_vtx->tmr_active[i] = *tmrp; 3676 vlapic_vtx->tmr_sync = B_TRUE; 3677 } 3678 } 3679 } 3680 3681 static void 3682 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3683 { 3684 /* 3685 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3686 * TPR falls below a threshold priority. That threshold is set to the 3687 * current TPR priority, since guest interrupt status should be 3688 * re-evaluated if its TPR is set lower. 3689 */ 3690 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3691 } 3692 3693 static void 3694 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3695 { 3696 /* 3697 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3698 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3699 * the PPR is updated to reflect any change in the TPR here. 
3700 */ 3701 vlapic_sync_tpr(vlapic); 3702 } 3703 3704 static struct vlapic * 3705 vmx_vlapic_init(void *arg, int vcpuid) 3706 { 3707 struct vmx *vmx = arg; 3708 struct vlapic_vtx *vlapic_vtx; 3709 struct vlapic *vlapic; 3710 3711 vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP); 3712 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3713 vlapic_vtx->vmx = vmx; 3714 3715 vlapic = &vlapic_vtx->vlapic; 3716 vlapic->vm = vmx->vm; 3717 vlapic->vcpuid = vcpuid; 3718 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3719 3720 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3721 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3722 } 3723 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3724 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3725 vlapic->ops.sync_state = vmx_apicv_sync; 3726 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3727 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3728 3729 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3730 vlapic->ops.post_intr = vmx_apicv_notify; 3731 } 3732 } 3733 3734 vlapic_init(vlapic); 3735 3736 return (vlapic); 3737 } 3738 3739 static void 3740 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3741 { 3742 vlapic_cleanup(vlapic); 3743 kmem_free(vlapic, sizeof (struct vlapic_vtx)); 3744 } 3745 3746 static void 3747 vmx_savectx(void *arg, int vcpu) 3748 { 3749 struct vmx *vmx = arg; 3750 3751 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3752 vmcs_clear(vmx->vmcs_pa[vcpu]); 3753 vmx_msr_guest_exit(vmx, vcpu); 3754 /* 3755 * Having VMCLEARed the VMCS, it can no longer be re-entered 3756 * with VMRESUME, but must be VMLAUNCHed again. 3757 */ 3758 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3759 } 3760 3761 reset_gdtr_limit(); 3762 } 3763 3764 static void 3765 vmx_restorectx(void *arg, int vcpu) 3766 { 3767 struct vmx *vmx = arg; 3768 3769 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3770 3771 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3772 vmx_msr_guest_enter(vmx, vcpu); 3773 vmcs_load(vmx->vmcs_pa[vcpu]); 3774 } 3775 } 3776 3777 struct vmm_ops vmm_ops_intel = { 3778 .init = vmx_init, 3779 .cleanup = vmx_cleanup, 3780 .resume = vmx_restore, 3781 3782 .vminit = vmx_vminit, 3783 .vmrun = vmx_run, 3784 .vmcleanup = vmx_vmcleanup, 3785 .vmgetreg = vmx_getreg, 3786 .vmsetreg = vmx_setreg, 3787 .vmgetdesc = vmx_getdesc, 3788 .vmsetdesc = vmx_setdesc, 3789 .vmgetcap = vmx_getcap, 3790 .vmsetcap = vmx_setcap, 3791 .vlapic_init = vmx_vlapic_init, 3792 .vlapic_cleanup = vmx_vlapic_cleanup, 3793 3794 .vmsavectx = vmx_savectx, 3795 .vmrestorectx = vmx_restorectx, 3796 3797 .vmgetmsr = vmx_msr_get, 3798 .vmsetmsr = vmx_msr_set, 3799 }; 3800 3801 /* Side-effect free HW validation derived from checks in vmx_init. 
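Each vmx_set_ctlreg() probe below only reads the relevant capability MSRs; no CPU or VMCS state is modified.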
*/ 3802 int 3803 vmx_x86_supported(const char **msg) 3804 { 3805 int error; 3806 uint32_t tmp; 3807 3808 ASSERT(msg != NULL); 3809 3810 /* Check support for primary processor-based VM-execution controls */ 3811 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3812 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3813 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3814 if (error) { 3815 *msg = "processor does not support desired primary " 3816 "processor-based controls"; 3817 return (error); 3818 } 3819 3820 /* Check support for secondary processor-based VM-execution controls */ 3821 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3822 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3823 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3824 if (error) { 3825 *msg = "processor does not support desired secondary " 3826 "processor-based controls"; 3827 return (error); 3828 } 3829 3830 /* Check support for pin-based VM-execution controls */ 3831 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3832 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3833 PINBASED_CTLS_ZERO_SETTING, &tmp); 3834 if (error) { 3835 *msg = "processor does not support desired pin-based controls"; 3836 return (error); 3837 } 3838 3839 /* Check support for VM-exit controls */ 3840 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3841 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3842 if (error) { 3843 *msg = "processor does not support desired exit controls"; 3844 return (error); 3845 } 3846 3847 /* Check support for VM-entry controls */ 3848 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3849 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3850 if (error) { 3851 *msg = "processor does not support desired entry controls"; 3852 return (error); 3853 } 3854 3855 /* Unrestricted guest is nominally optional, but not for us. */ 3856 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3857 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3858 if (error) { 3859 *msg = "processor does not support desired unrestricted guest " 3860 "controls"; 3861 return (error); 3862 } 3863 3864 return (0); 3865 } 3866