/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING \
	(PINBASED_EXTINT_EXITING | \
	 PINBASED_NMI_EXITING | \
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING \
	(PROCBASED_INT_WINDOW_EXITING | \
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING \
	(PROCBASED_SECONDARY_CONTROLS | \
	 PROCBASED_MWAIT_EXITING | \
	 PROCBASED_MONITOR_EXITING | \
	 PROCBASED_IO_EXITING | \
	 PROCBASED_MSR_BITMAPS | \
	 PROCBASED_CTLS_WINDOW_SETTING | \
	 PROCBASED_CR8_LOAD_EXITING | \
	 PROCBASED_CR8_STORE_EXITING)
#define	PROCBASED_CTLS_ZERO_SETTING \
	(PROCBASED_CR3_LOAD_EXITING | \
	 PROCBASED_CR3_STORE_EXITING | \
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING \
	(VM_EXIT_HOST_LMA | \
	 VM_EXIT_SAVE_EFER | \
	 VM_EXIT_LOAD_EFER | \
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING \
	(VM_ENTRY_LOAD_DEBUG_CONTROLS | \
	 VM_ENTRY_INTO_SMM | \
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);

static int cap_halt_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
    "HLT triggers a VM-exit");

static int cap_pause_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
    0, "PAUSE triggers a VM-exit");

static int cap_unrestricted_guest;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
    &cap_unrestricted_guest, 0, "Unrestricted guests");

static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");

static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec = -1;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE_DURING_ENTRY:
		return "mce-during-entry";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec >= 0)
		lapic_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}
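
/*
 * Module initialization: verify that the processor supports VMX and the
 * control settings required above, detect optional capabilities, set up
 * EPT and the VPID allocator, and enable VMX operation on all host cpus.
 */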
static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/*
	 * Verify capabilities MSR_VMX_BASIC:
	 * - bit 54 indicates support for INS/OUTS decoding
	 */
	basic = rdmsr(MSR_VMX_BASIC);
	if ((basic & (1UL << 54)) == 0) {
		printf("vmx_init: processor does not support desired basic "
		    "capabilities\n");
		return (EINVAL);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * No need to emulate accesses to %CR8 if virtual
		 * interrupt delivery is enabled.
		 */
		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = lapic_ipi_alloc(&IDTVEC(justreturn));
			if (pirvec < 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	vmx_msr_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error;
	struct vmx *vmx;
	struct vmcs *vmcs;
	uint32_t exc_bitmap;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as that
	 * will impact the host TSC.
	 * XXX Writes would be implemented with a wrmsr trap, and
	 * then modifying the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		vmx_msr_guest_init(vmx, i);

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
	    (uint32_t*)(&vmxctx->guest_rax),
	    (uint32_t*)(&vmxctx->guest_rbx),
	    (uint32_t*)(&vmxctx->guest_rcx),
	    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
    int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
	    handled ? "handled" : "unhandled",
	    exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
	    "critical section", __func__, vcpu));

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	}
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint32_t gi, info;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
	    "interruptibility-state %#x", gi));

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
	    "VM-entry interruption information %#x", info));

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
    uint64_t guestrip)
{
	int vector, need_nmi_exiting, extint_pending;
	uint64_t rflags, entryinfo;
	uint32_t gi, info;

	if (vmx->state[vcpu].nextrip != guestrip) {
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if (gi & HWINTR_BLOCKING) {
			VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
			    "cleared due to rip change: %#lx/%#lx",
			    vmx->state[vcpu].nextrip, guestrip);
			gi &= ~HWINTR_BLOCKING;
			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
		}
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
		    "intinfo is not valid: %#lx", __func__, entryinfo));

		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
		    "pending exception: %#lx/%#x", __func__, entryinfo, info));

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE)
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		need_nmi_exiting = 1;
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
			if ((info & VMCS_INTR_VALID) == 0) {
				vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting)
			vmx_set_nmi_window_exiting(vmx, vcpu);
	}

	extint_pending = vm_extint_pending(vmx->vm, vcpu);

	if (!extint_pending && virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/*
	 * If interrupt-window exiting is already in effect then don't bother
	 * checking for pending interrupts. This is just an optimization and
	 * not needed for correctness.
	 */
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
		    "pending int_window_exiting");
		return;
	}

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [16,255] can be delivered
		 *   through the local APIC.
		 */
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, rflags);
		goto cantinject;
	}

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (gi & HWINTR_BLOCKING) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "Guest Interruptibility-state %#x", vector, gi);
		goto cantinject;
	}

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID) {
		/*
		 * This is expected and could happen for multiple reasons:
		 * - A vectoring VM-entry was aborted due to astpending
		 * - A VM-exit happened during event injection.
		 * - An exception was injected above.
		 * - An NMI was injected above or after "NMI window exiting"
		 */
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "VM-entry intr info %#x", vector, info);
		goto cantinject;
	}

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);

		/*
		 * After we accepted the current ExtINT the PIC may
		 * have posted another one. If that is the case, set
		 * the Interrupt Window Exiting execution control so
		 * we can inject that one too.
		 *
		 * Also, interrupt window exiting allows us to inject any
		 * pending APIC vector that was preempted by the ExtINT
		 * as soon as possible. This applies both for the software
		 * emulated vlapic and the hardware assisted virtual APIC.
		 */
		vmx_set_int_window_exiting(vmx, vcpu);
	}

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %#x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}
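
/*
 * Helpers to decode the guest state needed to emulate INS/OUTS: the index
 * register (%rdi for INS, %rsi for OUTS), the repeat count, the address
 * size and the segment, taken from the VM-exit instruction information.
 */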
static uint64_t
inout_str_index(struct vmx *vmx, int vcpuid, int in)
{
	uint64_t val;
	int error;
	enum vm_reg_name reg;

	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
	error = vmx_getreg(vmx, vcpuid, reg, &val);
	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
	return (val);
}

static uint64_t
inout_str_count(struct vmx *vmx, int vcpuid, int rep)
{
	uint64_t val;
	int error;

	if (rep) {
		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
	} else {
		val = 1;
	}
	return (val);
}

static int
inout_str_addrsize(uint32_t inst_info)
{
	uint32_t size;

	size = (inst_info >> 7) & 0x7;
	switch (size) {
	case 0:
		return (2);	/* 16 bit */
	case 1:
		return (4);	/* 32 bit */
	case 2:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
    struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		s = (inst_info >> 15) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
{
	struct vm_guest_paging *paging;
	uint32_t csar;

	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = gla;
	vmx_paging_info(paging);
	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	int error, handled, offset;
	uint32_t *apic_regs, vector;
	bool retu;

	handled = HANDLED;
	offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
		 */
		if (x2apic_virtualization(vmx, vcpuid) &&
		    offset == APIC_OFFSET_SELF_IPI) {
			apic_regs = (uint32_t *)(vlapic->apic_page);
			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
			vlapic_self_ipi_handler(vlapic, vector);
			return (HANDLED);
		} else
			return (UNHANDLED);
	}

	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = UNHANDLED;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = UNHANDLED;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
{

	if (apic_access_virtualization(vmx, vcpuid) &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;

	if (!apic_access_virtualization(vmx, vcpuid))
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
1963 */ 1964 switch (offset) { 1965 case APIC_OFFSET_APR: 1966 case APIC_OFFSET_PPR: 1967 case APIC_OFFSET_RRR: 1968 case APIC_OFFSET_CMCI_LVT: 1969 case APIC_OFFSET_TIMER_CCR: 1970 allowed = 1; 1971 break; 1972 default: 1973 break; 1974 } 1975 } else if (access_type == 1) { 1976 /* 1977 * Write data access to the following registers is expected. 1978 */ 1979 switch (offset) { 1980 case APIC_OFFSET_VER: 1981 case APIC_OFFSET_APR: 1982 case APIC_OFFSET_PPR: 1983 case APIC_OFFSET_RRR: 1984 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 1985 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 1986 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 1987 case APIC_OFFSET_CMCI_LVT: 1988 case APIC_OFFSET_TIMER_CCR: 1989 allowed = 1; 1990 break; 1991 default: 1992 break; 1993 } 1994 } 1995 1996 if (allowed) { 1997 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 1998 VIE_INVALID_GLA); 1999 } 2000 2001 /* 2002 * Regardless of whether the APIC-access is allowed this handler 2003 * always returns UNHANDLED: 2004 * - if the access is allowed then it is handled by emulating the 2005 * instruction that caused the VM-exit (outside the critical section) 2006 * - if the access is not allowed then it will be converted to an 2007 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2008 */ 2009 return (UNHANDLED); 2010 } 2011 2012 static enum task_switch_reason 2013 vmx_task_switch_reason(uint64_t qual) 2014 { 2015 int reason; 2016 2017 reason = (qual >> 30) & 0x3; 2018 switch (reason) { 2019 case 0: 2020 return (TSR_CALL); 2021 case 1: 2022 return (TSR_IRET); 2023 case 2: 2024 return (TSR_JMP); 2025 case 3: 2026 return (TSR_IDT_GATE); 2027 default: 2028 panic("%s: invalid reason %d", __func__, reason); 2029 } 2030 } 2031 2032 static int 2033 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu) 2034 { 2035 int error; 2036 2037 if (lapic_msr(num)) 2038 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu); 2039 else 2040 error = vmx_wrmsr(vmx, vcpuid, num, val, retu); 2041 2042 return (error); 2043 } 2044 2045 static int 2046 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu) 2047 { 2048 struct vmxctx *vmxctx; 2049 uint64_t result; 2050 uint32_t eax, edx; 2051 int error; 2052 2053 if (lapic_msr(num)) 2054 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu); 2055 else 2056 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu); 2057 2058 if (error == 0) { 2059 eax = result; 2060 vmxctx = &vmx->ctx[vcpuid]; 2061 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); 2062 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); 2063 2064 edx = result >> 32; 2065 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); 2066 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); 2067 } 2068 2069 return (error); 2070 } 2071 2072 static int 2073 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2074 { 2075 int error, errcode, errcode_valid, handled, in; 2076 struct vmxctx *vmxctx; 2077 struct vlapic *vlapic; 2078 struct vm_inout_str *vis; 2079 struct vm_task_switch *ts; 2080 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2081 uint32_t intr_type, intr_vec, reason; 2082 uint64_t exitintinfo, qual, gpa; 2083 bool retu; 2084 2085 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2086 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2087 2088 handled = UNHANDLED; 2089 vmxctx = &vmx->ctx[vcpu]; 2090 2091 qual = vmexit->u.vmx.exit_qualification; 2092 reason = vmexit->u.vmx.exit_reason; 2093 
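	/*
	 * The exitcode starts out as BOGUS.  Exits that are handled
	 * entirely in the kernel leave it unchanged and set 'handled',
	 * while exits that must be completed in userland overwrite it
	 * with a specific exitcode; the pairing of 'handled' and the
	 * exitcode is asserted in vmx_run().
	 */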
vmexit->exitcode = VM_EXITCODE_BOGUS; 2094 2095 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2096 2097 /* 2098 * VM-entry failures during or after loading guest state. 2099 * 2100 * These VM-exits are uncommon but must be handled specially 2101 * as most VM-exit fields are not populated as usual. 2102 */ 2103 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2104 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2105 __asm __volatile("int $18"); 2106 return (1); 2107 } 2108 2109 /* 2110 * VM exits that can be triggered during event delivery need to 2111 * be handled specially by re-injecting the event if the IDT 2112 * vectoring information field's valid bit is set. 2113 * 2114 * See "Information for VM Exits During Event Delivery" in Intel SDM 2115 * for details. 2116 */ 2117 idtvec_info = vmcs_idt_vectoring_info(); 2118 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2119 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2120 exitintinfo = idtvec_info; 2121 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2122 idtvec_err = vmcs_idt_vectoring_err(); 2123 exitintinfo |= (uint64_t)idtvec_err << 32; 2124 } 2125 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2126 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2127 __func__, error)); 2128 2129 /* 2130 * If 'virtual NMIs' are being used and the VM-exit 2131 * happened while injecting an NMI during the previous 2132 * VM-entry, then clear "blocking by NMI" in the 2133 * Guest Interruptibility-State so the NMI can be 2134 * reinjected on the subsequent VM-entry. 2135 * 2136 * However, if the NMI was being delivered through a task 2137 * gate, then the new task must start execution with NMIs 2138 * blocked so don't clear NMI blocking in this case. 2139 */ 2140 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2141 if (intr_type == VMCS_INTR_T_NMI) { 2142 if (reason != EXIT_REASON_TASK_SWITCH) 2143 vmx_clear_nmi_blocking(vmx, vcpu); 2144 else 2145 vmx_assert_nmi_blocking(vmx, vcpu); 2146 } 2147 2148 /* 2149 * Update VM-entry instruction length if the event being 2150 * delivered was a software interrupt or software exception. 2151 */ 2152 if (intr_type == VMCS_INTR_T_SWINTR || 2153 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2154 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2155 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2156 } 2157 } 2158 2159 switch (reason) { 2160 case EXIT_REASON_TASK_SWITCH: 2161 ts = &vmexit->u.task_switch; 2162 ts->tsssel = qual & 0xffff; 2163 ts->reason = vmx_task_switch_reason(qual); 2164 ts->ext = 0; 2165 ts->errcode_valid = 0; 2166 vmx_paging_info(&ts->paging); 2167 /* 2168 * If the task switch was due to a CALL, JMP, IRET, software 2169 * interrupt (INT n) or software exception (INT3, INTO), 2170 * then the saved %rip references the instruction that caused 2171 * the task switch. The instruction length field in the VMCS 2172 * is valid in this case. 2173 * 2174 * In all other cases (e.g., NMI, hardware exception) the 2175 * saved %rip is one that would have been saved in the old TSS 2176 * had the task switch completed normally so the instruction 2177 * length field is not needed in this case and is explicitly 2178 * set to 0. 
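		 *
		 * For a task switch through the IDT the distinction is made
		 * below by checking the IDT-vectoring information: a software
		 * interrupt or software exception keeps the instruction
		 * length, anything else is treated as an external event and
		 * the length is cleared.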
2179 */ 2180 if (ts->reason == TSR_IDT_GATE) { 2181 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2182 ("invalid idtvec_info %#x for IDT task switch", 2183 idtvec_info)); 2184 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2185 if (intr_type != VMCS_INTR_T_SWINTR && 2186 intr_type != VMCS_INTR_T_SWEXCEPTION && 2187 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2188 /* Task switch triggered by external event */ 2189 ts->ext = 1; 2190 vmexit->inst_length = 0; 2191 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2192 ts->errcode_valid = 1; 2193 ts->errcode = vmcs_idt_vectoring_err(); 2194 } 2195 } 2196 } 2197 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2198 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2199 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2200 ts->ext ? "external" : "internal", 2201 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2202 break; 2203 case EXIT_REASON_CR_ACCESS: 2204 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2205 switch (qual & 0xf) { 2206 case 0: 2207 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2208 break; 2209 case 4: 2210 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2211 break; 2212 case 8: 2213 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2214 break; 2215 } 2216 break; 2217 case EXIT_REASON_RDMSR: 2218 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2219 retu = false; 2220 ecx = vmxctx->guest_rcx; 2221 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2222 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2223 if (error) { 2224 vmexit->exitcode = VM_EXITCODE_RDMSR; 2225 vmexit->u.msr.code = ecx; 2226 } else if (!retu) { 2227 handled = HANDLED; 2228 } else { 2229 /* Return to userspace with a valid exitcode */ 2230 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2231 ("emulate_rdmsr retu with bogus exitcode")); 2232 } 2233 break; 2234 case EXIT_REASON_WRMSR: 2235 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2236 retu = false; 2237 eax = vmxctx->guest_rax; 2238 ecx = vmxctx->guest_rcx; 2239 edx = vmxctx->guest_rdx; 2240 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2241 ecx, (uint64_t)edx << 32 | eax); 2242 error = emulate_wrmsr(vmx, vcpu, ecx, 2243 (uint64_t)edx << 32 | eax, &retu); 2244 if (error) { 2245 vmexit->exitcode = VM_EXITCODE_WRMSR; 2246 vmexit->u.msr.code = ecx; 2247 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2248 } else if (!retu) { 2249 handled = HANDLED; 2250 } else { 2251 /* Return to userspace with a valid exitcode */ 2252 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2253 ("emulate_wrmsr retu with bogus exitcode")); 2254 } 2255 break; 2256 case EXIT_REASON_HLT: 2257 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2258 vmexit->exitcode = VM_EXITCODE_HLT; 2259 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2260 break; 2261 case EXIT_REASON_MTF: 2262 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2263 vmexit->exitcode = VM_EXITCODE_MTRAP; 2264 vmexit->inst_length = 0; 2265 break; 2266 case EXIT_REASON_PAUSE: 2267 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2268 vmexit->exitcode = VM_EXITCODE_PAUSE; 2269 break; 2270 case EXIT_REASON_INTR_WINDOW: 2271 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2272 vmx_clear_int_window_exiting(vmx, vcpu); 2273 return (1); 2274 case EXIT_REASON_EXT_INTR: 2275 /* 2276 * External interrupts serve only to cause VM exits and allow 2277 * the host interrupt handler to run. 2278 * 2279 * If this external interrupt triggers a virtual interrupt 2280 * to a VM, then that state will be recorded by the 2281 * host interrupt handler in the VM's softc. 
We will inject 2282 * this virtual interrupt during the subsequent VM enter. 2283 */ 2284 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2285 2286 /* 2287 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2288 * This appears to be a bug in VMware Fusion? 2289 */ 2290 if (!(intr_info & VMCS_INTR_VALID)) 2291 return (1); 2292 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2293 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2294 ("VM exit interruption info invalid: %#x", intr_info)); 2295 vmx_trigger_hostintr(intr_info & 0xff); 2296 2297 /* 2298 * This is special. We want to treat this as an 'handled' 2299 * VM-exit but not increment the instruction pointer. 2300 */ 2301 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2302 return (1); 2303 case EXIT_REASON_NMI_WINDOW: 2304 /* Exit to allow the pending virtual NMI to be injected */ 2305 if (vm_nmi_pending(vmx->vm, vcpu)) 2306 vmx_inject_nmi(vmx, vcpu); 2307 vmx_clear_nmi_window_exiting(vmx, vcpu); 2308 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2309 return (1); 2310 case EXIT_REASON_INOUT: 2311 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2312 vmexit->exitcode = VM_EXITCODE_INOUT; 2313 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2314 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2315 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2316 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; 2317 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2318 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2319 if (vmexit->u.inout.string) { 2320 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2321 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2322 vis = &vmexit->u.inout_str; 2323 vmx_paging_info(&vis->paging); 2324 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2325 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2326 vis->index = inout_str_index(vmx, vcpu, in); 2327 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); 2328 vis->addrsize = inout_str_addrsize(inst_info); 2329 inout_str_seginfo(vmx, vcpu, inst_info, in, vis); 2330 } 2331 break; 2332 case EXIT_REASON_CPUID: 2333 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2334 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2335 break; 2336 case EXIT_REASON_EXCEPTION: 2337 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2338 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2339 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2340 ("VM exit interruption info invalid: %#x", intr_info)); 2341 2342 intr_vec = intr_info & 0xff; 2343 intr_type = intr_info & VMCS_INTR_T_MASK; 2344 2345 /* 2346 * If Virtual NMIs control is 1 and the VM-exit is due to a 2347 * fault encountered during the execution of IRET then we must 2348 * restore the state of "virtual-NMI blocking" before resuming 2349 * the guest. 2350 * 2351 * See "Resuming Guest Software after Handling an Exception". 2352 * See "Information for VM Exits Due to Vectored Events". 2353 */ 2354 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2355 (intr_vec != IDT_DF) && 2356 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2357 vmx_restore_nmi_blocking(vmx, vcpu); 2358 2359 /* 2360 * The NMI has already been handled in vmx_exit_handle_nmi(). 2361 */ 2362 if (intr_type == VMCS_INTR_T_NMI) 2363 return (1); 2364 2365 /* 2366 * Call the machine check handler by hand. Also don't reflect 2367 * the machine check back into the guest. 
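		 * The vectoring is done with "int $18" because vector 18 is
		 * the machine check exception (IDT_MC).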
2368 */ 2369 if (intr_vec == IDT_MC) { 2370 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2371 __asm __volatile("int $18"); 2372 return (1); 2373 } 2374 2375 if (intr_vec == IDT_PF) { 2376 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); 2377 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", 2378 __func__, error)); 2379 } 2380 2381 /* 2382 * Software exceptions exhibit trap-like behavior. This in 2383 * turn requires populating the VM-entry instruction length 2384 * so that the %rip in the trap frame is past the INT3/INTO 2385 * instruction. 2386 */ 2387 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2388 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2389 2390 /* Reflect all other exceptions back into the guest */ 2391 errcode_valid = errcode = 0; 2392 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2393 errcode_valid = 1; 2394 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2395 } 2396 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " 2397 "the guest", intr_vec, errcode); 2398 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2399 errcode_valid, errcode, 0); 2400 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2401 __func__, error)); 2402 return (1); 2403 2404 case EXIT_REASON_EPT_FAULT: 2405 /* 2406 * If 'gpa' lies within the address space allocated to 2407 * memory then this must be a nested page fault otherwise 2408 * this must be an instruction that accesses MMIO space. 2409 */ 2410 gpa = vmcs_gpa(); 2411 if (vm_mem_allocated(vmx->vm, gpa) || 2412 apic_access_fault(vmx, vcpu, gpa)) { 2413 vmexit->exitcode = VM_EXITCODE_PAGING; 2414 vmexit->inst_length = 0; 2415 vmexit->u.paging.gpa = gpa; 2416 vmexit->u.paging.fault_type = ept_fault_type(qual); 2417 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2418 } else if (ept_emulation_fault(qual)) { 2419 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2420 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 2421 } 2422 /* 2423 * If Virtual NMIs control is 1 and the VM-exit is due to an 2424 * EPT fault during the execution of IRET then we must restore 2425 * the state of "virtual-NMI blocking" before resuming. 2426 * 2427 * See description of "NMI unblocking due to IRET" in 2428 * "Exit Qualification for EPT Violations". 2429 */ 2430 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2431 (qual & EXIT_QUAL_NMIUDTI) != 0) 2432 vmx_restore_nmi_blocking(vmx, vcpu); 2433 break; 2434 case EXIT_REASON_VIRTUALIZED_EOI: 2435 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2436 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2437 vmexit->inst_length = 0; /* trap-like */ 2438 break; 2439 case EXIT_REASON_APIC_ACCESS: 2440 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2441 break; 2442 case EXIT_REASON_APIC_WRITE: 2443 /* 2444 * APIC-write VM exit is trap-like so the %rip is already 2445 * pointing to the next instruction. 2446 */ 2447 vmexit->inst_length = 0; 2448 vlapic = vm_lapic(vmx->vm, vcpu); 2449 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2450 break; 2451 case EXIT_REASON_XSETBV: 2452 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2453 break; 2454 case EXIT_REASON_MONITOR: 2455 vmexit->exitcode = VM_EXITCODE_MONITOR; 2456 break; 2457 case EXIT_REASON_MWAIT: 2458 vmexit->exitcode = VM_EXITCODE_MWAIT; 2459 break; 2460 default: 2461 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2462 break; 2463 } 2464 2465 if (handled) { 2466 /* 2467 * It is possible that control is returned to userland 2468 * even though we were able to handle the VM exit in the 2469 * kernel. 
2470 * 2471 * In such a case we want to make sure that the userland 2472 * restarts guest execution at the instruction *after* 2473 * the one we just processed. Therefore we update the 2474 * guest rip in the VMCS and in 'vmexit'. 2475 */ 2476 vmexit->rip += vmexit->inst_length; 2477 vmexit->inst_length = 0; 2478 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2479 } else { 2480 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2481 /* 2482 * If this VM exit was not claimed by anybody then 2483 * treat it as a generic VMX exit. 2484 */ 2485 vmexit->exitcode = VM_EXITCODE_VMX; 2486 vmexit->u.vmx.status = VM_SUCCESS; 2487 vmexit->u.vmx.inst_type = 0; 2488 vmexit->u.vmx.inst_error = 0; 2489 } else { 2490 /* 2491 * The exitcode and collateral have been populated. 2492 * The VM exit will be processed further in userland. 2493 */ 2494 } 2495 } 2496 return (handled); 2497 } 2498 2499 static __inline void 2500 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2501 { 2502 2503 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2504 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2505 vmxctx->inst_fail_status)); 2506 2507 vmexit->inst_length = 0; 2508 vmexit->exitcode = VM_EXITCODE_VMX; 2509 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2510 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2511 vmexit->u.vmx.exit_reason = ~0; 2512 vmexit->u.vmx.exit_qualification = ~0; 2513 2514 switch (rc) { 2515 case VMX_VMRESUME_ERROR: 2516 case VMX_VMLAUNCH_ERROR: 2517 case VMX_INVEPT_ERROR: 2518 vmexit->u.vmx.inst_type = rc; 2519 break; 2520 default: 2521 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2522 } 2523 } 2524 2525 /* 2526 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2527 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2528 * sufficient to simply vector to the NMI handler via a software interrupt. 2529 * However, this must be done before maskable interrupts are enabled 2530 * otherwise the "iret" issued by an interrupt handler will incorrectly 2531 * clear NMI blocking. 
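 *
 * The vectoring is done with "int $2" because the NMI is delivered
 * through vector 2 (IDT_NMI).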
2532 */ 2533 static __inline void 2534 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2535 { 2536 uint32_t intr_info; 2537 2538 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2539 2540 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2541 return; 2542 2543 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2544 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2545 ("VM exit interruption info invalid: %#x", intr_info)); 2546 2547 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2548 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2549 "to NMI has invalid vector: %#x", intr_info)); 2550 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2551 __asm __volatile("int $2"); 2552 } 2553 } 2554 2555 static int 2556 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 2557 void *rendezvous_cookie, void *suspend_cookie) 2558 { 2559 int rc, handled, launched; 2560 struct vmx *vmx; 2561 struct vm *vm; 2562 struct vmxctx *vmxctx; 2563 struct vmcs *vmcs; 2564 struct vm_exit *vmexit; 2565 struct vlapic *vlapic; 2566 uint32_t exit_reason; 2567 2568 vmx = arg; 2569 vm = vmx->vm; 2570 vmcs = &vmx->vmcs[vcpu]; 2571 vmxctx = &vmx->ctx[vcpu]; 2572 vlapic = vm_lapic(vm, vcpu); 2573 vmexit = vm_exitinfo(vm, vcpu); 2574 launched = 0; 2575 2576 KASSERT(vmxctx->pmap == pmap, 2577 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2578 2579 vmx_msr_guest_enter(vmx, vcpu); 2580 2581 VMPTRLD(vmcs); 2582 2583 /* 2584 * XXX 2585 * We do this every time because we may setup the virtual machine 2586 * from a different process than the one that actually runs it. 2587 * 2588 * If the life of a virtual machine was spent entirely in the context 2589 * of a single process we could do this once in vmx_vminit(). 2590 */ 2591 vmcs_write(VMCS_HOST_CR3, rcr3()); 2592 2593 vmcs_write(VMCS_GUEST_RIP, rip); 2594 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2595 do { 2596 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2597 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); 2598 2599 handled = UNHANDLED; 2600 /* 2601 * Interrupts are disabled from this point on until the 2602 * guest starts executing. This is done for the following 2603 * reasons: 2604 * 2605 * If an AST is asserted on this thread after the check below, 2606 * then the IPI_AST notification will not be lost, because it 2607 * will cause a VM exit due to external interrupt as soon as 2608 * the guest state is loaded. 2609 * 2610 * A posted interrupt after 'vmx_inject_interrupts()' will 2611 * not be "lost" because it will be held pending in the host 2612 * APIC because interrupts are disabled. The pending interrupt 2613 * will be recognized as soon as the guest state is loaded. 2614 * 2615 * The same reasoning applies to the IPI generated by 2616 * pmap_invalidate_ept(). 2617 */ 2618 disable_intr(); 2619 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 2620 2621 /* 2622 * Check for vcpu suspension after injecting events because 2623 * vmx_inject_interrupts() can suspend the vcpu due to a 2624 * triple fault. 
2625 */ 2626 if (vcpu_suspended(suspend_cookie)) { 2627 enable_intr(); 2628 vm_exit_suspended(vmx->vm, vcpu, rip); 2629 break; 2630 } 2631 2632 if (vcpu_rendezvous_pending(rendezvous_cookie)) { 2633 enable_intr(); 2634 vm_exit_rendezvous(vmx->vm, vcpu, rip); 2635 break; 2636 } 2637 2638 if (vcpu_should_yield(vm, vcpu)) { 2639 enable_intr(); 2640 vm_exit_astpending(vmx->vm, vcpu, rip); 2641 vmx_astpending_trace(vmx, vcpu, rip); 2642 handled = HANDLED; 2643 break; 2644 } 2645 2646 vmx_run_trace(vmx, vcpu); 2647 rc = vmx_enter_guest(vmxctx, vmx, launched); 2648 2649 /* Collect some information for VM exit processing */ 2650 vmexit->rip = rip = vmcs_guest_rip(); 2651 vmexit->inst_length = vmexit_instruction_length(); 2652 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2653 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2654 2655 /* Update 'nextrip' */ 2656 vmx->state[vcpu].nextrip = rip; 2657 2658 if (rc == VMX_GUEST_VMEXIT) { 2659 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2660 enable_intr(); 2661 handled = vmx_exit_process(vmx, vcpu, vmexit); 2662 } else { 2663 enable_intr(); 2664 vmx_exit_inst_error(vmxctx, rc, vmexit); 2665 } 2666 launched = 1; 2667 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2668 rip = vmexit->rip; 2669 } while (handled); 2670 2671 /* 2672 * If a VM exit has been handled then the exitcode must be BOGUS 2673 * If a VM exit is not handled then the exitcode must not be BOGUS 2674 */ 2675 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2676 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2677 panic("Mismatch between handled (%d) and exitcode (%d)", 2678 handled, vmexit->exitcode); 2679 } 2680 2681 if (!handled) 2682 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 2683 2684 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2685 vmexit->exitcode); 2686 2687 VMCLEAR(vmcs); 2688 vmx_msr_guest_exit(vmx, vcpu); 2689 2690 return (0); 2691 } 2692 2693 static void 2694 vmx_vmcleanup(void *arg) 2695 { 2696 int i; 2697 struct vmx *vmx = arg; 2698 2699 if (apic_access_virtualization(vmx, 0)) 2700 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2701 2702 for (i = 0; i < VM_MAXCPU; i++) 2703 vpid_free(vmx->state[i].vpid); 2704 2705 free(vmx, M_VMX); 2706 2707 return; 2708 } 2709 2710 static register_t * 2711 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2712 { 2713 2714 switch (reg) { 2715 case VM_REG_GUEST_RAX: 2716 return (&vmxctx->guest_rax); 2717 case VM_REG_GUEST_RBX: 2718 return (&vmxctx->guest_rbx); 2719 case VM_REG_GUEST_RCX: 2720 return (&vmxctx->guest_rcx); 2721 case VM_REG_GUEST_RDX: 2722 return (&vmxctx->guest_rdx); 2723 case VM_REG_GUEST_RSI: 2724 return (&vmxctx->guest_rsi); 2725 case VM_REG_GUEST_RDI: 2726 return (&vmxctx->guest_rdi); 2727 case VM_REG_GUEST_RBP: 2728 return (&vmxctx->guest_rbp); 2729 case VM_REG_GUEST_R8: 2730 return (&vmxctx->guest_r8); 2731 case VM_REG_GUEST_R9: 2732 return (&vmxctx->guest_r9); 2733 case VM_REG_GUEST_R10: 2734 return (&vmxctx->guest_r10); 2735 case VM_REG_GUEST_R11: 2736 return (&vmxctx->guest_r11); 2737 case VM_REG_GUEST_R12: 2738 return (&vmxctx->guest_r12); 2739 case VM_REG_GUEST_R13: 2740 return (&vmxctx->guest_r13); 2741 case VM_REG_GUEST_R14: 2742 return (&vmxctx->guest_r14); 2743 case VM_REG_GUEST_R15: 2744 return (&vmxctx->guest_r15); 2745 case VM_REG_GUEST_CR2: 2746 return (&vmxctx->guest_cr2); 2747 default: 2748 break; 2749 } 2750 return (NULL); 2751 } 2752 2753 static int 2754 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 2755 { 2756 register_t 
*regp; 2757 2758 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2759 *retval = *regp; 2760 return (0); 2761 } else 2762 return (EINVAL); 2763 } 2764 2765 static int 2766 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 2767 { 2768 register_t *regp; 2769 2770 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2771 *regp = val; 2772 return (0); 2773 } else 2774 return (EINVAL); 2775 } 2776 2777 static int 2778 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval) 2779 { 2780 uint64_t gi; 2781 int error; 2782 2783 error = vmcs_getreg(&vmx->vmcs[vcpu], running, 2784 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); 2785 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 2786 return (error); 2787 } 2788 2789 static int 2790 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val) 2791 { 2792 struct vmcs *vmcs; 2793 uint64_t gi; 2794 int error, ident; 2795 2796 /* 2797 * Forcing the vcpu into an interrupt shadow is not supported. 2798 */ 2799 if (val) { 2800 error = EINVAL; 2801 goto done; 2802 } 2803 2804 vmcs = &vmx->vmcs[vcpu]; 2805 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); 2806 error = vmcs_getreg(vmcs, running, ident, &gi); 2807 if (error == 0) { 2808 gi &= ~HWINTR_BLOCKING; 2809 error = vmcs_setreg(vmcs, running, ident, gi); 2810 } 2811 done: 2812 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val, 2813 error ? "failed" : "succeeded"); 2814 return (error); 2815 } 2816 2817 static int 2818 vmx_shadow_reg(int reg) 2819 { 2820 int shreg; 2821 2822 shreg = -1; 2823 2824 switch (reg) { 2825 case VM_REG_GUEST_CR0: 2826 shreg = VMCS_CR0_SHADOW; 2827 break; 2828 case VM_REG_GUEST_CR4: 2829 shreg = VMCS_CR4_SHADOW; 2830 break; 2831 default: 2832 break; 2833 } 2834 2835 return (shreg); 2836 } 2837 2838 static int 2839 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2840 { 2841 int running, hostcpu; 2842 struct vmx *vmx = arg; 2843 2844 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2845 if (running && hostcpu != curcpu) 2846 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2847 2848 if (reg == VM_REG_GUEST_INTR_SHADOW) 2849 return (vmx_get_intr_shadow(vmx, vcpu, running, retval)); 2850 2851 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 2852 return (0); 2853 2854 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 2855 } 2856 2857 static int 2858 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 2859 { 2860 int error, hostcpu, running, shadow; 2861 uint64_t ctls; 2862 pmap_t pmap; 2863 struct vmx *vmx = arg; 2864 2865 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2866 if (running && hostcpu != curcpu) 2867 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 2868 2869 if (reg == VM_REG_GUEST_INTR_SHADOW) 2870 return (vmx_modify_intr_shadow(vmx, vcpu, running, val)); 2871 2872 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 2873 return (0); 2874 2875 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 2876 2877 if (error == 0) { 2878 /* 2879 * If the "load EFER" VM-entry control is 1 then the 2880 * value of EFER.LMA must be identical to "IA-32e mode guest" 2881 * bit in the VM-entry control. 
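		 *
		 * This is one of the VM-entry checks on guest state described
		 * in the Intel SDM, so the control is kept in sync below
		 * whenever the guest's EFER is written through this
		 * interface.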
2882 */ 2883 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 2884 (reg == VM_REG_GUEST_EFER)) { 2885 vmcs_getreg(&vmx->vmcs[vcpu], running, 2886 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 2887 if (val & EFER_LMA) 2888 ctls |= VM_ENTRY_GUEST_LMA; 2889 else 2890 ctls &= ~VM_ENTRY_GUEST_LMA; 2891 vmcs_setreg(&vmx->vmcs[vcpu], running, 2892 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 2893 } 2894 2895 shadow = vmx_shadow_reg(reg); 2896 if (shadow > 0) { 2897 /* 2898 * Store the unmodified value in the shadow 2899 */ 2900 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 2901 VMCS_IDENT(shadow), val); 2902 } 2903 2904 if (reg == VM_REG_GUEST_CR3) { 2905 /* 2906 * Invalidate the guest vcpu's TLB mappings to emulate 2907 * the behavior of updating %cr3. 2908 * 2909 * XXX the processor retains global mappings when %cr3 2910 * is updated but vmx_invvpid() does not. 2911 */ 2912 pmap = vmx->ctx[vcpu].pmap; 2913 vmx_invvpid(vmx, vcpu, pmap, running); 2914 } 2915 } 2916 2917 return (error); 2918 } 2919 2920 static int 2921 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2922 { 2923 int hostcpu, running; 2924 struct vmx *vmx = arg; 2925 2926 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2927 if (running && hostcpu != curcpu) 2928 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2929 2930 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2931 } 2932 2933 static int 2934 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2935 { 2936 int hostcpu, running; 2937 struct vmx *vmx = arg; 2938 2939 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2940 if (running && hostcpu != curcpu) 2941 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2942 2943 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2944 } 2945 2946 static int 2947 vmx_getcap(void *arg, int vcpu, int type, int *retval) 2948 { 2949 struct vmx *vmx = arg; 2950 int vcap; 2951 int ret; 2952 2953 ret = ENOENT; 2954 2955 vcap = vmx->cap[vcpu].set; 2956 2957 switch (type) { 2958 case VM_CAP_HALT_EXIT: 2959 if (cap_halt_exit) 2960 ret = 0; 2961 break; 2962 case VM_CAP_PAUSE_EXIT: 2963 if (cap_pause_exit) 2964 ret = 0; 2965 break; 2966 case VM_CAP_MTRAP_EXIT: 2967 if (cap_monitor_trap) 2968 ret = 0; 2969 break; 2970 case VM_CAP_UNRESTRICTED_GUEST: 2971 if (cap_unrestricted_guest) 2972 ret = 0; 2973 break; 2974 case VM_CAP_ENABLE_INVPCID: 2975 if (cap_invpcid) 2976 ret = 0; 2977 break; 2978 default: 2979 break; 2980 } 2981 2982 if (ret == 0) 2983 *retval = (vcap & (1 << type)) ? 
1 : 0; 2984 2985 return (ret); 2986 } 2987 2988 static int 2989 vmx_setcap(void *arg, int vcpu, int type, int val) 2990 { 2991 struct vmx *vmx = arg; 2992 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 2993 uint32_t baseval; 2994 uint32_t *pptr; 2995 int error; 2996 int flag; 2997 int reg; 2998 int retval; 2999 3000 retval = ENOENT; 3001 pptr = NULL; 3002 3003 switch (type) { 3004 case VM_CAP_HALT_EXIT: 3005 if (cap_halt_exit) { 3006 retval = 0; 3007 pptr = &vmx->cap[vcpu].proc_ctls; 3008 baseval = *pptr; 3009 flag = PROCBASED_HLT_EXITING; 3010 reg = VMCS_PRI_PROC_BASED_CTLS; 3011 } 3012 break; 3013 case VM_CAP_MTRAP_EXIT: 3014 if (cap_monitor_trap) { 3015 retval = 0; 3016 pptr = &vmx->cap[vcpu].proc_ctls; 3017 baseval = *pptr; 3018 flag = PROCBASED_MTF; 3019 reg = VMCS_PRI_PROC_BASED_CTLS; 3020 } 3021 break; 3022 case VM_CAP_PAUSE_EXIT: 3023 if (cap_pause_exit) { 3024 retval = 0; 3025 pptr = &vmx->cap[vcpu].proc_ctls; 3026 baseval = *pptr; 3027 flag = PROCBASED_PAUSE_EXITING; 3028 reg = VMCS_PRI_PROC_BASED_CTLS; 3029 } 3030 break; 3031 case VM_CAP_UNRESTRICTED_GUEST: 3032 if (cap_unrestricted_guest) { 3033 retval = 0; 3034 pptr = &vmx->cap[vcpu].proc_ctls2; 3035 baseval = *pptr; 3036 flag = PROCBASED2_UNRESTRICTED_GUEST; 3037 reg = VMCS_SEC_PROC_BASED_CTLS; 3038 } 3039 break; 3040 case VM_CAP_ENABLE_INVPCID: 3041 if (cap_invpcid) { 3042 retval = 0; 3043 pptr = &vmx->cap[vcpu].proc_ctls2; 3044 baseval = *pptr; 3045 flag = PROCBASED2_ENABLE_INVPCID; 3046 reg = VMCS_SEC_PROC_BASED_CTLS; 3047 } 3048 break; 3049 default: 3050 break; 3051 } 3052 3053 if (retval == 0) { 3054 if (val) { 3055 baseval |= flag; 3056 } else { 3057 baseval &= ~flag; 3058 } 3059 VMPTRLD(vmcs); 3060 error = vmwrite(reg, baseval); 3061 VMCLEAR(vmcs); 3062 3063 if (error) { 3064 retval = error; 3065 } else { 3066 /* 3067 * Update optional stored flags, and record 3068 * setting 3069 */ 3070 if (pptr != NULL) { 3071 *pptr = baseval; 3072 } 3073 3074 if (val) { 3075 vmx->cap[vcpu].set |= (1 << type); 3076 } else { 3077 vmx->cap[vcpu].set &= ~(1 << type); 3078 } 3079 } 3080 } 3081 3082 return (retval); 3083 } 3084 3085 struct vlapic_vtx { 3086 struct vlapic vlapic; 3087 struct pir_desc *pir_desc; 3088 struct vmx *vmx; 3089 }; 3090 3091 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3092 do { \ 3093 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3094 level ? "level" : "edge", vector); \ 3095 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3096 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3097 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3098 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3099 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3100 } while (0) 3101 3102 /* 3103 * vlapic->ops handlers that utilize the APICv hardware assist described in 3104 * Chapter 29 of the Intel SDM. 3105 */ 3106 static int 3107 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3108 { 3109 struct vlapic_vtx *vlapic_vtx; 3110 struct pir_desc *pir_desc; 3111 uint64_t mask; 3112 int idx, notify; 3113 3114 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3115 pir_desc = vlapic_vtx->pir_desc; 3116 3117 /* 3118 * Keep track of interrupt requests in the PIR descriptor. This is 3119 * because the virtual APIC page pointed to by the VMCS cannot be 3120 * modified if the vcpu is running. 
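	 *
	 * For example, vector 65 (0x41) lands in pir[1] (65 / 64 == 1)
	 * with mask 1UL << (65 % 64) == 0x2.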
3121 */ 3122 idx = vector / 64; 3123 mask = 1UL << (vector % 64); 3124 atomic_set_long(&pir_desc->pir[idx], mask); 3125 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1); 3126 3127 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3128 level, "vmx_set_intr_ready"); 3129 return (notify); 3130 } 3131 3132 static int 3133 vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3134 { 3135 struct vlapic_vtx *vlapic_vtx; 3136 struct pir_desc *pir_desc; 3137 struct LAPIC *lapic; 3138 uint64_t pending, pirval; 3139 uint32_t ppr, vpr; 3140 int i; 3141 3142 /* 3143 * This function is only expected to be called from the 'HLT' exit 3144 * handler which does not care about the vector that is pending. 3145 */ 3146 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3147 3148 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3149 pir_desc = vlapic_vtx->pir_desc; 3150 3151 pending = atomic_load_acq_long(&pir_desc->pending); 3152 if (!pending) 3153 return (0); /* common case */ 3154 3155 /* 3156 * If there is an interrupt pending then it will be recognized only 3157 * if its priority is greater than the processor priority. 3158 * 3159 * Special case: if the processor priority is zero then any pending 3160 * interrupt will be recognized. 3161 */ 3162 lapic = vlapic->apic_page; 3163 ppr = lapic->ppr & 0xf0; 3164 if (ppr == 0) 3165 return (1); 3166 3167 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 3168 lapic->ppr); 3169 3170 for (i = 3; i >= 0; i--) { 3171 pirval = pir_desc->pir[i]; 3172 if (pirval != 0) { 3173 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0; 3174 return (vpr > ppr); 3175 } 3176 } 3177 return (0); 3178 } 3179 3180 static void 3181 vmx_intr_accepted(struct vlapic *vlapic, int vector) 3182 { 3183 3184 panic("vmx_intr_accepted: not expected to be called"); 3185 } 3186 3187 static void 3188 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 3189 { 3190 struct vlapic_vtx *vlapic_vtx; 3191 struct vmx *vmx; 3192 struct vmcs *vmcs; 3193 uint64_t mask, val; 3194 3195 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 3196 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 3197 ("vmx_set_tmr: vcpu cannot be running")); 3198 3199 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3200 vmx = vlapic_vtx->vmx; 3201 vmcs = &vmx->vmcs[vlapic->vcpuid]; 3202 mask = 1UL << (vector % 64); 3203 3204 VMPTRLD(vmcs); 3205 val = vmcs_read(VMCS_EOI_EXIT(vector)); 3206 if (level) 3207 val |= mask; 3208 else 3209 val &= ~mask; 3210 vmcs_write(VMCS_EOI_EXIT(vector), val); 3211 VMCLEAR(vmcs); 3212 } 3213 3214 static void 3215 vmx_enable_x2apic_mode(struct vlapic *vlapic) 3216 { 3217 struct vmx *vmx; 3218 struct vmcs *vmcs; 3219 uint32_t proc_ctls2; 3220 int vcpuid, error; 3221 3222 vcpuid = vlapic->vcpuid; 3223 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3224 vmcs = &vmx->vmcs[vcpuid]; 3225 3226 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3227 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3228 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3229 3230 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3231 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3232 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3233 3234 VMPTRLD(vmcs); 3235 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3236 VMCLEAR(vmcs); 3237 3238 if (vlapic->vcpuid == 0) { 3239 /* 3240 * The nested page table mappings are shared by all vcpus 3241 * so unmap the APIC access page just once. 
3242 */ 3243 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3244 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3245 __func__, error)); 3246 3247 /* 3248 * The MSR bitmap is shared by all vcpus so modify it only 3249 * once in the context of vcpu 0. 3250 */ 3251 error = vmx_allow_x2apic_msrs(vmx); 3252 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3253 __func__, error)); 3254 } 3255 } 3256 3257 static void 3258 vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3259 { 3260 3261 ipi_cpu(hostcpu, pirvec); 3262 } 3263 3264 /* 3265 * Transfer the pending interrupts in the PIR descriptor to the IRR 3266 * in the virtual APIC page. 3267 */ 3268 static void 3269 vmx_inject_pir(struct vlapic *vlapic) 3270 { 3271 struct vlapic_vtx *vlapic_vtx; 3272 struct pir_desc *pir_desc; 3273 struct LAPIC *lapic; 3274 uint64_t val, pirval; 3275 int rvi, pirbase = -1; 3276 uint16_t intr_status_old, intr_status_new; 3277 3278 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3279 pir_desc = vlapic_vtx->pir_desc; 3280 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3281 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3282 "no posted interrupt pending"); 3283 return; 3284 } 3285 3286 pirval = 0; 3287 pirbase = -1; 3288 lapic = vlapic->apic_page; 3289 3290 val = atomic_readandclear_long(&pir_desc->pir[0]); 3291 if (val != 0) { 3292 lapic->irr0 |= val; 3293 lapic->irr1 |= val >> 32; 3294 pirbase = 0; 3295 pirval = val; 3296 } 3297 3298 val = atomic_readandclear_long(&pir_desc->pir[1]); 3299 if (val != 0) { 3300 lapic->irr2 |= val; 3301 lapic->irr3 |= val >> 32; 3302 pirbase = 64; 3303 pirval = val; 3304 } 3305 3306 val = atomic_readandclear_long(&pir_desc->pir[2]); 3307 if (val != 0) { 3308 lapic->irr4 |= val; 3309 lapic->irr5 |= val >> 32; 3310 pirbase = 128; 3311 pirval = val; 3312 } 3313 3314 val = atomic_readandclear_long(&pir_desc->pir[3]); 3315 if (val != 0) { 3316 lapic->irr6 |= val; 3317 lapic->irr7 |= val >> 32; 3318 pirbase = 192; 3319 pirval = val; 3320 } 3321 3322 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3323 3324 /* 3325 * Update RVI so the processor can evaluate pending virtual 3326 * interrupts on VM-entry. 3327 * 3328 * It is possible for pirval to be 0 here, even though the 3329 * pending bit has been set. The scenario is: 3330 * CPU-Y is sending a posted interrupt to CPU-X, which 3331 * is running a guest and processing posted interrupts in h/w. 3332 * CPU-X will eventually exit and the state seen in s/w is 3333 * the pending bit set, but no PIR bits set. 
3334 * 3335 * CPU-X CPU-Y 3336 * (vm running) (host running) 3337 * rx posted interrupt 3338 * CLEAR pending bit 3339 * SET PIR bit 3340 * READ/CLEAR PIR bits 3341 * SET pending bit 3342 * (vm exit) 3343 * pending bit set, PIR 0 3344 */ 3345 if (pirval != 0) { 3346 rvi = pirbase + flsl(pirval) - 1; 3347 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 3348 intr_status_new = (intr_status_old & 0xFF00) | rvi; 3349 if (intr_status_new > intr_status_old) { 3350 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 3351 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3352 "guest_intr_status changed from 0x%04x to 0x%04x", 3353 intr_status_old, intr_status_new); 3354 } 3355 } 3356 } 3357 3358 static struct vlapic * 3359 vmx_vlapic_init(void *arg, int vcpuid) 3360 { 3361 struct vmx *vmx; 3362 struct vlapic *vlapic; 3363 struct vlapic_vtx *vlapic_vtx; 3364 3365 vmx = arg; 3366 3367 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 3368 vlapic->vm = vmx->vm; 3369 vlapic->vcpuid = vcpuid; 3370 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3371 3372 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3373 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3374 vlapic_vtx->vmx = vmx; 3375 3376 if (virtual_interrupt_delivery) { 3377 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 3378 vlapic->ops.pending_intr = vmx_pending_intr; 3379 vlapic->ops.intr_accepted = vmx_intr_accepted; 3380 vlapic->ops.set_tmr = vmx_set_tmr; 3381 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; 3382 } 3383 3384 if (posted_interrupts) 3385 vlapic->ops.post_intr = vmx_post_intr; 3386 3387 vlapic_init(vlapic); 3388 3389 return (vlapic); 3390 } 3391 3392 static void 3393 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3394 { 3395 3396 vlapic_cleanup(vlapic); 3397 free(vlapic, M_VLAPIC); 3398 } 3399 3400 struct vmm_ops vmm_ops_intel = { 3401 vmx_init, 3402 vmx_cleanup, 3403 vmx_restore, 3404 vmx_vminit, 3405 vmx_run, 3406 vmx_vmcleanup, 3407 vmx_getreg, 3408 vmx_setreg, 3409 vmx_getdesc, 3410 vmx_setdesc, 3411 vmx_getcap, 3412 vmx_setcap, 3413 ept_vmspace_alloc, 3414 ept_vmspace_free, 3415 vmx_vlapic_init, 3416 vmx_vlapic_cleanup, 3417 }; 3418
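
/*
 * Worked example for vmx_inject_pir() above (illustrative only, not part
 * of the original code): continuing the earlier example of vector 65
 * pending in pir[1] (pirval == 0x2, pirbase == 64), the requested virtual
 * interrupt is computed as
 *
 *	rvi = pirbase + flsl(pirval) - 1 = 64 + 2 - 1 = 65
 *
 * and the low byte (RVI) of the guest interrupt-status field is raised to
 * 65 if it is currently lower, so the processor can deliver the interrupt
 * on the next VM-entry.
 */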