/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
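	 * (The limit can be lowered via the hw.vmm.svm.num_asids loader
	 * tunable declared above; a value of 0 means "use the hardware
	 * maximum reported in CPUID Fn8000_000A EBX".)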
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will roll over both 'gen' and
		 * 'num' and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: the lower bit for read and the higher
 * bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}

/*
 * Allow the vcpu to read or write the 'msr' without trapping into the
 * hypervisor.
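 *
 * For example, MSR_LSTAR (0xC0000082) lands in the AMD 6th generation
 * range above: base = 0x2000 and off = 0x82, so its permissions live in
 * byte (0x2000 + 0x82) / 4 = 0x820 of the bitmap, with the read bit at
 * position 4 and the write bit at position 5 of that byte.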
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions; otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
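			 * (Vector 2 is the NMI, which is intercepted
			 * separately via VMCB_INTCPT_NMI below, and vector 9
			 * is not generated by modern processors.)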
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From the section "Canonicalization and Consistency Checks" in
	 * APMv2: the VMRUN intercept bit must be set to pass the consistency
	 * check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO);
	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ?
	    1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch(paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event into the vcpu as described in section 15.20, "Event
 * injection".
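 *
 * Per APMv2, the EVENTINJ field packs the vector into bits 7:0, the event
 * type into bits 10:8, the error-code-valid flag into bit 11 and the valid
 * bit into bit 31, with the optional error code in bits 63:32; the
 * construction below mirrors that layout.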
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;
	int pending;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/*
	 * If V_IRQ indicates that the interrupt injection attempted on the
	 * last VMRUN was successful then update the vlapic accordingly.
	 */
	if (ctrl->v_intr_vector != 0) {
		pending = ctrl->v_irq;
		KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
		    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
		KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
		    pending ? "pending" : "accepted");
		if (!pending)
			vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
	}
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
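	 *
	 * (EXITINTINFO has the same layout as the EVENTINJ field, so the
	 * saved value can later be replayed through svm_eventinject(); see
	 * svm_inj_intinfo() below.)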
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

#ifdef KTR
	if (ctrl->v_intr_vector == 0)
		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	else
		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
#endif
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the
 * handler executes an IRET. The IRET intercept is enabled when an NMI is
 * injected to track when the vcpu is done handling the NMI.
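 *
 * (The guest's NMI-blocking state is not directly visible to the
 * hypervisor, so the IRET intercept effectively stands in for it.)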
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack then it will trap back into the hypervisor. If an NMI is
	 * pending for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;	/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
1145 */ 1146 if (newval & EFER_LMSLE) { 1147 vme = vm_exitinfo(sc->vm, vcpu); 1148 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1149 *retu = true; 1150 return (0); 1151 } 1152 1153 if (newval & EFER_FFXSR) { 1154 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) 1155 goto gpf; 1156 } 1157 1158 if (newval & EFER_TCE) { 1159 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) 1160 goto gpf; 1161 } 1162 1163 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1164 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1165 return (0); 1166 gpf: 1167 vm_inject_gp(sc->vm, vcpu); 1168 return (0); 1169 } 1170 1171 static int 1172 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1173 bool *retu) 1174 { 1175 int error; 1176 1177 if (lapic_msr(num)) 1178 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1179 else if (num == MSR_EFER) 1180 error = svm_write_efer(sc, vcpu, val, retu); 1181 else 1182 error = svm_wrmsr(sc, vcpu, num, val, retu); 1183 1184 return (error); 1185 } 1186 1187 static int 1188 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1189 { 1190 struct vmcb_state *state; 1191 struct svm_regctx *ctx; 1192 uint64_t result; 1193 int error; 1194 1195 if (lapic_msr(num)) 1196 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1197 else 1198 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1199 1200 if (error == 0) { 1201 state = svm_get_vmcb_state(sc, vcpu); 1202 ctx = svm_get_guest_regctx(sc, vcpu); 1203 state->rax = result & 0xffffffff; 1204 ctx->sctx_rdx = result >> 32; 1205 } 1206 1207 return (error); 1208 } 1209 1210 #ifdef KTR 1211 static const char * 1212 exit_reason_to_str(uint64_t reason) 1213 { 1214 static char reasonbuf[32]; 1215 1216 switch (reason) { 1217 case VMCB_EXIT_INVALID: 1218 return ("invalvmcb"); 1219 case VMCB_EXIT_SHUTDOWN: 1220 return ("shutdown"); 1221 case VMCB_EXIT_NPF: 1222 return ("nptfault"); 1223 case VMCB_EXIT_PAUSE: 1224 return ("pause"); 1225 case VMCB_EXIT_HLT: 1226 return ("hlt"); 1227 case VMCB_EXIT_CPUID: 1228 return ("cpuid"); 1229 case VMCB_EXIT_IO: 1230 return ("inout"); 1231 case VMCB_EXIT_MC: 1232 return ("mchk"); 1233 case VMCB_EXIT_INTR: 1234 return ("extintr"); 1235 case VMCB_EXIT_NMI: 1236 return ("nmi"); 1237 case VMCB_EXIT_VINTR: 1238 return ("vintr"); 1239 case VMCB_EXIT_MSR: 1240 return ("msr"); 1241 case VMCB_EXIT_IRET: 1242 return ("iret"); 1243 case VMCB_EXIT_MONITOR: 1244 return ("monitor"); 1245 case VMCB_EXIT_MWAIT: 1246 return ("mwait"); 1247 default: 1248 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1249 return (reasonbuf); 1250 } 1251 } 1252 #endif /* KTR */ 1253 1254 /* 1255 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1256 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1257 * and exceptions caused by INT3, INTO and BOUND instructions. 1258 * 1259 * Return 1 if the nRIP is valid and 0 otherwise. 1260 */ 1261 static int 1262 nrip_valid(uint64_t exitcode) 1263 { 1264 switch (exitcode) { 1265 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1266 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1267 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1268 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1269 case 0x43: /* INT3 */ 1270 case 0x44: /* INTO */ 1271 case 0x45: /* BOUND */ 1272 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1273 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... 
VMEXIT_XSETBV */ 1274 return (1); 1275 default: 1276 return (0); 1277 } 1278 } 1279 1280 static int 1281 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1282 { 1283 struct vmcb *vmcb; 1284 struct vmcb_state *state; 1285 struct vmcb_ctrl *ctrl; 1286 struct svm_regctx *ctx; 1287 uint64_t code, info1, info2, val; 1288 uint32_t eax, ecx, edx; 1289 int error, errcode_valid, handled, idtvec, reflect; 1290 bool retu; 1291 1292 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1293 vmcb = svm_get_vmcb(svm_sc, vcpu); 1294 state = &vmcb->state; 1295 ctrl = &vmcb->ctrl; 1296 1297 handled = 0; 1298 code = ctrl->exitcode; 1299 info1 = ctrl->exitinfo1; 1300 info2 = ctrl->exitinfo2; 1301 1302 vmexit->exitcode = VM_EXITCODE_BOGUS; 1303 vmexit->rip = state->rip; 1304 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1305 1306 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1307 1308 /* 1309 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1310 * in an inconsistent state and can trigger assertions that would 1311 * never happen otherwise. 1312 */ 1313 if (code == VMCB_EXIT_INVALID) { 1314 vm_exit_svm(vmexit, code, info1, info2); 1315 return (0); 1316 } 1317 1318 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1319 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1320 1321 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1322 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1323 vmexit->inst_length, code, info1, info2)); 1324 1325 svm_update_virqinfo(svm_sc, vcpu); 1326 svm_save_intinfo(svm_sc, vcpu); 1327 1328 switch (code) { 1329 case VMCB_EXIT_IRET: 1330 /* 1331 * Restart execution at "iret" but with the intercept cleared. 1332 */ 1333 vmexit->inst_length = 0; 1334 clear_nmi_blocking(svm_sc, vcpu); 1335 handled = 1; 1336 break; 1337 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1338 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1339 handled = 1; 1340 break; 1341 case VMCB_EXIT_INTR: /* external interrupt */ 1342 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1343 handled = 1; 1344 break; 1345 case VMCB_EXIT_NMI: /* external NMI */ 1346 handled = 1; 1347 break; 1348 case 0x40 ... 0x5F: 1349 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1350 reflect = 1; 1351 idtvec = code - 0x40; 1352 switch (idtvec) { 1353 case IDT_MC: 1354 /* 1355 * Call the machine check handler by hand. Also don't 1356 * reflect the machine check back into the guest. 1357 */ 1358 reflect = 0; 1359 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1360 __asm __volatile("int $18"); 1361 break; 1362 case IDT_PF: 1363 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1364 info2); 1365 KASSERT(error == 0, ("%s: error %d updating cr2", 1366 __func__, error)); 1367 /* fallthru */ 1368 case IDT_NP: 1369 case IDT_SS: 1370 case IDT_GP: 1371 case IDT_AC: 1372 case IDT_TS: 1373 errcode_valid = 1; 1374 break; 1375 1376 case IDT_DF: 1377 errcode_valid = 1; 1378 info1 = 0; 1379 break; 1380 1381 case IDT_BP: 1382 case IDT_OF: 1383 case IDT_BR: 1384 /* 1385 * The 'nrip' field is populated for INT3, INTO and 1386 * BOUND exceptions and this also implies that 1387 * 'inst_length' is non-zero. 1388 * 1389 * Reset 'inst_length' to zero so the guest %rip at 1390 * event injection is identical to what it was when 1391 * the exception originally happened. 
1392 */ 1393 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1394 "to zero before injecting exception %d", 1395 vmexit->inst_length, idtvec); 1396 vmexit->inst_length = 0; 1397 /* fallthru */ 1398 default: 1399 errcode_valid = 0; 1400 info1 = 0; 1401 break; 1402 } 1403 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1404 "when reflecting exception %d into guest", 1405 vmexit->inst_length, idtvec)); 1406 1407 if (reflect) { 1408 /* Reflect the exception back into the guest */ 1409 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1410 "%d/%#x into the guest", idtvec, (int)info1); 1411 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1412 errcode_valid, info1, 0); 1413 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1414 __func__, error)); 1415 } 1416 handled = 1; 1417 break; 1418 case VMCB_EXIT_MSR: /* MSR access. */ 1419 eax = state->rax; 1420 ecx = ctx->sctx_rcx; 1421 edx = ctx->sctx_rdx; 1422 retu = false; 1423 1424 if (info1) { 1425 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1426 val = (uint64_t)edx << 32 | eax; 1427 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1428 ecx, val); 1429 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1430 vmexit->exitcode = VM_EXITCODE_WRMSR; 1431 vmexit->u.msr.code = ecx; 1432 vmexit->u.msr.wval = val; 1433 } else if (!retu) { 1434 handled = 1; 1435 } else { 1436 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1437 ("emulate_wrmsr retu with bogus exitcode")); 1438 } 1439 } else { 1440 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1441 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1442 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1443 vmexit->exitcode = VM_EXITCODE_RDMSR; 1444 vmexit->u.msr.code = ecx; 1445 } else if (!retu) { 1446 handled = 1; 1447 } else { 1448 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1449 ("emulate_rdmsr retu with bogus exitcode")); 1450 } 1451 } 1452 break; 1453 case VMCB_EXIT_IO: 1454 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1455 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1456 break; 1457 case VMCB_EXIT_CPUID: 1458 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1459 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1460 (uint32_t *)&state->rax, 1461 (uint32_t *)&ctx->sctx_rbx, 1462 (uint32_t *)&ctx->sctx_rcx, 1463 (uint32_t *)&ctx->sctx_rdx); 1464 break; 1465 case VMCB_EXIT_HLT: 1466 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1467 vmexit->exitcode = VM_EXITCODE_HLT; 1468 vmexit->u.hlt.rflags = state->rflags; 1469 break; 1470 case VMCB_EXIT_PAUSE: 1471 vmexit->exitcode = VM_EXITCODE_PAUSE; 1472 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1473 break; 1474 case VMCB_EXIT_NPF: 1475 /* EXITINFO2 contains the faulting guest physical address */ 1476 if (info1 & VMCB_NPF_INFO1_RSV) { 1477 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1478 "reserved bits set: info1(%#lx) info2(%#lx)", 1479 info1, info2); 1480 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { 1481 vmexit->exitcode = VM_EXITCODE_PAGING; 1482 vmexit->u.paging.gpa = info2; 1483 vmexit->u.paging.fault_type = npf_fault_type(info1); 1484 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1485 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1486 "on gpa %#lx/%#lx at rip %#lx", 1487 info2, info1, state->rip); 1488 } else if (svm_npf_emul_fault(info1)) { 1489 svm_handle_inst_emul(vmcb, info2, vmexit); 1490 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1491 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1492 "for gpa %#lx/%#lx at rip %#lx", 1493 info2, info1, state->rip); 
1494 } 1495 break; 1496 case VMCB_EXIT_MONITOR: 1497 vmexit->exitcode = VM_EXITCODE_MONITOR; 1498 break; 1499 case VMCB_EXIT_MWAIT: 1500 vmexit->exitcode = VM_EXITCODE_MWAIT; 1501 break; 1502 default: 1503 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1504 break; 1505 } 1506 1507 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1508 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1509 vmexit->rip, vmexit->inst_length); 1510 1511 if (handled) { 1512 vmexit->rip += vmexit->inst_length; 1513 vmexit->inst_length = 0; 1514 state->rip = vmexit->rip; 1515 } else { 1516 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1517 /* 1518 * If this VM exit was not claimed by anybody then 1519 * treat it as a generic SVM exit. 1520 */ 1521 vm_exit_svm(vmexit, code, info1, info2); 1522 } else { 1523 /* 1524 * The exitcode and collateral have been populated. 1525 * The VM exit will be processed further in userland. 1526 */ 1527 } 1528 } 1529 return (handled); 1530 } 1531 1532 static void 1533 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1534 { 1535 uint64_t intinfo; 1536 1537 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1538 return; 1539 1540 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1541 "valid: %#lx", __func__, intinfo)); 1542 1543 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1544 VMCB_EXITINTINFO_VECTOR(intinfo), 1545 VMCB_EXITINTINFO_EC(intinfo), 1546 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1547 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1548 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1549 } 1550 1551 /* 1552 * Inject event to virtual cpu. 1553 */ 1554 static void 1555 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1556 { 1557 struct vmcb_ctrl *ctrl; 1558 struct vmcb_state *state; 1559 struct svm_vcpu *vcpustate; 1560 uint8_t v_tpr; 1561 int vector, need_intr_window, pending_apic_vector; 1562 1563 state = svm_get_vmcb_state(sc, vcpu); 1564 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1565 vcpustate = svm_get_vcpu(sc, vcpu); 1566 1567 need_intr_window = 0; 1568 pending_apic_vector = 0; 1569 1570 if (vcpustate->nextrip != state->rip) { 1571 ctrl->intr_shadow = 0; 1572 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1573 "cleared due to rip change: %#lx/%#lx", 1574 vcpustate->nextrip, state->rip); 1575 } 1576 1577 /* 1578 * Inject pending events or exceptions for this vcpu. 1579 * 1580 * An event might be pending because the previous #VMEXIT happened 1581 * during event delivery (i.e. ctrl->exitintinfo). 1582 * 1583 * An event might also be pending because an exception was injected 1584 * by the hypervisor (e.g. #PF during instruction emulation). 1585 */ 1586 svm_inj_intinfo(sc, vcpu); 1587 1588 /* NMI event has priority over interrupts. */ 1589 if (vm_nmi_pending(sc->vm, vcpu)) { 1590 if (nmi_blocked(sc, vcpu)) { 1591 /* 1592 * Can't inject another NMI if the guest has not 1593 * yet executed an "iret" after the last NMI. 1594 */ 1595 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1596 "to NMI-blocking"); 1597 } else if (ctrl->intr_shadow) { 1598 /* 1599 * Can't inject an NMI if the vcpu is in an intr_shadow. 1600 */ 1601 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1602 "interrupt shadow"); 1603 need_intr_window = 1; 1604 goto done; 1605 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1606 /* 1607 * If there is already an exception/interrupt pending 1608 * then defer the NMI until after that. 
1609 */ 1610 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1611 "eventinj %#lx", ctrl->eventinj); 1612 1613 /* 1614 * Use self-IPI to trigger a VM-exit as soon as 1615 * possible after the event injection is completed. 1616 * 1617 * This works only if the external interrupt exiting 1618 * is at a lower priority than the event injection. 1619 * 1620 * Although not explicitly specified in APMv2 the 1621 * relative priorities were verified empirically. 1622 */ 1623 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1624 } else { 1625 vm_nmi_clear(sc->vm, vcpu); 1626 1627 /* Inject NMI, vector number is not used */ 1628 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1629 IDT_NMI, 0, false); 1630 1631 /* virtual NMI blocking is now in effect */ 1632 enable_nmi_blocking(sc, vcpu); 1633 1634 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1635 } 1636 } 1637 1638 if (!vm_extint_pending(sc->vm, vcpu)) { 1639 /* 1640 * APIC interrupts are delivered using the V_IRQ offload. 1641 * 1642 * The primary benefit is that the hypervisor doesn't need to 1643 * deal with the various conditions that inhibit interrupts. 1644 * It also means that TPR changes via CR8 will be handled 1645 * without any hypervisor involvement. 1646 * 1647 * Note that the APIC vector must remain pending in the vIRR 1648 * until it is confirmed that it was delivered to the guest. 1649 * This can be confirmed based on the value of V_IRQ at the 1650 * next #VMEXIT (1 = pending, 0 = delivered). 1651 * 1652 * Also note that it is possible that another higher priority 1653 * vector can become pending before this vector is delivered 1654 * to the guest. This is alright because vcpu_notify_event() 1655 * will send an IPI and force the vcpu to trap back into the 1656 * hypervisor. The higher priority vector will be injected on 1657 * the next VMRUN. 1658 */ 1659 if (vlapic_pending_intr(vlapic, &vector)) { 1660 KASSERT(vector >= 16 && vector <= 255, 1661 ("invalid vector %d from local APIC", vector)); 1662 pending_apic_vector = vector; 1663 } 1664 goto done; 1665 } 1666 1667 /* Ask the legacy pic for a vector to inject */ 1668 vatpic_pending_intr(sc->vm, &vector); 1669 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1670 vector)); 1671 1672 /* 1673 * If the guest has disabled interrupts or is in an interrupt shadow 1674 * then we cannot inject the pending interrupt. 1675 */ 1676 if ((state->rflags & PSL_I) == 0) { 1677 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1678 "rflags %#lx", vector, state->rflags); 1679 need_intr_window = 1; 1680 goto done; 1681 } 1682 1683 if (ctrl->intr_shadow) { 1684 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1685 "interrupt shadow", vector); 1686 need_intr_window = 1; 1687 goto done; 1688 } 1689 1690 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1691 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1692 "eventinj %#lx", vector, ctrl->eventinj); 1693 need_intr_window = 1; 1694 goto done; 1695 } 1696 1697 /* 1698 * Legacy PIC interrupts are delivered via the event injection 1699 * mechanism. 1700 */ 1701 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1702 1703 vm_extint_clear(sc->vm, vcpu); 1704 vatpic_intr_accepted(sc->vm, vector); 1705 1706 /* 1707 * Force a VM-exit as soon as the vcpu is ready to accept another 1708 * interrupt. This is done because the PIC might have another vector 1709 * that it wants to inject. 
Also, if the APIC has a pending interrupt 1710 * that was preempted by the ExtInt then it allows us to inject the 1711 * APIC vector as soon as possible. 1712 */ 1713 need_intr_window = 1; 1714 done: 1715 /* 1716 * The guest can modify the TPR by writing to %CR8. In guest mode 1717 * the processor reflects this write to V_TPR without hypervisor 1718 * intervention. 1719 * 1720 * The guest can also modify the TPR by writing to it via the memory 1721 * mapped APIC page. In this case, the write will be emulated by the 1722 * hypervisor. For this reason V_TPR must be updated before every 1723 * VMRUN. 1724 */ 1725 v_tpr = vlapic_get_cr8(vlapic); 1726 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1727 if (ctrl->v_tpr != v_tpr) { 1728 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1729 ctrl->v_tpr, v_tpr); 1730 ctrl->v_tpr = v_tpr; 1731 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1732 } 1733 1734 if (pending_apic_vector) { 1735 /* 1736 * If an APIC vector is being injected then interrupt window 1737 * exiting is not possible on this VMRUN. 1738 */ 1739 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1740 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1741 pending_apic_vector); 1742 1743 ctrl->v_irq = 1; 1744 ctrl->v_ign_tpr = 0; 1745 ctrl->v_intr_vector = pending_apic_vector; 1746 ctrl->v_intr_prio = pending_apic_vector >> 4; 1747 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1748 } else if (need_intr_window) { 1749 /* 1750 * We use V_IRQ in conjunction with the VINTR intercept to 1751 * trap into the hypervisor as soon as a virtual interrupt 1752 * can be delivered. 1753 * 1754 * Since injected events are not subject to intercept checks 1755 * we need to ensure that the V_IRQ is not actually going to 1756 * be delivered on VM entry. The KASSERT below enforces this. 1757 */ 1758 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1759 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1760 ("Bogus intr_window_exiting: eventinj (%#lx), " 1761 "intr_shadow (%u), rflags (%#lx)", 1762 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1763 enable_intr_window_exiting(sc, vcpu); 1764 } else { 1765 disable_intr_window_exiting(sc, vcpu); 1766 } 1767 } 1768 1769 static __inline void 1770 restore_host_tss(void) 1771 { 1772 struct system_segment_descriptor *tss_sd; 1773 1774 /* 1775 * The TSS descriptor was in use prior to launching the guest so it 1776 * has been marked busy. 1777 * 1778 * 'ltr' requires the descriptor to be marked available so change the 1779 * type to "64-bit available TSS". 1780 */ 1781 tss_sd = PCPU_GET(tss); 1782 tss_sd->sd_type = SDT_SYSTSS; 1783 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1784 } 1785 1786 static void 1787 check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1788 { 1789 struct svm_vcpu *vcpustate; 1790 struct vmcb_ctrl *ctrl; 1791 long eptgen; 1792 bool alloc_asid; 1793 1794 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1795 "active on cpu %u", __func__, thiscpu)); 1796 1797 vcpustate = svm_get_vcpu(sc, vcpuid); 1798 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1799 1800 /* 1801 * The TLB entries associated with the vcpu's ASID are not valid 1802 * if either of the following conditions is true: 1803 * 1804 * 1. The vcpu's ASID generation is different than the host cpu's 1805 * ASID generation. This happens when the vcpu migrates to a new 1806 * host cpu. It can also happen when the number of vcpus executing 1807 * on a host cpu is greater than the number of ASIDs available. 1808 * 1809 * 2. 
The pmap generation number is different than the value cached in 1810 * the 'vcpustate'. This happens when the host invalidates pages 1811 * belonging to the guest. 1812 * 1813 * asidgen eptgen Action 1814 * mismatch mismatch 1815 * 0 0 (a) 1816 * 0 1 (b1) or (b2) 1817 * 1 0 (c) 1818 * 1 1 (d) 1819 * 1820 * (a) There is no mismatch in eptgen or ASID generation and therefore 1821 * no further action is needed. 1822 * 1823 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1824 * retained and the TLB entries associated with this ASID 1825 * are flushed by VMRUN. 1826 * 1827 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1828 * allocated. 1829 * 1830 * (c) A new ASID is allocated. 1831 * 1832 * (d) A new ASID is allocated. 1833 */ 1834 1835 alloc_asid = false; 1836 eptgen = pmap->pm_eptgen; 1837 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1838 1839 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1840 alloc_asid = true; /* (c) and (d) */ 1841 } else if (vcpustate->eptgen != eptgen) { 1842 if (flush_by_asid()) 1843 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1844 else 1845 alloc_asid = true; /* (b2) */ 1846 } else { 1847 /* 1848 * This is the common case (a). 1849 */ 1850 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1851 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1852 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1853 } 1854 1855 if (alloc_asid) { 1856 if (++asid[thiscpu].num >= nasid) { 1857 asid[thiscpu].num = 1; 1858 if (++asid[thiscpu].gen == 0) 1859 asid[thiscpu].gen = 1; 1860 /* 1861 * If this cpu does not support "flush-by-asid" 1862 * then flush the entire TLB on a generation 1863 * bump. Subsequent ASID allocation in this 1864 * generation can be done without a TLB flush. 1865 */ 1866 if (!flush_by_asid()) 1867 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1868 } 1869 vcpustate->asid.gen = asid[thiscpu].gen; 1870 vcpustate->asid.num = asid[thiscpu].num; 1871 1872 ctrl->asid = vcpustate->asid.num; 1873 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1874 /* 1875 * If this cpu supports "flush-by-asid" then the TLB 1876 * was not flushed after the generation bump. The TLB 1877 * is flushed selectively after every new ASID allocation. 1878 */ 1879 if (flush_by_asid()) 1880 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1881 } 1882 vcpustate->eptgen = eptgen; 1883 1884 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1885 KASSERT(ctrl->asid == vcpustate->asid.num, 1886 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1887 } 1888 1889 static __inline void 1890 disable_gintr(void) 1891 { 1892 1893 __asm __volatile("clgi"); 1894 } 1895 1896 static __inline void 1897 enable_gintr(void) 1898 { 1899 1900 __asm __volatile("stgi"); 1901 } 1902 1903 /* 1904 * Start vcpu with specified RIP. 
1905 */ 1906 static int 1907 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1908 struct vm_eventinfo *evinfo) 1909 { 1910 struct svm_regctx *gctx; 1911 struct svm_softc *svm_sc; 1912 struct svm_vcpu *vcpustate; 1913 struct vmcb_state *state; 1914 struct vmcb_ctrl *ctrl; 1915 struct vm_exit *vmexit; 1916 struct vlapic *vlapic; 1917 struct vm *vm; 1918 uint64_t vmcb_pa; 1919 int handled; 1920 1921 svm_sc = arg; 1922 vm = svm_sc->vm; 1923 1924 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1925 state = svm_get_vmcb_state(svm_sc, vcpu); 1926 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1927 vmexit = vm_exitinfo(vm, vcpu); 1928 vlapic = vm_lapic(vm, vcpu); 1929 1930 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1931 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1932 1933 if (vcpustate->lastcpu != curcpu) { 1934 /* 1935 * Force new ASID allocation by invalidating the generation. 1936 */ 1937 vcpustate->asid.gen = 0; 1938 1939 /* 1940 * Invalidate the VMCB state cache by marking all fields dirty. 1941 */ 1942 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1943 1944 /* 1945 * XXX 1946 * Setting 'vcpustate->lastcpu' here is bit premature because 1947 * we may return from this function without actually executing 1948 * the VMRUN instruction. This could happen if a rendezvous 1949 * or an AST is pending on the first time through the loop. 1950 * 1951 * This works for now but any new side-effects of vcpu 1952 * migration should take this case into account. 1953 */ 1954 vcpustate->lastcpu = curcpu; 1955 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1956 } 1957 1958 svm_msr_guest_enter(svm_sc, vcpu); 1959 1960 /* Update Guest RIP */ 1961 state->rip = rip; 1962 1963 do { 1964 /* 1965 * Disable global interrupts to guarantee atomicity during 1966 * loading of guest state. This includes not only the state 1967 * loaded by the "vmrun" instruction but also software state 1968 * maintained by the hypervisor: suspended and rendezvous 1969 * state, NPT generation number, vlapic interrupts etc. 1970 */ 1971 disable_gintr(); 1972 1973 if (vcpu_suspended(evinfo)) { 1974 enable_gintr(); 1975 vm_exit_suspended(vm, vcpu, state->rip); 1976 break; 1977 } 1978 1979 if (vcpu_rendezvous_pending(evinfo)) { 1980 enable_gintr(); 1981 vm_exit_rendezvous(vm, vcpu, state->rip); 1982 break; 1983 } 1984 1985 if (vcpu_reqidle(evinfo)) { 1986 enable_gintr(); 1987 vm_exit_reqidle(vm, vcpu, state->rip); 1988 break; 1989 } 1990 1991 /* We are asked to give the cpu by scheduler. */ 1992 if (vcpu_should_yield(vm, vcpu)) { 1993 enable_gintr(); 1994 vm_exit_astpending(vm, vcpu, state->rip); 1995 break; 1996 } 1997 1998 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1999 2000 /* Activate the nested pmap on 'curcpu' */ 2001 CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active); 2002 2003 /* 2004 * Check the pmap generation and the ASID generation to 2005 * ensure that the vcpu does not use stale TLB mappings. 2006 */ 2007 check_asid(svm_sc, vcpu, pmap, curcpu); 2008 2009 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 2010 vcpustate->dirty = 0; 2011 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2012 2013 /* Launch Virtual Machine. */ 2014 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 2015 svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]); 2016 2017 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2018 2019 /* 2020 * The host GDTR and IDTR is saved by VMRUN and restored 2021 * automatically on #VMEXIT. However, the host TSS needs 2022 * to be restored explicitly. 
		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and, if required, return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}
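
/*
 * The AMD/SVM entry points handed to the generic vmm layer, which dispatches
 * into this implementation through the table below. Note that the members of
 * 'struct vmm_ops' are matched positionally, in declaration order, rather
 * than with designated initializers.
 */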
struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	vmcb_getdesc,
	vmcb_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};