/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
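
/*
 * Illustrative example (values follow from the MSR ranges defined above):
 * MSR_EFER (0xC0000080) falls in the AMD 6th generation range, so 'base'
 * is 0x2000 (the size of the Pentium range), 'off' is 0x80, *index is
 * (0x80 + 0x2000) / 4 = 0x820 and *bit is (0x80 % 4) * 2 = 0. To grant
 * the guest direct access, svm_msr_perm() below would clear bit 0 (read)
 * and bit 1 (write) of perm_bitmap[0x820].
 */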

/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}
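
	/*
	 * Layout note: per APMv2 the low 16 bits of the CR intercept vector
	 * control reads of CR0 through CR15 and the high 16 bits control
	 * writes, which is why 'mask' above sets both BIT(n) and
	 * BIT(n) << 16 for each register.
	 */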

	/*
	 * Intercept everything when tracing guest exceptions otherwise
	 * just intercept machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflag and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}
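
/*
 * Summary of the mode selection above:
 *
 *	CR0.PG	CR4.PAE	EFER.LME	paging mode
 *	  0	   x	   x		flat
 *	  1	   0	   x		32-bit
 *	  1	   1	   0		PAE
 *	  1	   1	   1		64-bit (long mode)
 */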

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0
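
/*
 * EXITINFO1 fields decoded by the I/O intercept handler below: bit 0 is
 * the direction (1 = in), bit 2 flags a string instruction (ins/outs),
 * bit 3 flags a rep prefix, bits 6:4 give the operand size in bytes,
 * bits 9:7 encode the address size (1/2/4 for 16/32/64 bit), bits 12:10
 * hold the effective segment when decode assist is available, and
 * bits 31:16 contain the port number.
 */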

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch(paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif
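
/*
 * The EVENTINJ field constructed by svm_eventinject() below carries the
 * vector in bits 7:0 and the event type in bits 10:8; a valid error code
 * is placed in bits 63:32. VMCB_EVENTINJ_VALID marks the injection itself
 * as valid and VMCB_EVENTINJ_EC_VALID marks the error code as valid.
 */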

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;
	int pending;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/*
	 * If V_IRQ indicates that the interrupt injection attempted on the
	 * last VMRUN was successful then update the vlapic accordingly.
	 */
	if (ctrl->v_intr_vector != 0) {
		pending = ctrl->v_irq;
		KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
		    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
		KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
		    pending ? "pending" : "accepted");
		if (!pending)
			vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
	}
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

#ifdef KTR
	if (ctrl->v_intr_vector == 0)
		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	else
		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
#endif
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;	/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_NMI:
		return ("nmi");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	case VMCB_EXIT_IRET:
		return ("iret");
	case VMCB_EXIT_MONITOR:
		return ("monitor");
	case VMCB_EXIT_MWAIT:
		return ("mwait");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window, pending_apic_vector;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;
	pending_apic_vector = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	if (!vm_extint_pending(sc->vm, vcpu)) {
		/*
		 * APIC interrupts are delivered using the V_IRQ offload.
		 *
		 * The primary benefit is that the hypervisor doesn't need to
		 * deal with the various conditions that inhibit interrupts.
		 * It also means that TPR changes via CR8 will be handled
		 * without any hypervisor involvement.
		 *
		 * Note that the APIC vector must remain pending in the vIRR
		 * until it is confirmed that it was delivered to the guest.
		 * This can be confirmed based on the value of V_IRQ at the
		 * next #VMEXIT (1 = pending, 0 = delivered).
		 *
		 * Also note that it is possible that another higher priority
		 * vector can become pending before this vector is delivered
		 * to the guest. This is alright because vcpu_notify_event()
		 * will send an IPI and force the vcpu to trap back into the
		 * hypervisor. The higher priority vector will be injected on
		 * the next VMRUN.
		 */
		if (vlapic_pending_intr(vlapic, &vector)) {
			KASSERT(vector >= 16 && vector <= 255,
			    ("invalid vector %d from local APIC", vector));
			pending_apic_vector = vector;
		}
		goto done;
	}

	/* Ask the legacy pic for a vector to inject */
	vatpic_pending_intr(sc->vm, &vector);
	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
	    vector));

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	/*
	 * Legacy PIC interrupts are delivered via the event injection
	 * mechanism.
	 */
	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	vm_extint_clear(sc->vm, vcpu);
	vatpic_intr_accepted(sc->vm, vector);

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (pending_apic_vector) {
		/*
		 * If an APIC vector is being injected then interrupt window
		 * exiting is not possible on this VMRUN.
		 */
		KASSERT(!need_intr_window, ("intr_window exiting impossible"));
		VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
		    pending_apic_vector);

		ctrl->v_irq = 1;
		ctrl->v_ign_tpr = 0;
		ctrl->v_intr_vector = pending_apic_vector;
		ctrl->v_intr_prio = pending_apic_vector >> 4;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	} else if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}
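
/*
 * Overview of the run loop in svm_vmrun() below: global interrupts are
 * disabled, pending suspend/rendezvous/reqidle/AST conditions are
 * checked, events are injected, the nested pmap is activated and the
 * ASID is validated, VMRUN is executed, and finally the host TSS is
 * restored and global interrupts are re-enabled before the #VMEXIT is
 * processed.
 */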
1916 */ 1917 static int 1918 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1919 struct vm_eventinfo *evinfo) 1920 { 1921 struct svm_regctx *gctx; 1922 struct svm_softc *svm_sc; 1923 struct svm_vcpu *vcpustate; 1924 struct vmcb_state *state; 1925 struct vmcb_ctrl *ctrl; 1926 struct vm_exit *vmexit; 1927 struct vlapic *vlapic; 1928 struct vm *vm; 1929 uint64_t vmcb_pa; 1930 int handled; 1931 1932 svm_sc = arg; 1933 vm = svm_sc->vm; 1934 1935 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1936 state = svm_get_vmcb_state(svm_sc, vcpu); 1937 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1938 vmexit = vm_exitinfo(vm, vcpu); 1939 vlapic = vm_lapic(vm, vcpu); 1940 1941 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1942 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1943 1944 if (vcpustate->lastcpu != curcpu) { 1945 /* 1946 * Force new ASID allocation by invalidating the generation. 1947 */ 1948 vcpustate->asid.gen = 0; 1949 1950 /* 1951 * Invalidate the VMCB state cache by marking all fields dirty. 1952 */ 1953 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1954 1955 /* 1956 * XXX 1957 * Setting 'vcpustate->lastcpu' here is bit premature because 1958 * we may return from this function without actually executing 1959 * the VMRUN instruction. This could happen if a rendezvous 1960 * or an AST is pending on the first time through the loop. 1961 * 1962 * This works for now but any new side-effects of vcpu 1963 * migration should take this case into account. 1964 */ 1965 vcpustate->lastcpu = curcpu; 1966 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1967 } 1968 1969 svm_msr_guest_enter(svm_sc, vcpu); 1970 1971 /* Update Guest RIP */ 1972 state->rip = rip; 1973 1974 do { 1975 /* 1976 * Disable global interrupts to guarantee atomicity during 1977 * loading of guest state. This includes not only the state 1978 * loaded by the "vmrun" instruction but also software state 1979 * maintained by the hypervisor: suspended and rendezvous 1980 * state, NPT generation number, vlapic interrupts etc. 1981 */ 1982 disable_gintr(); 1983 1984 if (vcpu_suspended(evinfo)) { 1985 enable_gintr(); 1986 vm_exit_suspended(vm, vcpu, state->rip); 1987 break; 1988 } 1989 1990 if (vcpu_rendezvous_pending(evinfo)) { 1991 enable_gintr(); 1992 vm_exit_rendezvous(vm, vcpu, state->rip); 1993 break; 1994 } 1995 1996 if (vcpu_reqidle(evinfo)) { 1997 enable_gintr(); 1998 vm_exit_reqidle(vm, vcpu, state->rip); 1999 break; 2000 } 2001 2002 /* We are asked to give the cpu by scheduler. */ 2003 if (vcpu_should_yield(vm, vcpu)) { 2004 enable_gintr(); 2005 vm_exit_astpending(vm, vcpu, state->rip); 2006 break; 2007 } 2008 2009 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2010 2011 /* Activate the nested pmap on 'curcpu' */ 2012 CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active); 2013 2014 /* 2015 * Check the pmap generation and the ASID generation to 2016 * ensure that the vcpu does not use stale TLB mappings. 2017 */ 2018 check_asid(svm_sc, vcpu, pmap, curcpu); 2019 2020 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 2021 vcpustate->dirty = 0; 2022 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2023 2024 /* Launch Virtual Machine. */ 2025 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 2026 svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]); 2027 2028 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2029 2030 /* 2031 * The host GDTR and IDTR is saved by VMRUN and restored 2032 * automatically on #VMEXIT. However, the host TSS needs 2033 * to be restored explicitly. 

static void
svm_vmcleanup(void *arg)
{
        struct svm_softc *sc = arg;

        contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
        contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
        free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

        switch (reg) {
        case VM_REG_GUEST_RBX:
                return (&regctx->sctx_rbx);
        case VM_REG_GUEST_RCX:
                return (&regctx->sctx_rcx);
        case VM_REG_GUEST_RDX:
                return (&regctx->sctx_rdx);
        case VM_REG_GUEST_RDI:
                return (&regctx->sctx_rdi);
        case VM_REG_GUEST_RSI:
                return (&regctx->sctx_rsi);
        case VM_REG_GUEST_RBP:
                return (&regctx->sctx_rbp);
        case VM_REG_GUEST_R8:
                return (&regctx->sctx_r8);
        case VM_REG_GUEST_R9:
                return (&regctx->sctx_r9);
        case VM_REG_GUEST_R10:
                return (&regctx->sctx_r10);
        case VM_REG_GUEST_R11:
                return (&regctx->sctx_r11);
        case VM_REG_GUEST_R12:
                return (&regctx->sctx_r12);
        case VM_REG_GUEST_R13:
                return (&regctx->sctx_r13);
        case VM_REG_GUEST_R14:
                return (&regctx->sctx_r14);
        case VM_REG_GUEST_R15:
                return (&regctx->sctx_r15);
        default:
                return (NULL);
        }
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
        struct svm_softc *svm_sc;
        register_t *reg;

        svm_sc = arg;

        if (ident == VM_REG_GUEST_INTR_SHADOW) {
                return (svm_get_intr_shadow(svm_sc, vcpu, val));
        }

        if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
                return (0);
        }

        reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

        if (reg != NULL) {
                *val = *reg;
                return (0);
        }

        VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
        return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
        struct svm_softc *svm_sc;
        register_t *reg;

        svm_sc = arg;

        if (ident == VM_REG_GUEST_INTR_SHADOW) {
                return (svm_modify_intr_shadow(svm_sc, vcpu, val));
        }

        if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
                return (0);
        }

        reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

        if (reg != NULL) {
                *reg = val;
                return (0);
        }

        /*
         * XXX deal with CR3 and invalidate TLB entries tagged with the
         * vcpu's ASID. This needs to be treated differently depending on
         * whether 'running' is true/false.
         */

        VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
        return (EINVAL);
}
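
/*
 * Illustrative usage sketch (not compiled): how a caller holding the softc
 * returned by svm_vminit() might read and update a guest general-purpose
 * register through the accessors above.  The function name and the choice
 * of %rbx are hypothetical and exist only for this example.
 */
#if 0
static int
example_bump_guest_rbx(struct svm_softc *sc, int vcpu)
{
        uint64_t rbx;
        int error;

        error = svm_getreg(sc, vcpu, VM_REG_GUEST_RBX, &rbx);
        if (error == 0)
                error = svm_setreg(sc, vcpu, VM_REG_GUEST_RBX, rbx + 1);
        return (error);
}
#endif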

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
        struct svm_softc *sc;
        int error;

        sc = arg;
        error = 0;
        switch (type) {
        case VM_CAP_HALT_EXIT:
                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_HLT, val);
                break;
        case VM_CAP_PAUSE_EXIT:
                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_PAUSE, val);
                break;
        case VM_CAP_UNRESTRICTED_GUEST:
                /* Unrestricted guest execution cannot be disabled in SVM */
                if (val == 0)
                        error = EINVAL;
                break;
        default:
                error = ENOENT;
                break;
        }
        return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
        struct svm_softc *sc;
        int error;

        sc = arg;
        error = 0;

        switch (type) {
        case VM_CAP_HALT_EXIT:
                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_HLT);
                break;
        case VM_CAP_PAUSE_EXIT:
                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_PAUSE);
                break;
        case VM_CAP_UNRESTRICTED_GUEST:
                *retval = 1;    /* unrestricted guest is always enabled */
                break;
        default:
                error = ENOENT;
                break;
        }
        return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
        struct svm_softc *svm_sc;
        struct vlapic *vlapic;

        svm_sc = arg;
        vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
        vlapic->vm = svm_sc->vm;
        vlapic->vcpuid = vcpuid;
        vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

        vlapic_init(vlapic);

        return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

        vlapic_cleanup(vlapic);
        free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
        svm_init,
        svm_cleanup,
        svm_restore,
        svm_vminit,
        svm_vmrun,
        svm_vmcleanup,
        svm_getreg,
        svm_setreg,
        vmcb_getdesc,
        vmcb_setdesc,
        svm_getcap,
        svm_setcap,
        svm_npt_alloc,
        svm_npt_free,
        svm_vlapic_init,
        svm_vlapic_cleanup
};
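
/*
 * Note (illustrative, not compiled): the generic vmm layer selects this ops
 * table on AMD hosts and dispatches into the SVM backend through its
 * function pointers.  The sketch below shows the rough signature of the run
 * entry, as implied by svm_vmrun() above; the typedef name is hypothetical
 * and does not reproduce the exact wrappers used by the generic layer.
 */
#if 0
typedef int (*example_vmrun_t)(void *vmi, int vcpu, register_t rip,
    pmap_t pmap, struct vm_eventinfo *evinfo);
#endif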