/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for a MSR in permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
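/*
 * A worked example of the index/bit computation above; the values follow
 * directly from the ranges and arithmetic in svm_msr_index().
 *
 * MSR 0xC0000082 (MSR_LSTAR) falls in the AMD 6th generation range, so:
 *	off   = 0xC0000082 - MSR_AMD6TH_START = 0x82
 *	base  = 0x2000 (the number of MSRs covered by the Pentium range)
 *	index = (0x82 + 0x2000) / 4 = 0x820
 *	bit   = (0xC0000082 % 4) * 2 = 4
 *
 * i.e. bit 4 of byte 0x820 in the permission bitmap is the read-intercept
 * bit for that MSR and bit 5 is the write-intercept bit.
 */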
/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2,
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
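	/*
	 * Note on the bitmap convention implied by svm_msr_perm() above: a
	 * set bit means the access is intercepted, so the 0xFF fill above
	 * intercepts everything and the svm_msr_rw_ok()/svm_msr_rd_ok()
	 * calls below clear bits to grant the guest direct access.
	 */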
	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}
static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);
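	/*
	 * Decode the IOIO intercept's EXITINFO1 field. The layout assumed by
	 * the shifts and masks below is: bit 0 = direction (1 for "in"),
	 * bit 2 = string instruction, bit 3 = rep prefix, bits 6:4 = operand
	 * size in bytes, bits 9:7 = address size (see
	 * svm_inout_str_addrsize()), bits 12:10 = effective segment and
	 * bits 31:16 = port number.
	 */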
	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch(paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
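/*
 * The EVENTINJ encoding as assembled by the code below: the vector goes in
 * bits 7:0, the event type in bits 10:8 and the optional error code in bits
 * 63:32, with VMCB_EVENTINJ_VALID and VMCB_EVENTINJ_EC_VALID marking the
 * event and the error code as valid respectively.
 */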
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_NMI:
		return ("nmi");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	case VMCB_EXIT_IRET:
		return ("iret");
	case VMCB_EXIT_MONITOR:
		return ("monitor");
	case VMCB_EXIT_MWAIT:
		return ("mwait");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window;
	int extint_pending;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */
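	/*
	 * An illustrative note on the allocator below: guest ASIDs run from
	 * 1 through nasid - 1 and the per-cpu generation is bumped each time
	 * the counter wraps. svm_init() seeds the per-cpu state to
	 * {gen = ~0, num = nasid - 1}, so the very first allocation on a
	 * host cpu wraps to {1, 1}.
	 */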
	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}

static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
{

	/* Save host control debug registers. */
	gctx->host_dr7 = rdr7();
	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
	 * VMCB.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/* Save host debug registers. */
	gctx->host_dr0 = rdr0();
	gctx->host_dr1 = rdr1();
	gctx->host_dr2 = rdr2();
	gctx->host_dr3 = rdr3();
	gctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(gctx->sctx_dr0);
	load_dr1(gctx->sctx_dr1);
	load_dr2(gctx->sctx_dr2);
	load_dr3(gctx->sctx_dr3);
}

static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
{

	/* Save guest debug registers. */
	gctx->sctx_dr0 = rdr0();
	gctx->sctx_dr1 = rdr1();
	gctx->sctx_dr2 = rdr2();
	gctx->sctx_dr3 = rdr3();

	/*
	 * Restore host debug registers. Restore DR7 and DEBUGCTL
	 * last.
	 */
	load_dr0(gctx->host_dr0);
	load_dr1(gctx->host_dr1);
	load_dr2(gctx->host_dr2);
	load_dr3(gctx->host_dr3);
	load_dr6(gctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
	load_dr7(gctx->host_dr7);
}

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vm, vcpu)) {
			enable_gintr();
			vm_exit_debug(vm, vcpu, state->rip);
			break;
		}

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'curcpu' */
		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, curcpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
		svm_dr_leave_guest(gctx);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID.
	 * This needs to be treated differently depending on whether
	 * 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	vmcb_getdesc,
	vmcb_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};
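
/*
 * The initializer above is positional and is assumed to follow the member
 * order of 'struct vmm_ops' as declared in <machine/vmm.h>; the machine
 * independent vmm layer dispatches through this table when the host CPU
 * advertises AMD SVM support.
 */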