/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
#define AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */

#define VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
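	/*
	 * AMDID2_SVM is the SVM bit in CPUID Fn8000_0001 ECX, and
	 * VM_CR.SVMDIS is set when the BIOS has disabled (and typically
	 * locked out) SVM; both checks below rely on that.
	 */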
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: the lower bit for read and the higher
 * bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}

/*
 * Allow the vcpu to read or write the 'msr' without trapping into the
 * hypervisor.
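 *
 * As an illustration of the layout computed by svm_msr_index() above:
 * MSR_EFER (0xC0000080) falls in the AMD 6th generation range, so it maps
 * to byte index (0x80 + 0x2000) / 4 = 0x820, with bit 0 covering reads and
 * bit 1 covering writes.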
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
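			 * (Vector 2 is the NMI, which is intercepted via
			 * VMCB_INTCPT_NMI below, and vector 9, the coprocessor
			 * segment overrun, is not generated by modern CPUs.)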
443 */ 444 if (n == 2 || n == 9) { 445 continue; 446 } 447 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); 448 } 449 } else { 450 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 451 } 452 453 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 454 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 455 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 456 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 457 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 458 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 459 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 460 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 461 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 462 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 463 VMCB_INTCPT_FERR_FREEZE); 464 465 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 466 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 467 468 /* 469 * From section "Canonicalization and Consistency Checks" in APMv2 470 * the VMRUN intercept bit must be set to pass the consistency check. 471 */ 472 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 473 474 /* 475 * The ASID will be set to a non-zero value just before VMRUN. 476 */ 477 ctrl->asid = 0; 478 479 /* 480 * Section 15.21.1, Interrupt Masking in EFLAGS 481 * Section 15.21.2, Virtualizing APIC.TPR 482 * 483 * This must be set for %rflag and %cr8 isolation of guest and host. 484 */ 485 ctrl->v_intr_masking = 1; 486 487 /* Enable Last Branch Record aka LBR for debugging */ 488 ctrl->lbr_virt_en = 1; 489 state->dbgctl = BIT(0); 490 491 /* EFER_SVM must always be set when the guest is executing */ 492 state->efer = EFER_SVM; 493 494 /* Set up the PAT to power-on state */ 495 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 496 PAT_VALUE(1, PAT_WRITE_THROUGH) | 497 PAT_VALUE(2, PAT_UNCACHED) | 498 PAT_VALUE(3, PAT_UNCACHEABLE) | 499 PAT_VALUE(4, PAT_WRITE_BACK) | 500 PAT_VALUE(5, PAT_WRITE_THROUGH) | 501 PAT_VALUE(6, PAT_UNCACHED) | 502 PAT_VALUE(7, PAT_UNCACHEABLE); 503 } 504 505 /* 506 * Initialize a virtual machine. 507 */ 508 static void * 509 svm_vminit(struct vm *vm, pmap_t pmap) 510 { 511 struct svm_softc *svm_sc; 512 struct svm_vcpu *vcpu; 513 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 514 int i; 515 516 svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO); 517 svm_sc->vm = vm; 518 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 519 520 /* 521 * Intercept read and write accesses to all MSRs. 522 */ 523 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 524 525 /* 526 * Access to the following MSRs is redirected to the VMCB when the 527 * guest is executing. Therefore it is safe to allow the guest to 528 * read/write these MSRs directly without hypervisor involvement. 
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmcb_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define UNHANDLED 0

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event into the vcpu as described in section 15.20, "Event
 * injection".
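 *
 * As a concrete illustration of the encoding built below: reflecting a #GP
 * with an error code results in EVENTINJ = IDT_GP |
 * (VMCB_EVENTINJ_TYPE_EXCEPTION << 8) | VMCB_EVENTINJ_EC_VALID |
 * VMCB_EVENTINJ_VALID, with the error code placed in bits 63:32.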
856 */ 857 static void 858 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 859 uint32_t error, bool ec_valid) 860 { 861 struct vmcb_ctrl *ctrl; 862 863 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 864 865 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 866 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 867 868 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 869 __func__, vector)); 870 871 switch (intr_type) { 872 case VMCB_EVENTINJ_TYPE_INTR: 873 case VMCB_EVENTINJ_TYPE_NMI: 874 case VMCB_EVENTINJ_TYPE_INTn: 875 break; 876 case VMCB_EVENTINJ_TYPE_EXCEPTION: 877 if (vector >= 0 && vector <= 31 && vector != 2) 878 break; 879 /* FALLTHROUGH */ 880 default: 881 panic("%s: invalid intr_type/vector: %d/%d", __func__, 882 intr_type, vector); 883 } 884 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 885 if (ec_valid) { 886 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 887 ctrl->eventinj |= (uint64_t)error << 32; 888 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 889 intrtype_to_str(intr_type), vector, error); 890 } else { 891 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 892 intrtype_to_str(intr_type), vector); 893 } 894 } 895 896 static void 897 svm_update_virqinfo(struct svm_softc *sc, int vcpu) 898 { 899 struct vm *vm; 900 struct vlapic *vlapic; 901 struct vmcb_ctrl *ctrl; 902 int pending; 903 904 vm = sc->vm; 905 vlapic = vm_lapic(vm, vcpu); 906 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 907 908 /* Update %cr8 in the emulated vlapic */ 909 vlapic_set_cr8(vlapic, ctrl->v_tpr); 910 911 /* 912 * If V_IRQ indicates that the interrupt injection attempted on then 913 * last VMRUN was successful then update the vlapic accordingly. 914 */ 915 if (ctrl->v_intr_vector != 0) { 916 pending = ctrl->v_irq; 917 KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " 918 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 919 KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 920 VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, 921 pending ? "pending" : "accepted"); 922 if (!pending) 923 vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); 924 } 925 } 926 927 static void 928 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 929 { 930 struct vmcb_ctrl *ctrl; 931 uint64_t intinfo; 932 933 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 934 intinfo = ctrl->exitintinfo; 935 if (!VMCB_EXITINTINFO_VALID(intinfo)) 936 return; 937 938 /* 939 * From APMv2, Section "Intercepts during IDT interrupt delivery" 940 * 941 * If a #VMEXIT happened during event delivery then record the event 942 * that was being delivered. 
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

#ifdef KTR
	if (ctrl->v_intr_vector == 0)
		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	else
		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
#endif
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
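 * (No separate flag is kept for this; the IRET intercept itself serves as
 * the "NMI blocked" indicator, see nmi_blocked() below.)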
1035 */ 1036 static int 1037 nmi_blocked(struct svm_softc *sc, int vcpu) 1038 { 1039 int blocked; 1040 1041 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1042 VMCB_INTCPT_IRET); 1043 return (blocked); 1044 } 1045 1046 static void 1047 enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1048 { 1049 1050 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1051 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1052 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1053 } 1054 1055 static void 1056 clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1057 { 1058 int error; 1059 1060 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1061 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1062 /* 1063 * When the IRET intercept is cleared the vcpu will attempt to execute 1064 * the "iret" when it runs next. However, it is possible to inject 1065 * another NMI into the vcpu before the "iret" has actually executed. 1066 * 1067 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1068 * it will trap back into the hypervisor. If an NMI is pending for 1069 * the vcpu it will be injected into the guest. 1070 * 1071 * XXX this needs to be fixed 1072 */ 1073 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1074 1075 /* 1076 * Set 'intr_shadow' to prevent an NMI from being injected on the 1077 * immediate VMRUN. 1078 */ 1079 error = svm_modify_intr_shadow(sc, vcpu, 1); 1080 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1081 } 1082 1083 static int 1084 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1085 bool *retu) 1086 { 1087 int error; 1088 1089 if (lapic_msr(num)) 1090 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1091 else if (num == MSR_EFER) 1092 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val); 1093 else 1094 error = svm_wrmsr(sc, vcpu, num, val, retu); 1095 1096 return (error); 1097 } 1098 1099 static int 1100 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1101 { 1102 struct vmcb_state *state; 1103 struct svm_regctx *ctx; 1104 uint64_t result; 1105 int error; 1106 1107 if (lapic_msr(num)) 1108 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1109 else 1110 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1111 1112 if (error == 0) { 1113 state = svm_get_vmcb_state(sc, vcpu); 1114 ctx = svm_get_guest_regctx(sc, vcpu); 1115 state->rax = result & 0xffffffff; 1116 ctx->sctx_rdx = result >> 32; 1117 } 1118 1119 return (error); 1120 } 1121 1122 #ifdef KTR 1123 static const char * 1124 exit_reason_to_str(uint64_t reason) 1125 { 1126 static char reasonbuf[32]; 1127 1128 switch (reason) { 1129 case VMCB_EXIT_INVALID: 1130 return ("invalvmcb"); 1131 case VMCB_EXIT_SHUTDOWN: 1132 return ("shutdown"); 1133 case VMCB_EXIT_NPF: 1134 return ("nptfault"); 1135 case VMCB_EXIT_PAUSE: 1136 return ("pause"); 1137 case VMCB_EXIT_HLT: 1138 return ("hlt"); 1139 case VMCB_EXIT_CPUID: 1140 return ("cpuid"); 1141 case VMCB_EXIT_IO: 1142 return ("inout"); 1143 case VMCB_EXIT_MC: 1144 return ("mchk"); 1145 case VMCB_EXIT_INTR: 1146 return ("extintr"); 1147 case VMCB_EXIT_NMI: 1148 return ("nmi"); 1149 case VMCB_EXIT_VINTR: 1150 return ("vintr"); 1151 case VMCB_EXIT_MSR: 1152 return ("msr"); 1153 case VMCB_EXIT_IRET: 1154 return ("iret"); 1155 case VMCB_EXIT_MONITOR: 1156 return ("monitor"); 1157 case VMCB_EXIT_MWAIT: 1158 return ("mwait"); 1159 default: 1160 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1161 return (reasonbuf); 1162 } 1163 } 1164 #endif 

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
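		/*
		 * EXITINFO1 distinguishes the access type: 1 for WRMSR and
		 * 0 for RDMSR. The MSR number is in %ecx and, for WRMSR,
		 * the value is in %edx:%eax.
		 */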
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
1445 */ 1446 vm_exit_svm(vmexit, code, info1, info2); 1447 } else { 1448 /* 1449 * The exitcode and collateral have been populated. 1450 * The VM exit will be processed further in userland. 1451 */ 1452 } 1453 } 1454 return (handled); 1455 } 1456 1457 static void 1458 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1459 { 1460 uint64_t intinfo; 1461 1462 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1463 return; 1464 1465 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1466 "valid: %#lx", __func__, intinfo)); 1467 1468 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1469 VMCB_EXITINTINFO_VECTOR(intinfo), 1470 VMCB_EXITINTINFO_EC(intinfo), 1471 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1472 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1473 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1474 } 1475 1476 /* 1477 * Inject event to virtual cpu. 1478 */ 1479 static void 1480 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1481 { 1482 struct vmcb_ctrl *ctrl; 1483 struct vmcb_state *state; 1484 struct svm_vcpu *vcpustate; 1485 uint8_t v_tpr; 1486 int vector, need_intr_window, pending_apic_vector; 1487 1488 state = svm_get_vmcb_state(sc, vcpu); 1489 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1490 vcpustate = svm_get_vcpu(sc, vcpu); 1491 1492 need_intr_window = 0; 1493 pending_apic_vector = 0; 1494 1495 if (vcpustate->nextrip != state->rip) { 1496 ctrl->intr_shadow = 0; 1497 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1498 "cleared due to rip change: %#lx/%#lx", 1499 vcpustate->nextrip, state->rip); 1500 } 1501 1502 /* 1503 * Inject pending events or exceptions for this vcpu. 1504 * 1505 * An event might be pending because the previous #VMEXIT happened 1506 * during event delivery (i.e. ctrl->exitintinfo). 1507 * 1508 * An event might also be pending because an exception was injected 1509 * by the hypervisor (e.g. #PF during instruction emulation). 1510 */ 1511 svm_inj_intinfo(sc, vcpu); 1512 1513 /* NMI event has priority over interrupts. */ 1514 if (vm_nmi_pending(sc->vm, vcpu)) { 1515 if (nmi_blocked(sc, vcpu)) { 1516 /* 1517 * Can't inject another NMI if the guest has not 1518 * yet executed an "iret" after the last NMI. 1519 */ 1520 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1521 "to NMI-blocking"); 1522 } else if (ctrl->intr_shadow) { 1523 /* 1524 * Can't inject an NMI if the vcpu is in an intr_shadow. 1525 */ 1526 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1527 "interrupt shadow"); 1528 need_intr_window = 1; 1529 goto done; 1530 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1531 /* 1532 * If there is already an exception/interrupt pending 1533 * then defer the NMI until after that. 1534 */ 1535 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1536 "eventinj %#lx", ctrl->eventinj); 1537 1538 /* 1539 * Use self-IPI to trigger a VM-exit as soon as 1540 * possible after the event injection is completed. 1541 * 1542 * This works only if the external interrupt exiting 1543 * is at a lower priority than the event injection. 1544 * 1545 * Although not explicitly specified in APMv2 the 1546 * relative priorities were verified empirically. 1547 */ 1548 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	if (!vm_extint_pending(sc->vm, vcpu)) {
		/*
		 * APIC interrupts are delivered using the V_IRQ offload.
		 *
		 * The primary benefit is that the hypervisor doesn't need to
		 * deal with the various conditions that inhibit interrupts.
		 * It also means that TPR changes via CR8 will be handled
		 * without any hypervisor involvement.
		 *
		 * Note that the APIC vector must remain pending in the vIRR
		 * until it is confirmed that it was delivered to the guest.
		 * This can be confirmed based on the value of V_IRQ at the
		 * next #VMEXIT (1 = pending, 0 = delivered).
		 *
		 * Also note that it is possible that another higher priority
		 * vector can become pending before this vector is delivered
		 * to the guest. This is alright because vcpu_notify_event()
		 * will send an IPI and force the vcpu to trap back into the
		 * hypervisor. The higher priority vector will be injected on
		 * the next VMRUN.
		 */
		if (vlapic_pending_intr(vlapic, &vector)) {
			KASSERT(vector >= 16 && vector <= 255,
			    ("invalid vector %d from local APIC", vector));
			pending_apic_vector = vector;
		}
		goto done;
	}

	/* Ask the legacy pic for a vector to inject */
	vatpic_pending_intr(sc->vm, &vector);
	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
	    vector));

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	/*
	 * Legacy PIC interrupts are delivered via the event injection
	 * mechanism.
	 */
	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	vm_extint_clear(sc->vm, vcpu);
	vatpic_intr_accepted(sc->vm, vector);

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
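	 *
	 * (Per APMv2 the core compares V_INTR_PRIO against V_TPR, unless
	 * V_IGN_TPR is set, when deciding whether to take the virtual
	 * interrupt, so a stale V_TPR could deliver or suppress the vector
	 * incorrectly.)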
1649 */ 1650 v_tpr = vlapic_get_cr8(vlapic); 1651 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1652 if (ctrl->v_tpr != v_tpr) { 1653 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1654 ctrl->v_tpr, v_tpr); 1655 ctrl->v_tpr = v_tpr; 1656 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1657 } 1658 1659 if (pending_apic_vector) { 1660 /* 1661 * If an APIC vector is being injected then interrupt window 1662 * exiting is not possible on this VMRUN. 1663 */ 1664 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1665 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1666 pending_apic_vector); 1667 1668 ctrl->v_irq = 1; 1669 ctrl->v_ign_tpr = 0; 1670 ctrl->v_intr_vector = pending_apic_vector; 1671 ctrl->v_intr_prio = pending_apic_vector >> 4; 1672 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1673 } else if (need_intr_window) { 1674 /* 1675 * We use V_IRQ in conjunction with the VINTR intercept to 1676 * trap into the hypervisor as soon as a virtual interrupt 1677 * can be delivered. 1678 * 1679 * Since injected events are not subject to intercept checks 1680 * we need to ensure that the V_IRQ is not actually going to 1681 * be delivered on VM entry. The KASSERT below enforces this. 1682 */ 1683 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1684 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1685 ("Bogus intr_window_exiting: eventinj (%#lx), " 1686 "intr_shadow (%u), rflags (%#lx)", 1687 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1688 enable_intr_window_exiting(sc, vcpu); 1689 } else { 1690 disable_intr_window_exiting(sc, vcpu); 1691 } 1692 } 1693 1694 static __inline void 1695 restore_host_tss(void) 1696 { 1697 struct system_segment_descriptor *tss_sd; 1698 1699 /* 1700 * The TSS descriptor was in use prior to launching the guest so it 1701 * has been marked busy. 1702 * 1703 * 'ltr' requires the descriptor to be marked available so change the 1704 * type to "64-bit available TSS". 1705 */ 1706 tss_sd = PCPU_GET(tss); 1707 tss_sd->sd_type = SDT_SYSTSS; 1708 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1709 } 1710 1711 static void 1712 check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1713 { 1714 struct svm_vcpu *vcpustate; 1715 struct vmcb_ctrl *ctrl; 1716 long eptgen; 1717 bool alloc_asid; 1718 1719 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1720 "active on cpu %u", __func__, thiscpu)); 1721 1722 vcpustate = svm_get_vcpu(sc, vcpuid); 1723 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1724 1725 /* 1726 * The TLB entries associated with the vcpu's ASID are not valid 1727 * if either of the following conditions is true: 1728 * 1729 * 1. The vcpu's ASID generation is different than the host cpu's 1730 * ASID generation. This happens when the vcpu migrates to a new 1731 * host cpu. It can also happen when the number of vcpus executing 1732 * on a host cpu is greater than the number of ASIDs available. 1733 * 1734 * 2. The pmap generation number is different than the value cached in 1735 * the 'vcpustate'. This happens when the host invalidates pages 1736 * belonging to the guest. 1737 * 1738 * asidgen eptgen Action 1739 * mismatch mismatch 1740 * 0 0 (a) 1741 * 0 1 (b1) or (b2) 1742 * 1 0 (c) 1743 * 1 1 (d) 1744 * 1745 * (a) There is no mismatch in eptgen or ASID generation and therefore 1746 * no further action is needed. 1747 * 1748 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1749 * retained and the TLB entries associated with this ASID 1750 * are flushed by VMRUN. 
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    void *rend_cookie, void *suspended_cookie)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	u_int thiscpu;
	int handled;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	/*
	 * Stash 'curcpu' on the stack as 'thiscpu'.
	 *
	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
	 * after the #VMEXIT. Since VMRUN is executed inside a critical section,
	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
	 */
	thiscpu = curcpu;

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != thiscpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = thiscpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(suspended_cookie)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(rend_cookie)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to yield the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'thiscpu' */
		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, thiscpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx);

		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);

		/*
		 * Restore MSR_GSBASE to point to the pcpu data area.
		 *
		 * Note that accesses done via PCPU_GET/PCPU_SET will work
		 * only after MSR_GSBASE is restored.
		 *
		 * Also note that we don't bother restoring MSR_KGSBASE
		 * since it is not used in the kernel and will be restored
		 * when the VMRUN ioctl returns to userspace.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
		    thiscpu, curcpu));

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
2084 */ 2085 2086 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); 2087 return (EINVAL); 2088 } 2089 2090 static int 2091 svm_setcap(void *arg, int vcpu, int type, int val) 2092 { 2093 struct svm_softc *sc; 2094 int error; 2095 2096 sc = arg; 2097 error = 0; 2098 switch (type) { 2099 case VM_CAP_HALT_EXIT: 2100 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2101 VMCB_INTCPT_HLT, val); 2102 break; 2103 case VM_CAP_PAUSE_EXIT: 2104 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2105 VMCB_INTCPT_PAUSE, val); 2106 break; 2107 case VM_CAP_UNRESTRICTED_GUEST: 2108 /* Unrestricted guest execution cannot be disabled in SVM */ 2109 if (val == 0) 2110 error = EINVAL; 2111 break; 2112 default: 2113 error = ENOENT; 2114 break; 2115 } 2116 return (error); 2117 } 2118 2119 static int 2120 svm_getcap(void *arg, int vcpu, int type, int *retval) 2121 { 2122 struct svm_softc *sc; 2123 int error; 2124 2125 sc = arg; 2126 error = 0; 2127 2128 switch (type) { 2129 case VM_CAP_HALT_EXIT: 2130 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2131 VMCB_INTCPT_HLT); 2132 break; 2133 case VM_CAP_PAUSE_EXIT: 2134 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2135 VMCB_INTCPT_PAUSE); 2136 break; 2137 case VM_CAP_UNRESTRICTED_GUEST: 2138 *retval = 1; /* unrestricted guest is always enabled */ 2139 break; 2140 default: 2141 error = ENOENT; 2142 break; 2143 } 2144 return (error); 2145 } 2146 2147 static struct vlapic * 2148 svm_vlapic_init(void *arg, int vcpuid) 2149 { 2150 struct svm_softc *svm_sc; 2151 struct vlapic *vlapic; 2152 2153 svm_sc = arg; 2154 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2155 vlapic->vm = svm_sc->vm; 2156 vlapic->vcpuid = vcpuid; 2157 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2158 2159 vlapic_init(vlapic); 2160 2161 return (vlapic); 2162 } 2163 2164 static void 2165 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2166 { 2167 2168 vlapic_cleanup(vlapic); 2169 free(vlapic, M_SVM_VLAPIC); 2170 } 2171 2172 struct vmm_ops vmm_ops_amd = { 2173 svm_init, 2174 svm_cleanup, 2175 svm_restore, 2176 svm_vminit, 2177 svm_vmrun, 2178 svm_vmcleanup, 2179 svm_getreg, 2180 svm_setreg, 2181 vmcb_getdesc, 2182 vmcb_setdesc, 2183 svm_getcap, 2184 svm_setcap, 2185 svm_npt_alloc, 2186 svm_npt_free, 2187 svm_vlapic_init, 2188 svm_vlapic_cleanup 2189 }; 2190