1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/smp.h> 35 #include <sys/kernel.h> 36 #include <sys/malloc.h> 37 #include <sys/pcpu.h> 38 #include <sys/proc.h> 39 #include <sys/sysctl.h> 40 41 #include <vm/vm.h> 42 #include <vm/pmap.h> 43 44 #include <machine/cpufunc.h> 45 #include <machine/psl.h> 46 #include <machine/md_var.h> 47 #include <machine/reg.h> 48 #include <machine/specialreg.h> 49 #include <machine/smp.h> 50 #include <machine/vmm.h> 51 #include <machine/vmm_dev.h> 52 #include <machine/vmm_instruction_emul.h> 53 54 #include "vmm_lapic.h" 55 #include "vmm_stat.h" 56 #include "vmm_ktr.h" 57 #include "vmm_ioport.h" 58 #include "vatpic.h" 59 #include "vlapic.h" 60 #include "vlapic_priv.h" 61 62 #include "x86.h" 63 #include "vmcb.h" 64 #include "svm.h" 65 #include "svm_softc.h" 66 #include "svm_msr.h" 67 #include "npt.h" 68 69 SYSCTL_DECL(_hw_vmm); 70 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 71 72 /* 73 * SVM CPUID function 0x8000_000A, edx bit decoding. 74 */ 75 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 76 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 77 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 78 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 79 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 80 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 81 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 82 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 83 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 84 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 85 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 86 87 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 88 VMCB_CACHE_IOPM | \ 89 VMCB_CACHE_I | \ 90 VMCB_CACHE_TPR | \ 91 VMCB_CACHE_CR2 | \ 92 VMCB_CACHE_CR | \ 93 VMCB_CACHE_DR | \ 94 VMCB_CACHE_DT | \ 95 VMCB_CACHE_SEG | \ 96 VMCB_CACHE_NP) 97 98 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 99 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 100 0, NULL); 101 102 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 103 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 104 105 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 106 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 107 "SVM features advertised by CPUID.8000000AH:EDX"); 108 109 static int disable_npf_assist; 110 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 111 &disable_npf_assist, 0, NULL); 112 113 /* Maximum ASIDs supported by the processor */ 114 static uint32_t nasid; 115 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 116 "Number of ASIDs supported by this processor"); 117 118 /* Current ASID generation for each host cpu */ 119 static struct asid asid[MAXCPU]; 120 121 /* 122 * SVM host state saved area of size 4KB for each core. 123 */ 124 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 125 126 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 127 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 128 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 129 130 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 131 132 static __inline int 133 flush_by_asid(void) 134 { 135 136 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 137 } 138 139 static __inline int 140 decode_assist(void) 141 { 142 143 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 144 } 145 146 static void 147 svm_disable(void *arg __unused) 148 { 149 uint64_t efer; 150 151 efer = rdmsr(MSR_EFER); 152 efer &= ~EFER_SVM; 153 wrmsr(MSR_EFER, efer); 154 } 155 156 /* 157 * Disable SVM on all CPUs. 158 */ 159 static int 160 svm_cleanup(void) 161 { 162 163 smp_rendezvous(NULL, svm_disable, NULL, NULL); 164 return (0); 165 } 166 167 /* 168 * Verify that all the features required by bhyve are available. 169 */ 170 static int 171 check_svm_features(void) 172 { 173 u_int regs[4]; 174 175 /* CPUID Fn8000_000A is for SVM */ 176 do_cpuid(0x8000000A, regs); 177 svm_feature &= regs[3]; 178 179 /* 180 * The number of ASIDs can be configured to be less than what is 181 * supported by the hardware but not more. 182 */ 183 if (nasid == 0 || nasid > regs[1]) 184 nasid = regs[1]; 185 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 186 187 /* bhyve requires the Nested Paging feature */ 188 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 189 printf("SVM: Nested Paging feature not available.\n"); 190 return (ENXIO); 191 } 192 193 /* bhyve requires the NRIP Save feature */ 194 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 195 printf("SVM: NRIP Save feature not available.\n"); 196 return (ENXIO); 197 } 198 199 return (0); 200 } 201 202 static void 203 svm_enable(void *arg __unused) 204 { 205 uint64_t efer; 206 207 efer = rdmsr(MSR_EFER); 208 efer |= EFER_SVM; 209 wrmsr(MSR_EFER, efer); 210 211 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); 212 } 213 214 /* 215 * Return 1 if SVM is enabled on this processor and 0 otherwise. 
216 */ 217 static int 218 svm_available(void) 219 { 220 uint64_t msr; 221 222 /* Section 15.4 Enabling SVM from APM2. */ 223 if ((amd_feature2 & AMDID2_SVM) == 0) { 224 printf("SVM: not available.\n"); 225 return (0); 226 } 227 228 msr = rdmsr(MSR_VM_CR); 229 if ((msr & VM_CR_SVMDIS) != 0) { 230 printf("SVM: disabled by BIOS.\n"); 231 return (0); 232 } 233 234 return (1); 235 } 236 237 static int 238 svm_init(int ipinum) 239 { 240 int error, cpu; 241 242 if (!svm_available()) 243 return (ENXIO); 244 245 error = check_svm_features(); 246 if (error) 247 return (error); 248 249 vmcb_clean &= VMCB_CACHE_DEFAULT; 250 251 for (cpu = 0; cpu < MAXCPU; cpu++) { 252 /* 253 * Initialize the host ASIDs to their "highest" valid values. 254 * 255 * The next ASID allocation will rollover both 'gen' and 'num' 256 * and start off the sequence at {1,1}. 257 */ 258 asid[cpu].gen = ~0UL; 259 asid[cpu].num = nasid - 1; 260 } 261 262 svm_msr_init(); 263 svm_npt_init(ipinum); 264 265 /* Enable SVM on all CPUs */ 266 smp_rendezvous(NULL, svm_enable, NULL, NULL); 267 268 return (0); 269 } 270 271 static void 272 svm_restore(void) 273 { 274 275 svm_enable(NULL); 276 } 277 278 /* Pentium compatible MSRs */ 279 #define MSR_PENTIUM_START 0 280 #define MSR_PENTIUM_END 0x1FFF 281 /* AMD 6th generation and Intel compatible MSRs */ 282 #define MSR_AMD6TH_START 0xC0000000UL 283 #define MSR_AMD6TH_END 0xC0001FFFUL 284 /* AMD 7th and 8th generation compatible MSRs */ 285 #define MSR_AMD7TH_START 0xC0010000UL 286 #define MSR_AMD7TH_END 0xC0011FFFUL 287 288 /* 289 * Get the index and bit position for a MSR in permission bitmap. 290 * Two bits are used for each MSR: lower bit for read and higher bit for write. 291 */ 292 static int 293 svm_msr_index(uint64_t msr, int *index, int *bit) 294 { 295 uint32_t base, off; 296 297 *index = -1; 298 *bit = (msr % 4) * 2; 299 base = 0; 300 301 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 302 *index = msr / 4; 303 return (0); 304 } 305 306 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 307 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 308 off = (msr - MSR_AMD6TH_START); 309 *index = (off + base) / 4; 310 return (0); 311 } 312 313 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 314 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 315 off = (msr - MSR_AMD7TH_START); 316 *index = (off + base) / 4; 317 return (0); 318 } 319 320 return (EINVAL); 321 } 322 323 /* 324 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 
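 *
 * For example, following svm_msr_index() above: MSR_EFER (0xC0000080)
 * falls in the AMD 6th-generation range, so its permission bits live in
 * byte (0x2000 + 0x80) / 4 = 0x820 of the bitmap, with the read bit at
 * position 0 and the write bit at position 1.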
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions; otherwise
	 * intercept only the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2,
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
	    PAT_VALUE(2, PAT_UNCACHED) |
	    PAT_VALUE(3, PAT_UNCACHEABLE) |
	    PAT_VALUE(4, PAT_WRITE_BACK) |
	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
	    PAT_VALUE(6, PAT_UNCACHED) |
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;
	uint16_t maxcpus;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing.
Therefore it is safe to allow the guest to 550 * read/write these MSRs directly without hypervisor involvement. 551 */ 552 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 553 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 554 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 555 556 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 557 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 558 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 559 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 560 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 561 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 562 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 563 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 564 565 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 566 567 /* 568 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 569 */ 570 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 571 572 /* Intercept access to all I/O ports. */ 573 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 574 575 iopm_pa = vtophys(svm_sc->iopm_bitmap); 576 msrpm_pa = vtophys(svm_sc->msr_bitmap); 577 pml4_pa = svm_sc->nptp; 578 maxcpus = vm_get_maxcpus(svm_sc->vm); 579 for (i = 0; i < maxcpus; i++) { 580 vcpu = svm_get_vcpu(svm_sc, i); 581 vcpu->nextrip = ~0; 582 vcpu->lastcpu = NOCPU; 583 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 584 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 585 svm_msr_guest_init(svm_sc, i); 586 } 587 return (svm_sc); 588 } 589 590 /* 591 * Collateral for a generic SVM VM-exit. 592 */ 593 static void 594 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 595 { 596 597 vme->exitcode = VM_EXITCODE_SVM; 598 vme->u.svm.exitcode = code; 599 vme->u.svm.exitinfo1 = info1; 600 vme->u.svm.exitinfo2 = info2; 601 } 602 603 static int 604 svm_cpl(struct vmcb_state *state) 605 { 606 607 /* 608 * From APMv2: 609 * "Retrieve the CPL from the CPL field in the VMCB, not 610 * from any segment DPL" 611 */ 612 return (state->cpl); 613 } 614 615 static enum vm_cpu_mode 616 svm_vcpu_mode(struct vmcb *vmcb) 617 { 618 struct vmcb_segment seg; 619 struct vmcb_state *state; 620 int error; 621 622 state = &vmcb->state; 623 624 if (state->efer & EFER_LMA) { 625 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 626 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 627 error)); 628 629 /* 630 * Section 4.8.1 for APM2, check if Code Segment has 631 * Long attribute set in descriptor. 632 */ 633 if (seg.attrib & VMCB_CS_ATTRIB_L) 634 return (CPU_MODE_64BIT); 635 else 636 return (CPU_MODE_COMPATIBILITY); 637 } else if (state->cr0 & CR0_PE) { 638 return (CPU_MODE_PROTECTED); 639 } else { 640 return (CPU_MODE_REAL); 641 } 642 } 643 644 static enum vm_paging_mode 645 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 646 { 647 648 if ((cr0 & CR0_PG) == 0) 649 return (PAGING_MODE_FLAT); 650 if ((cr4 & CR4_PAE) == 0) 651 return (PAGING_MODE_32); 652 if (efer & EFER_LME) 653 return (PAGING_MODE_64); 654 else 655 return (PAGING_MODE_PAE); 656 } 657 658 /* 659 * ins/outs utility routines 660 */ 661 static uint64_t 662 svm_inout_str_index(struct svm_regctx *regs, int in) 663 { 664 uint64_t val; 665 666 val = in ? regs->sctx_rdi : regs->sctx_rsi; 667 668 return (val); 669 } 670 671 static uint64_t 672 svm_inout_str_count(struct svm_regctx *regs, int rep) 673 { 674 uint64_t val; 675 676 val = rep ? 
regs->sctx_rcx : 1; 677 678 return (val); 679 } 680 681 static void 682 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 683 int in, struct vm_inout_str *vis) 684 { 685 int error, s; 686 687 if (in) { 688 vis->seg_name = VM_REG_GUEST_ES; 689 } else { 690 /* The segment field has standard encoding */ 691 s = (info1 >> 10) & 0x7; 692 vis->seg_name = vm_segment_name(s); 693 } 694 695 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 696 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 697 } 698 699 static int 700 svm_inout_str_addrsize(uint64_t info1) 701 { 702 uint32_t size; 703 704 size = (info1 >> 7) & 0x7; 705 switch (size) { 706 case 1: 707 return (2); /* 16 bit */ 708 case 2: 709 return (4); /* 32 bit */ 710 case 4: 711 return (8); /* 64 bit */ 712 default: 713 panic("%s: invalid size encoding %d", __func__, size); 714 } 715 } 716 717 static void 718 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 719 { 720 struct vmcb_state *state; 721 722 state = &vmcb->state; 723 paging->cr3 = state->cr3; 724 paging->cpl = svm_cpl(state); 725 paging->cpu_mode = svm_vcpu_mode(vmcb); 726 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 727 state->efer); 728 } 729 730 #define UNHANDLED 0 731 732 /* 733 * Handle guest I/O intercept. 734 */ 735 static int 736 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 737 { 738 struct vmcb_ctrl *ctrl; 739 struct vmcb_state *state; 740 struct svm_regctx *regs; 741 struct vm_inout_str *vis; 742 uint64_t info1; 743 int inout_string; 744 745 state = svm_get_vmcb_state(svm_sc, vcpu); 746 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 747 regs = svm_get_guest_regctx(svm_sc, vcpu); 748 749 info1 = ctrl->exitinfo1; 750 inout_string = info1 & BIT(2) ? 1 : 0; 751 752 /* 753 * The effective segment number in EXITINFO1[12:10] is populated 754 * only if the processor has the DecodeAssist capability. 755 * 756 * XXX this is not specified explicitly in APMv2 but can be verified 757 * empirically. 758 */ 759 if (inout_string && !decode_assist()) 760 return (UNHANDLED); 761 762 vmexit->exitcode = VM_EXITCODE_INOUT; 763 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 764 vmexit->u.inout.string = inout_string; 765 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 766 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 767 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 768 vmexit->u.inout.eax = (uint32_t)(state->rax); 769 770 if (inout_string) { 771 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 772 vis = &vmexit->u.inout_str; 773 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 774 vis->rflags = state->rflags; 775 vis->cr0 = state->cr0; 776 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 777 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 778 vis->addrsize = svm_inout_str_addrsize(info1); 779 svm_inout_str_seginfo(svm_sc, vcpu, info1, 780 vmexit->u.inout.in, vis); 781 } 782 783 return (UNHANDLED); 784 } 785 786 static int 787 npf_fault_type(uint64_t exitinfo1) 788 { 789 790 if (exitinfo1 & VMCB_NPF_INFO1_W) 791 return (VM_PROT_WRITE); 792 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 793 return (VM_PROT_EXECUTE); 794 else 795 return (VM_PROT_READ); 796 } 797 798 static bool 799 svm_npf_emul_fault(uint64_t exitinfo1) 800 { 801 802 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 803 return (false); 804 } 805 806 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 807 return (false); 808 } 809 810 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 811 return (false); 812 } 813 814 return (true); 815 } 816 817 static void 818 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 819 { 820 struct vm_guest_paging *paging; 821 struct vmcb_segment seg; 822 struct vmcb_ctrl *ctrl; 823 char *inst_bytes; 824 int error, inst_len; 825 826 ctrl = &vmcb->ctrl; 827 paging = &vmexit->u.inst_emul.paging; 828 829 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 830 vmexit->u.inst_emul.gpa = gpa; 831 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 832 svm_paging_info(vmcb, paging); 833 834 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 835 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 836 837 switch(paging->cpu_mode) { 838 case CPU_MODE_REAL: 839 vmexit->u.inst_emul.cs_base = seg.base; 840 vmexit->u.inst_emul.cs_d = 0; 841 break; 842 case CPU_MODE_PROTECTED: 843 case CPU_MODE_COMPATIBILITY: 844 vmexit->u.inst_emul.cs_base = seg.base; 845 846 /* 847 * Section 4.8.1 of APM2, Default Operand Size or D bit. 848 */ 849 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 850 1 : 0; 851 break; 852 default: 853 vmexit->u.inst_emul.cs_base = 0; 854 vmexit->u.inst_emul.cs_d = 0; 855 break; 856 } 857 858 /* 859 * Copy the instruction bytes into 'vie' if available. 860 */ 861 if (decode_assist() && !disable_npf_assist) { 862 inst_len = ctrl->inst_len; 863 inst_bytes = ctrl->inst_bytes; 864 } else { 865 inst_len = 0; 866 inst_bytes = NULL; 867 } 868 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 869 } 870 871 #ifdef KTR 872 static const char * 873 intrtype_to_str(int intr_type) 874 { 875 switch (intr_type) { 876 case VMCB_EVENTINJ_TYPE_INTR: 877 return ("hwintr"); 878 case VMCB_EVENTINJ_TYPE_NMI: 879 return ("nmi"); 880 case VMCB_EVENTINJ_TYPE_INTn: 881 return ("swintr"); 882 case VMCB_EVENTINJ_TYPE_EXCEPTION: 883 return ("exception"); 884 default: 885 panic("%s: unknown intr_type %d", __func__, intr_type); 886 } 887 } 888 #endif 889 890 /* 891 * Inject an event to vcpu as described in section 15.20, "Event injection". 
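 *
 * The EVENTINJ field is assembled below as: the vector in bits 7:0, the
 * event type in bits 10:8, the valid bit, and, when 'ec_valid' is set,
 * the error code in bits 63:32.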
892 */ 893 static void 894 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 895 uint32_t error, bool ec_valid) 896 { 897 struct vmcb_ctrl *ctrl; 898 899 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 900 901 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 902 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 903 904 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 905 __func__, vector)); 906 907 switch (intr_type) { 908 case VMCB_EVENTINJ_TYPE_INTR: 909 case VMCB_EVENTINJ_TYPE_NMI: 910 case VMCB_EVENTINJ_TYPE_INTn: 911 break; 912 case VMCB_EVENTINJ_TYPE_EXCEPTION: 913 if (vector >= 0 && vector <= 31 && vector != 2) 914 break; 915 /* FALLTHROUGH */ 916 default: 917 panic("%s: invalid intr_type/vector: %d/%d", __func__, 918 intr_type, vector); 919 } 920 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 921 if (ec_valid) { 922 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 923 ctrl->eventinj |= (uint64_t)error << 32; 924 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 925 intrtype_to_str(intr_type), vector, error); 926 } else { 927 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 928 intrtype_to_str(intr_type), vector); 929 } 930 } 931 932 static void 933 svm_update_virqinfo(struct svm_softc *sc, int vcpu) 934 { 935 struct vm *vm; 936 struct vlapic *vlapic; 937 struct vmcb_ctrl *ctrl; 938 939 vm = sc->vm; 940 vlapic = vm_lapic(vm, vcpu); 941 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 942 943 /* Update %cr8 in the emulated vlapic */ 944 vlapic_set_cr8(vlapic, ctrl->v_tpr); 945 946 /* Virtual interrupt injection is not used. */ 947 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 948 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 949 } 950 951 static void 952 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 953 { 954 struct vmcb_ctrl *ctrl; 955 uint64_t intinfo; 956 957 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 958 intinfo = ctrl->exitintinfo; 959 if (!VMCB_EXITINTINFO_VALID(intinfo)) 960 return; 961 962 /* 963 * From APMv2, Section "Intercepts during IDT interrupt delivery" 964 * 965 * If a #VMEXIT happened during event delivery then record the event 966 * that was being delivered. 
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next.
	 * However, it is possible to inject another NMI into the vcpu before
	 * the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;	/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
1148 */ 1149 if (newval & EFER_LMSLE) { 1150 vme = vm_exitinfo(sc->vm, vcpu); 1151 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1152 *retu = true; 1153 return (0); 1154 } 1155 1156 if (newval & EFER_FFXSR) { 1157 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) 1158 goto gpf; 1159 } 1160 1161 if (newval & EFER_TCE) { 1162 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) 1163 goto gpf; 1164 } 1165 1166 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1167 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1168 return (0); 1169 gpf: 1170 vm_inject_gp(sc->vm, vcpu); 1171 return (0); 1172 } 1173 1174 static int 1175 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1176 bool *retu) 1177 { 1178 int error; 1179 1180 if (lapic_msr(num)) 1181 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1182 else if (num == MSR_EFER) 1183 error = svm_write_efer(sc, vcpu, val, retu); 1184 else 1185 error = svm_wrmsr(sc, vcpu, num, val, retu); 1186 1187 return (error); 1188 } 1189 1190 static int 1191 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1192 { 1193 struct vmcb_state *state; 1194 struct svm_regctx *ctx; 1195 uint64_t result; 1196 int error; 1197 1198 if (lapic_msr(num)) 1199 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1200 else 1201 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1202 1203 if (error == 0) { 1204 state = svm_get_vmcb_state(sc, vcpu); 1205 ctx = svm_get_guest_regctx(sc, vcpu); 1206 state->rax = result & 0xffffffff; 1207 ctx->sctx_rdx = result >> 32; 1208 } 1209 1210 return (error); 1211 } 1212 1213 #ifdef KTR 1214 static const char * 1215 exit_reason_to_str(uint64_t reason) 1216 { 1217 static char reasonbuf[32]; 1218 1219 switch (reason) { 1220 case VMCB_EXIT_INVALID: 1221 return ("invalvmcb"); 1222 case VMCB_EXIT_SHUTDOWN: 1223 return ("shutdown"); 1224 case VMCB_EXIT_NPF: 1225 return ("nptfault"); 1226 case VMCB_EXIT_PAUSE: 1227 return ("pause"); 1228 case VMCB_EXIT_HLT: 1229 return ("hlt"); 1230 case VMCB_EXIT_CPUID: 1231 return ("cpuid"); 1232 case VMCB_EXIT_IO: 1233 return ("inout"); 1234 case VMCB_EXIT_MC: 1235 return ("mchk"); 1236 case VMCB_EXIT_INTR: 1237 return ("extintr"); 1238 case VMCB_EXIT_NMI: 1239 return ("nmi"); 1240 case VMCB_EXIT_VINTR: 1241 return ("vintr"); 1242 case VMCB_EXIT_MSR: 1243 return ("msr"); 1244 case VMCB_EXIT_IRET: 1245 return ("iret"); 1246 case VMCB_EXIT_MONITOR: 1247 return ("monitor"); 1248 case VMCB_EXIT_MWAIT: 1249 return ("mwait"); 1250 default: 1251 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1252 return (reasonbuf); 1253 } 1254 } 1255 #endif /* KTR */ 1256 1257 /* 1258 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1259 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1260 * and exceptions caused by INT3, INTO and BOUND instructions. 1261 * 1262 * Return 1 if the nRIP is valid and 0 otherwise. 1263 */ 1264 static int 1265 nrip_valid(uint64_t exitcode) 1266 { 1267 switch (exitcode) { 1268 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1269 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1270 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1271 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1272 case 0x43: /* INT3 */ 1273 case 0x44: /* INTO */ 1274 case 0x45: /* BOUND */ 1275 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1276 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... 
VMEXIT_XSETBV */ 1277 return (1); 1278 default: 1279 return (0); 1280 } 1281 } 1282 1283 static int 1284 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1285 { 1286 struct vmcb *vmcb; 1287 struct vmcb_state *state; 1288 struct vmcb_ctrl *ctrl; 1289 struct svm_regctx *ctx; 1290 uint64_t code, info1, info2, val; 1291 uint32_t eax, ecx, edx; 1292 int error, errcode_valid, handled, idtvec, reflect; 1293 bool retu; 1294 1295 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1296 vmcb = svm_get_vmcb(svm_sc, vcpu); 1297 state = &vmcb->state; 1298 ctrl = &vmcb->ctrl; 1299 1300 handled = 0; 1301 code = ctrl->exitcode; 1302 info1 = ctrl->exitinfo1; 1303 info2 = ctrl->exitinfo2; 1304 1305 vmexit->exitcode = VM_EXITCODE_BOGUS; 1306 vmexit->rip = state->rip; 1307 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1308 1309 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1310 1311 /* 1312 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1313 * in an inconsistent state and can trigger assertions that would 1314 * never happen otherwise. 1315 */ 1316 if (code == VMCB_EXIT_INVALID) { 1317 vm_exit_svm(vmexit, code, info1, info2); 1318 return (0); 1319 } 1320 1321 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1322 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1323 1324 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1325 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1326 vmexit->inst_length, code, info1, info2)); 1327 1328 svm_update_virqinfo(svm_sc, vcpu); 1329 svm_save_intinfo(svm_sc, vcpu); 1330 1331 switch (code) { 1332 case VMCB_EXIT_IRET: 1333 /* 1334 * Restart execution at "iret" but with the intercept cleared. 1335 */ 1336 vmexit->inst_length = 0; 1337 clear_nmi_blocking(svm_sc, vcpu); 1338 handled = 1; 1339 break; 1340 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1341 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1342 handled = 1; 1343 break; 1344 case VMCB_EXIT_INTR: /* external interrupt */ 1345 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1346 handled = 1; 1347 break; 1348 case VMCB_EXIT_NMI: /* external NMI */ 1349 handled = 1; 1350 break; 1351 case 0x40 ... 0x5F: 1352 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1353 reflect = 1; 1354 idtvec = code - 0x40; 1355 switch (idtvec) { 1356 case IDT_MC: 1357 /* 1358 * Call the machine check handler by hand. Also don't 1359 * reflect the machine check back into the guest. 1360 */ 1361 reflect = 0; 1362 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1363 __asm __volatile("int $18"); 1364 break; 1365 case IDT_PF: 1366 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1367 info2); 1368 KASSERT(error == 0, ("%s: error %d updating cr2", 1369 __func__, error)); 1370 /* fallthru */ 1371 case IDT_NP: 1372 case IDT_SS: 1373 case IDT_GP: 1374 case IDT_AC: 1375 case IDT_TS: 1376 errcode_valid = 1; 1377 break; 1378 1379 case IDT_DF: 1380 errcode_valid = 1; 1381 info1 = 0; 1382 break; 1383 1384 case IDT_BP: 1385 case IDT_OF: 1386 case IDT_BR: 1387 /* 1388 * The 'nrip' field is populated for INT3, INTO and 1389 * BOUND exceptions and this also implies that 1390 * 'inst_length' is non-zero. 1391 * 1392 * Reset 'inst_length' to zero so the guest %rip at 1393 * event injection is identical to what it was when 1394 * the exception originally happened. 
1395 */ 1396 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1397 "to zero before injecting exception %d", 1398 vmexit->inst_length, idtvec); 1399 vmexit->inst_length = 0; 1400 /* fallthru */ 1401 default: 1402 errcode_valid = 0; 1403 info1 = 0; 1404 break; 1405 } 1406 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1407 "when reflecting exception %d into guest", 1408 vmexit->inst_length, idtvec)); 1409 1410 if (reflect) { 1411 /* Reflect the exception back into the guest */ 1412 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1413 "%d/%#x into the guest", idtvec, (int)info1); 1414 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1415 errcode_valid, info1, 0); 1416 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1417 __func__, error)); 1418 } 1419 handled = 1; 1420 break; 1421 case VMCB_EXIT_MSR: /* MSR access. */ 1422 eax = state->rax; 1423 ecx = ctx->sctx_rcx; 1424 edx = ctx->sctx_rdx; 1425 retu = false; 1426 1427 if (info1) { 1428 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1429 val = (uint64_t)edx << 32 | eax; 1430 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1431 ecx, val); 1432 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1433 vmexit->exitcode = VM_EXITCODE_WRMSR; 1434 vmexit->u.msr.code = ecx; 1435 vmexit->u.msr.wval = val; 1436 } else if (!retu) { 1437 handled = 1; 1438 } else { 1439 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1440 ("emulate_wrmsr retu with bogus exitcode")); 1441 } 1442 } else { 1443 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1444 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1445 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1446 vmexit->exitcode = VM_EXITCODE_RDMSR; 1447 vmexit->u.msr.code = ecx; 1448 } else if (!retu) { 1449 handled = 1; 1450 } else { 1451 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1452 ("emulate_rdmsr retu with bogus exitcode")); 1453 } 1454 } 1455 break; 1456 case VMCB_EXIT_IO: 1457 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1458 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1459 break; 1460 case VMCB_EXIT_CPUID: 1461 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1462 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1463 (uint32_t *)&state->rax, 1464 (uint32_t *)&ctx->sctx_rbx, 1465 (uint32_t *)&ctx->sctx_rcx, 1466 (uint32_t *)&ctx->sctx_rdx); 1467 break; 1468 case VMCB_EXIT_HLT: 1469 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1470 vmexit->exitcode = VM_EXITCODE_HLT; 1471 vmexit->u.hlt.rflags = state->rflags; 1472 break; 1473 case VMCB_EXIT_PAUSE: 1474 vmexit->exitcode = VM_EXITCODE_PAUSE; 1475 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1476 break; 1477 case VMCB_EXIT_NPF: 1478 /* EXITINFO2 contains the faulting guest physical address */ 1479 if (info1 & VMCB_NPF_INFO1_RSV) { 1480 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1481 "reserved bits set: info1(%#lx) info2(%#lx)", 1482 info1, info2); 1483 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { 1484 vmexit->exitcode = VM_EXITCODE_PAGING; 1485 vmexit->u.paging.gpa = info2; 1486 vmexit->u.paging.fault_type = npf_fault_type(info1); 1487 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1488 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1489 "on gpa %#lx/%#lx at rip %#lx", 1490 info2, info1, state->rip); 1491 } else if (svm_npf_emul_fault(info1)) { 1492 svm_handle_inst_emul(vmcb, info2, vmexit); 1493 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1494 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1495 "for gpa %#lx/%#lx at rip %#lx", 1496 info2, info1, state->rip); 
1497 } 1498 break; 1499 case VMCB_EXIT_MONITOR: 1500 vmexit->exitcode = VM_EXITCODE_MONITOR; 1501 break; 1502 case VMCB_EXIT_MWAIT: 1503 vmexit->exitcode = VM_EXITCODE_MWAIT; 1504 break; 1505 default: 1506 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1507 break; 1508 } 1509 1510 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1511 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1512 vmexit->rip, vmexit->inst_length); 1513 1514 if (handled) { 1515 vmexit->rip += vmexit->inst_length; 1516 vmexit->inst_length = 0; 1517 state->rip = vmexit->rip; 1518 } else { 1519 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1520 /* 1521 * If this VM exit was not claimed by anybody then 1522 * treat it as a generic SVM exit. 1523 */ 1524 vm_exit_svm(vmexit, code, info1, info2); 1525 } else { 1526 /* 1527 * The exitcode and collateral have been populated. 1528 * The VM exit will be processed further in userland. 1529 */ 1530 } 1531 } 1532 return (handled); 1533 } 1534 1535 static void 1536 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1537 { 1538 uint64_t intinfo; 1539 1540 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1541 return; 1542 1543 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1544 "valid: %#lx", __func__, intinfo)); 1545 1546 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1547 VMCB_EXITINTINFO_VECTOR(intinfo), 1548 VMCB_EXITINTINFO_EC(intinfo), 1549 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1550 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1551 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1552 } 1553 1554 /* 1555 * Inject event to virtual cpu. 1556 */ 1557 static void 1558 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1559 { 1560 struct vmcb_ctrl *ctrl; 1561 struct vmcb_state *state; 1562 struct svm_vcpu *vcpustate; 1563 uint8_t v_tpr; 1564 int vector, need_intr_window; 1565 int extint_pending; 1566 1567 state = svm_get_vmcb_state(sc, vcpu); 1568 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1569 vcpustate = svm_get_vcpu(sc, vcpu); 1570 1571 need_intr_window = 0; 1572 1573 if (vcpustate->nextrip != state->rip) { 1574 ctrl->intr_shadow = 0; 1575 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1576 "cleared due to rip change: %#lx/%#lx", 1577 vcpustate->nextrip, state->rip); 1578 } 1579 1580 /* 1581 * Inject pending events or exceptions for this vcpu. 1582 * 1583 * An event might be pending because the previous #VMEXIT happened 1584 * during event delivery (i.e. ctrl->exitintinfo). 1585 * 1586 * An event might also be pending because an exception was injected 1587 * by the hypervisor (e.g. #PF during instruction emulation). 1588 */ 1589 svm_inj_intinfo(sc, vcpu); 1590 1591 /* NMI event has priority over interrupts. */ 1592 if (vm_nmi_pending(sc->vm, vcpu)) { 1593 if (nmi_blocked(sc, vcpu)) { 1594 /* 1595 * Can't inject another NMI if the guest has not 1596 * yet executed an "iret" after the last NMI. 1597 */ 1598 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1599 "to NMI-blocking"); 1600 } else if (ctrl->intr_shadow) { 1601 /* 1602 * Can't inject an NMI if the vcpu is in an intr_shadow. 1603 */ 1604 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1605 "interrupt shadow"); 1606 need_intr_window = 1; 1607 goto done; 1608 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1609 /* 1610 * If there is already an exception/interrupt pending 1611 * then defer the NMI until after that. 
1612 */ 1613 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1614 "eventinj %#lx", ctrl->eventinj); 1615 1616 /* 1617 * Use self-IPI to trigger a VM-exit as soon as 1618 * possible after the event injection is completed. 1619 * 1620 * This works only if the external interrupt exiting 1621 * is at a lower priority than the event injection. 1622 * 1623 * Although not explicitly specified in APMv2 the 1624 * relative priorities were verified empirically. 1625 */ 1626 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1627 } else { 1628 vm_nmi_clear(sc->vm, vcpu); 1629 1630 /* Inject NMI, vector number is not used */ 1631 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1632 IDT_NMI, 0, false); 1633 1634 /* virtual NMI blocking is now in effect */ 1635 enable_nmi_blocking(sc, vcpu); 1636 1637 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1638 } 1639 } 1640 1641 extint_pending = vm_extint_pending(sc->vm, vcpu); 1642 if (!extint_pending) { 1643 if (!vlapic_pending_intr(vlapic, &vector)) 1644 goto done; 1645 KASSERT(vector >= 16 && vector <= 255, 1646 ("invalid vector %d from local APIC", vector)); 1647 } else { 1648 /* Ask the legacy pic for a vector to inject */ 1649 vatpic_pending_intr(sc->vm, &vector); 1650 KASSERT(vector >= 0 && vector <= 255, 1651 ("invalid vector %d from INTR", vector)); 1652 } 1653 1654 /* 1655 * If the guest has disabled interrupts or is in an interrupt shadow 1656 * then we cannot inject the pending interrupt. 1657 */ 1658 if ((state->rflags & PSL_I) == 0) { 1659 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1660 "rflags %#lx", vector, state->rflags); 1661 need_intr_window = 1; 1662 goto done; 1663 } 1664 1665 if (ctrl->intr_shadow) { 1666 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1667 "interrupt shadow", vector); 1668 need_intr_window = 1; 1669 goto done; 1670 } 1671 1672 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1673 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1674 "eventinj %#lx", vector, ctrl->eventinj); 1675 need_intr_window = 1; 1676 goto done; 1677 } 1678 1679 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1680 1681 if (!extint_pending) { 1682 vlapic_intr_accepted(vlapic, vector); 1683 } else { 1684 vm_extint_clear(sc->vm, vcpu); 1685 vatpic_intr_accepted(sc->vm, vector); 1686 } 1687 1688 /* 1689 * Force a VM-exit as soon as the vcpu is ready to accept another 1690 * interrupt. This is done because the PIC might have another vector 1691 * that it wants to inject. Also, if the APIC has a pending interrupt 1692 * that was preempted by the ExtInt then it allows us to inject the 1693 * APIC vector as soon as possible. 1694 */ 1695 need_intr_window = 1; 1696 done: 1697 /* 1698 * The guest can modify the TPR by writing to %CR8. In guest mode 1699 * the processor reflects this write to V_TPR without hypervisor 1700 * intervention. 1701 * 1702 * The guest can also modify the TPR by writing to it via the memory 1703 * mapped APIC page. In this case, the write will be emulated by the 1704 * hypervisor. For this reason V_TPR must be updated before every 1705 * VMRUN. 
1706 */ 1707 v_tpr = vlapic_get_cr8(vlapic); 1708 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1709 if (ctrl->v_tpr != v_tpr) { 1710 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1711 ctrl->v_tpr, v_tpr); 1712 ctrl->v_tpr = v_tpr; 1713 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1714 } 1715 1716 if (need_intr_window) { 1717 /* 1718 * We use V_IRQ in conjunction with the VINTR intercept to 1719 * trap into the hypervisor as soon as a virtual interrupt 1720 * can be delivered. 1721 * 1722 * Since injected events are not subject to intercept checks 1723 * we need to ensure that the V_IRQ is not actually going to 1724 * be delivered on VM entry. The KASSERT below enforces this. 1725 */ 1726 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1727 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1728 ("Bogus intr_window_exiting: eventinj (%#lx), " 1729 "intr_shadow (%u), rflags (%#lx)", 1730 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1731 enable_intr_window_exiting(sc, vcpu); 1732 } else { 1733 disable_intr_window_exiting(sc, vcpu); 1734 } 1735 } 1736 1737 static __inline void 1738 restore_host_tss(void) 1739 { 1740 struct system_segment_descriptor *tss_sd; 1741 1742 /* 1743 * The TSS descriptor was in use prior to launching the guest so it 1744 * has been marked busy. 1745 * 1746 * 'ltr' requires the descriptor to be marked available so change the 1747 * type to "64-bit available TSS". 1748 */ 1749 tss_sd = PCPU_GET(tss); 1750 tss_sd->sd_type = SDT_SYSTSS; 1751 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1752 } 1753 1754 static void 1755 check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1756 { 1757 struct svm_vcpu *vcpustate; 1758 struct vmcb_ctrl *ctrl; 1759 long eptgen; 1760 bool alloc_asid; 1761 1762 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1763 "active on cpu %u", __func__, thiscpu)); 1764 1765 vcpustate = svm_get_vcpu(sc, vcpuid); 1766 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1767 1768 /* 1769 * The TLB entries associated with the vcpu's ASID are not valid 1770 * if either of the following conditions is true: 1771 * 1772 * 1. The vcpu's ASID generation is different than the host cpu's 1773 * ASID generation. This happens when the vcpu migrates to a new 1774 * host cpu. It can also happen when the number of vcpus executing 1775 * on a host cpu is greater than the number of ASIDs available. 1776 * 1777 * 2. The pmap generation number is different than the value cached in 1778 * the 'vcpustate'. This happens when the host invalidates pages 1779 * belonging to the guest. 1780 * 1781 * asidgen eptgen Action 1782 * mismatch mismatch 1783 * 0 0 (a) 1784 * 0 1 (b1) or (b2) 1785 * 1 0 (c) 1786 * 1 1 (d) 1787 * 1788 * (a) There is no mismatch in eptgen or ASID generation and therefore 1789 * no further action is needed. 1790 * 1791 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1792 * retained and the TLB entries associated with this ASID 1793 * are flushed by VMRUN. 1794 * 1795 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1796 * allocated. 1797 * 1798 * (c) A new ASID is allocated. 1799 * 1800 * (d) A new ASID is allocated. 
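	 *
	 * For illustration: with nasid = 8 the allocator below hands out
	 * ASIDs 1 through 7 and, on wrap-around, bumps the per-cpu ASID
	 * generation, which forces every vcpu onto a freshly allocated ASID.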
1801 */ 1802 1803 alloc_asid = false; 1804 eptgen = pmap->pm_eptgen; 1805 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1806 1807 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1808 alloc_asid = true; /* (c) and (d) */ 1809 } else if (vcpustate->eptgen != eptgen) { 1810 if (flush_by_asid()) 1811 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1812 else 1813 alloc_asid = true; /* (b2) */ 1814 } else { 1815 /* 1816 * This is the common case (a). 1817 */ 1818 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1819 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1820 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1821 } 1822 1823 if (alloc_asid) { 1824 if (++asid[thiscpu].num >= nasid) { 1825 asid[thiscpu].num = 1; 1826 if (++asid[thiscpu].gen == 0) 1827 asid[thiscpu].gen = 1; 1828 /* 1829 * If this cpu does not support "flush-by-asid" 1830 * then flush the entire TLB on a generation 1831 * bump. Subsequent ASID allocation in this 1832 * generation can be done without a TLB flush. 1833 */ 1834 if (!flush_by_asid()) 1835 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1836 } 1837 vcpustate->asid.gen = asid[thiscpu].gen; 1838 vcpustate->asid.num = asid[thiscpu].num; 1839 1840 ctrl->asid = vcpustate->asid.num; 1841 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1842 /* 1843 * If this cpu supports "flush-by-asid" then the TLB 1844 * was not flushed after the generation bump. The TLB 1845 * is flushed selectively after every new ASID allocation. 1846 */ 1847 if (flush_by_asid()) 1848 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1849 } 1850 vcpustate->eptgen = eptgen; 1851 1852 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1853 KASSERT(ctrl->asid == vcpustate->asid.num, 1854 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1855 } 1856 1857 static __inline void 1858 disable_gintr(void) 1859 { 1860 1861 __asm __volatile("clgi"); 1862 } 1863 1864 static __inline void 1865 enable_gintr(void) 1866 { 1867 1868 __asm __volatile("stgi"); 1869 } 1870 1871 static __inline void 1872 svm_dr_enter_guest(struct svm_regctx *gctx) 1873 { 1874 1875 /* Save host control debug registers. */ 1876 gctx->host_dr7 = rdr7(); 1877 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1878 1879 /* 1880 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1881 * exceptions in the host based on the guest DRx values. The 1882 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1883 * VMCB. 1884 */ 1885 load_dr7(0); 1886 wrmsr(MSR_DEBUGCTLMSR, 0); 1887 1888 /* Save host debug registers. */ 1889 gctx->host_dr0 = rdr0(); 1890 gctx->host_dr1 = rdr1(); 1891 gctx->host_dr2 = rdr2(); 1892 gctx->host_dr3 = rdr3(); 1893 gctx->host_dr6 = rdr6(); 1894 1895 /* Restore guest debug registers. */ 1896 load_dr0(gctx->sctx_dr0); 1897 load_dr1(gctx->sctx_dr1); 1898 load_dr2(gctx->sctx_dr2); 1899 load_dr3(gctx->sctx_dr3); 1900 } 1901 1902 static __inline void 1903 svm_dr_leave_guest(struct svm_regctx *gctx) 1904 { 1905 1906 /* Save guest debug registers. */ 1907 gctx->sctx_dr0 = rdr0(); 1908 gctx->sctx_dr1 = rdr1(); 1909 gctx->sctx_dr2 = rdr2(); 1910 gctx->sctx_dr3 = rdr3(); 1911 1912 /* 1913 * Restore host debug registers. Restore DR7 and DEBUGCTL 1914 * last. 1915 */ 1916 load_dr0(gctx->host_dr0); 1917 load_dr1(gctx->host_dr1); 1918 load_dr2(gctx->host_dr2); 1919 load_dr3(gctx->host_dr3); 1920 load_dr6(gctx->host_dr6); 1921 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1922 load_dr7(gctx->host_dr7); 1923 } 1924 1925 /* 1926 * Start vcpu with specified RIP. 
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vm, vcpu)) {
			enable_gintr();
			vm_exit_debug(vm, vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit. The userspace hypervisor probably
		 * doesn't use an LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'curcpu' */
		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, curcpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
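		/*
		 * svm_launch() (implemented outside this file) loads the
		 * guest register context from 'gctx', enters the guest whose
		 * VMCB is at 'vmcb_pa' via VMRUN, and returns here after the
		 * next #VMEXIT.
		 */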

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

/*
 * Return a pointer to the software-saved copy of guest register 'reg',
 * or NULL if the register is not maintained in 'regctx'.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}
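
/*
 * Illustrative use (a sketch, not code from this file): callers reach
 * svm_getreg()/svm_setreg() through the vmm_ops table defined at the end
 * of this file, e.g.
 *
 *	uint64_t rax;
 *	error = svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RAX, &rax);
 *
 * A register kept in the VMCB state save area (such as RAX or RIP) is
 * satisfied by vmcb_read()/vmcb_write(), while a register kept in the
 * software context (such as RBX or R8) is accessed via swctx_regptr().
 */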

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	.init = svm_init,
	.cleanup = svm_cleanup,
	.resume = svm_restore,
	.vminit = svm_vminit,
	.vmrun = svm_vmrun,
	.vmcleanup = svm_vmcleanup,
	.vmgetreg = svm_getreg,
	.vmsetreg = svm_setreg,
	.vmgetdesc = vmcb_getdesc,
	.vmsetdesc = vmcb_setdesc,
	.vmgetcap = svm_getcap,
	.vmsetcap = svm_setcap,
	.vmspace_alloc = svm_npt_alloc,
	.vmspace_free = svm_npt_free,
	.vlapic_init = svm_vlapic_init,
	.vlapic_cleanup = svm_vlapic_cleanup,
};
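
/*
 * 'vmm_ops_amd' is the entry point vector for this backend. The
 * machine-independent vmm layer is expected to select it on AMD hosts and
 * dispatch through it; a sketch of that selection (not code from this
 * file, names assumed) looks roughly like:
 *
 *	if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
 *		ops = &vmm_ops_amd;
 *	...
 *	error = (*ops->vmrun)(vm_cookie, vcpuid, rip, pmap, &evinfo);
 */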