/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
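	 *
	 * (nasid is read-only at runtime but can be seeded at boot via the
	 * hw.vmm.svm.num_asids loader tunable declared above.)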
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}

/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
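 *
 * For example, MSR_EFER (0xC0000080) falls in the AMD 6th generation range:
 * off = 0x80 and base = 0x2000, so svm_msr_index() above yields byte index
 * (0x80 + 0x2000) / 4 = 0x820 and bit position (0xC0000080 % 4) * 2 = 0.
 * Clearing bit 0 of that byte permits reads and clearing bit 1 permits
 * writes, which is exactly what svm_msr_perm() below does.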
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
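			 *
			 * (Vector 2 is NMI, which is intercepted separately
			 * via VMCB_INTCPT_NMI below, and vector 9 is the
			 * legacy coprocessor-segment-overrun exception that
			 * modern processors no longer generate.)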
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflag and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;
	uint16_t maxcpus;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing.  Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	maxcpus = vm_get_maxcpus(svm_sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0

/*
 * Handle guest I/O intercept.
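 *
 * EXITINFO1 for an IOIO intercept is decoded by the handler below as
 * follows: bit 0 is the direction (1 = IN), bit 2 is set for string
 * instructions (INS/OUTS), bit 3 is set when a REP prefix is present,
 * bits 6:4 hold the operand size in bytes, bits 9:7 encode the address
 * size, bits 12:10 hold the effective segment (valid only with decode
 * assist, see above) and bits 31:16 hold the port number.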
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
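 *
 * The EVENTINJ field constructed below encodes the vector in bits 7:0,
 * the event type shifted into the bits starting at bit 8, an
 * error-code-valid flag, the valid bit and, when applicable, the error
 * code in bits 63:32.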
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected
 * to track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next.  However, it is possible to inject
However, it is possible to inject 1089 * another NMI into the vcpu before the "iret" has actually executed. 1090 * 1091 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1092 * it will trap back into the hypervisor. If an NMI is pending for 1093 * the vcpu it will be injected into the guest. 1094 * 1095 * XXX this needs to be fixed 1096 */ 1097 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1098 1099 /* 1100 * Set 'intr_shadow' to prevent an NMI from being injected on the 1101 * immediate VMRUN. 1102 */ 1103 error = svm_modify_intr_shadow(sc, vcpu, 1); 1104 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1105 } 1106 1107 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1108 1109 static int 1110 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu) 1111 { 1112 struct vm_exit *vme; 1113 struct vmcb_state *state; 1114 uint64_t changed, lma, oldval; 1115 int error; 1116 1117 state = svm_get_vmcb_state(sc, vcpu); 1118 1119 oldval = state->efer; 1120 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1121 1122 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1123 changed = oldval ^ newval; 1124 1125 if (newval & EFER_MBZ_BITS) 1126 goto gpf; 1127 1128 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1129 if (changed & EFER_LME) { 1130 if (state->cr0 & CR0_PG) 1131 goto gpf; 1132 } 1133 1134 /* EFER.LMA = EFER.LME & CR0.PG */ 1135 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1136 lma = EFER_LMA; 1137 else 1138 lma = 0; 1139 1140 if ((newval & EFER_LMA) != lma) 1141 goto gpf; 1142 1143 if (newval & EFER_NXE) { 1144 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) 1145 goto gpf; 1146 } 1147 1148 /* 1149 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1150 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_NMI:
		return ("nmi");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	case VMCB_EXIT_IRET:
		return ("iret");
	case VMCB_EXIT_MONITOR:
		return ("monitor");
	case VMCB_EXIT_MWAIT:
		return ("mwait");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}
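
/*
 * The return value of svm_vmexit() above drives the run loop in svm_vmrun()
 * below: a non-zero value means the #VMEXIT was handled here and the guest
 * is resumed, while zero hands the populated 'vmexit' back to userland.
 */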

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window;
	int extint_pending;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}

static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
{

	/* Save host control debug registers. */
	gctx->host_dr7 = rdr7();
	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
	 * VMCB.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/* Save host debug registers. */
	gctx->host_dr0 = rdr0();
	gctx->host_dr1 = rdr1();
	gctx->host_dr2 = rdr2();
	gctx->host_dr3 = rdr3();
	gctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(gctx->sctx_dr0);
	load_dr1(gctx->sctx_dr1);
	load_dr2(gctx->sctx_dr2);
	load_dr3(gctx->sctx_dr3);
}

static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
{

	/* Save guest debug registers. */
	gctx->sctx_dr0 = rdr0();
	gctx->sctx_dr1 = rdr1();
	gctx->sctx_dr2 = rdr2();
	gctx->sctx_dr3 = rdr3();

	/*
	 * Restore host debug registers. Restore DR7 and DEBUGCTL
	 * last.
	 */
	load_dr0(gctx->host_dr0);
	load_dr1(gctx->host_dr1);
	load_dr2(gctx->host_dr2);
	load_dr3(gctx->host_dr3);
	load_dr6(gctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
	load_dr7(gctx->host_dr7);
}

/*
 * Start vcpu with specified RIP.
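 *
 * The loop below repeatedly injects pending events, activates the nested
 * pmap, validates the vcpu's ASID and then enters the guest via svm_launch()
 * with global interrupts disabled; it keeps re-entering the guest for as
 * long as svm_vmexit() reports that the #VMEXIT was handled in the kernel.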
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to yield the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vm, vcpu)) {
			enable_gintr();
			vm_exit_debug(vm, vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit.  The userspace hypervisor probably
		 * doesn't use a LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'curcpu' */
		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, curcpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
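
		/*
		 * svm_launch() is the low-level entry point (implemented in
		 * assembly, not in this file); it is expected to load the
		 * guest GPRs from 'gctx', execute VMRUN on the VMCB at
		 * 'vmcb_pa' and save the guest GPRs back into 'gctx' on
		 * #VMEXIT.  The guest %rax lives in the VMCB itself
		 * (state->rax).
		 */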

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
		svm_dr_leave_guest(gctx);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

/*
 * Return a pointer to the software-saved copy of register 'reg' in the
 * guest register context, or NULL if the register is not saved there.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}
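
/*
 * Register writes follow the same lookup order as svm_getreg() above:
 * the interrupt shadow pseudo-register first, then registers that live
 * in the VMCB, and finally the software-saved register context.
 */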
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	.init = svm_init,
	.cleanup = svm_cleanup,
	.resume = svm_restore,
	.vminit = svm_vminit,
	.vmrun = svm_vmrun,
	.vmcleanup = svm_vmcleanup,
	.vmgetreg = svm_getreg,
	.vmsetreg = svm_setreg,
	.vmgetdesc = vmcb_getdesc,
	.vmsetdesc = vmcb_setdesc,
	.vmgetcap = svm_getcap,
	.vmsetcap = svm_setcap,
	.vmspace_alloc = svm_npt_alloc,
	.vmspace_free = svm_npt_free,
	.vlapic_init = svm_vlapic_init,
	.vlapic_cleanup = svm_vlapic_cleanup,
};