/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/reg.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */
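
/*
 * Worked example (hypothetical value, for illustration only): if CPUID
 * Fn8000_000A reported EDX = 0x5F, the advertised features would be
 *
 *	(AMD_CPUID_SVM_NP | AMD_CPUID_SVM_LBR | AMD_CPUID_SVM_SVML |
 *	 AMD_CPUID_SVM_NRIP_SAVE | AMD_CPUID_SVM_TSC_RATE |
 *	 AMD_CPUID_SVM_FLUSH_BY_ASID)
 *
 * check_svm_features() below masks 'svm_feature' with this register and
 * requires at least the NP and NRIP_SAVE bits to be present.
 */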

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_modcleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}
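
/*
 * Illustrative note (not part of the driver logic): the sysctls above are
 * declared with CTLFLAG_RDTUN, so both knobs can be seeded as loader
 * tunables before the module initializes, e.g. in loader.conf:
 *
 *	hw.vmm.svm.num_asids=64		(hypothetical value)
 *
 * check_svm_features() then clamps 'nasid' to what CPUID Fn8000_000A:EBX
 * reports and masks 'svm_feature' with CPUID Fn8000_000A:EDX.
 */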

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_modinit(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_modresume(void)
{

	svm_enable(NULL);
}

#ifdef BHYVE_SNAPSHOT
int
svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset)
{
	int error;
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	ctrl->tsc_offset = offset;

	svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
	VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset);

	error = vm_set_tsc_offset(sc->vm, vcpu, offset);

	return (error);
}
#endif

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
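
/*
 * Worked example (illustration only): for MSR_EFER (0xC0000080) the MSR
 * falls in the AMD 6th generation range, so base = 0x2000, off = 0x80 and
 * therefore *index = (0x80 + 0x2000) / 4 = 0x820 and *bit = (0x80 % 4) * 2
 * = 0.  Clearing bit 0 of perm_bitmap[0x820] permits guest rdmsr of EFER
 * and clearing bit 1 permits wrmsr; see svm_msr_perm() below.
 */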

/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error __diagused;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * Intercept SVM instructions since AMD enables them in guests otherwise.
	 * Non-intercepted VMMCALL causes #UD, skip it.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
	if (vcpu_trap_wbinvd(sc->vm, vcpu)) {
		svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
		    VMCB_INTCPT_WBINVD);
	}

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}
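
/*
 * A minimal sketch of the CR intercept layout assumed in vmcb_init(): in
 * the VMCB_CR_INTCPT word the low 16 bits intercept reads of CR0..CR15
 * and the high 16 bits intercept writes, so for e.g. CR6 the combined
 * read/write mask built above is
 *
 *	mask = (BIT(6) << 16) | BIT(6) = 0x00400040
 *
 * while CR0/CR2/CR3/CR4/CR8, which are shadowed in the VMCB, are left
 * unintercepted.
 */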

/*
 * Initialize a virtual machine.
 */
static void *
svm_init(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;
	uint16_t maxcpus;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pmltop);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	maxcpus = vm_get_maxcpus(svm_sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error __diagused;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}
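
/*
 * Worked example (illustration only): a guest running with CR0.PG=1,
 * CR4.PAE=1 and EFER.LME=0 is classified as PAGING_MODE_PAE by
 * svm_paging_mode() above, while the same guest with EFER.LME=1 would be
 * PAGING_MODE_64.  For string I/O the index register is %rdi for "ins"
 * and %rsi for "outs", and the count comes from %rcx only when the REP
 * prefix was present.
 */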

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error __diagused, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}
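
/*
 * Worked example (hypothetical encoding, derived from the decoding above):
 * for a single "inb" from port 0x60 by a guest using 32-bit addressing,
 * EXITINFO1 could look like 0x00600111:
 *
 *	bit 0		= 1	-> in
 *	bit 4		= 1	-> 1 byte operand
 *	bit 8		= 1	-> address size encoding 2 (32 bit)
 *	bits 16..31	= 0x60	-> port number
 *
 * svm_handle_io() would then report in=1, string=0, rep=0, bytes=1 and
 * port=0x60 to the generic in/out handling code.
 */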

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error __diagused, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}
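
/*
 * Sketch of the EVENTINJ encoding built above (see also APMv2 15.20): the
 * vector occupies the low byte, the event type is shifted into the next
 * bits, and the error code, when present, lives in the upper 32 bits.
 * Injecting a #GP with error code 0x10, for example, amounts to
 *
 *	ctrl->eventinj = IDT_GP | (VMCB_EVENTINJ_TYPE_EXCEPTION << 8) |
 *	    VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_EC_VALID |
 *	    ((uint64_t)0x10 << 32);
 *
 * which is what a call like
 * svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_EXCEPTION, IDT_GP, 0x10, true)
 * produces.
 */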

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error __diagused;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next.  However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error __diagused;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}
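
/*
 * Worked example (illustration only) of the consistency checks above: a
 * guest running with CR0.PG=1 that tries to toggle EFER.LME, or that
 * writes a value whose EFER.LMA bit disagrees with EFER.LME & CR0.PG,
 * takes a #GP injected by svm_write_efer() rather than a pass-through MSR
 * write.  A write that merely sets EFER.NXE succeeds as long as the NX
 * capability is exposed in the guest's CPUID.
 */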

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	int i;
	static char reasonbuf[32];
	static const struct {
		int reason;
		const char *str;
	} reasons[] = {
		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
		{ .reason = VMCB_EXIT_NPF,	.str = "nptfault" },
		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
	};

	for (i = 0; i < nitems(reasons); i++) {
		if (reasons[i].reason == reason)
			return (reasons[i].str);
	}
	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
	return (reasonbuf);
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}
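
/*
 * Illustration of how nRIP is consumed below: for a two-byte "cpuid"
 * intercept (#VMEXIT code 0x72, which nrip_valid() accepts) with guest
 * RIP = 0x1000, the VMCB reports nRIP = 0x1002 and svm_vmexit() derives
 * inst_length = nrip - rip = 2, so a handled exit resumes the guest at
 * 0x1002.  For exit codes where nRIP is not saved, inst_length is left at
 * 0 and the emulation path must determine the instruction length itself.
 */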

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error __diagused, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	case VMCB_EXIT_SHUTDOWN:
	case VMCB_EXIT_VMRUN:
	case VMCB_EXIT_VMMCALL:
	case VMCB_EXIT_VMLOAD:
	case VMCB_EXIT_VMSAVE:
	case VMCB_EXIT_STGI:
	case VMCB_EXIT_CLGI:
	case VMCB_EXIT_SKINIT:
	case VMCB_EXIT_ICEBP:
	case VMCB_EXIT_INVD:
	case VMCB_EXIT_INVLPGA:
		vm_inject_ud(svm_sc->vm, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_WBINVD:
		/* ignore WBINVD */
		handled = 1;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}
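
/*
 * Sketch of the pending-event round trip implemented by svm_save_intinfo()
 * and svm_inj_intinfo(): if a #VMEXIT (say an NPT fault) interrupts the
 * delivery of an event, the hardware leaves the event description in
 * EXITINTINFO.  svm_save_intinfo() hands that value to vm_exit_intinfo(),
 * and on the next entry vm_entry_intinfo() returns it here so the event is
 * re-injected via svm_eventinject() with the same type, vector and error
 * code.  The VCPU_EXITINTINFO and VCPU_INTINFO_INJECTED counters track the
 * two halves of this exchange.
 */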

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window;
	int extint_pending;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}
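
/*
 * Sketch of the interrupt window mechanism used above: when a vector is
 * pending but cannot be injected (RFLAGS.IF clear, interrupt shadow, or an
 * event already latched in EVENTINJ), enable_intr_window_exiting()
 * programs a dummy virtual interrupt with V_IRQ=1, V_IGN_TPR=1 and
 * V_INTR_VECTOR=0 and intercepts VINTR.  The guest then causes a
 * #VMEXIT(VINTR) at the first instruction boundary where it could take an
 * interrupt, at which point this function runs again and injects the real
 * vector.
 */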

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	int cpu;
	bool alloc_asid;

	cpu = curcpu;
	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
	smr_enter(pmap->pm_eptsmr);

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = atomic_load_long(&pmap->pm_eptgen);
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[cpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[cpu].num >= nasid) {
			asid[cpu].num = 1;
			if (++asid[cpu].gen == 0)
				asid[cpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[cpu].gen;
		vcpustate->asid.num = asid[cpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}
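
/*
 * Worked example of the ASID machinery above (hypothetical numbers): with
 * nasid = 8, a host cpu that has already handed out ASIDs 1..7 in
 * generation 5 will, on the next allocation, wrap asid[cpu].num back to 1
 * and bump asid[cpu].gen to 6.  Every vcpu that later runs on that cpu
 * still carries gen 5 in 'vcpustate->asid.gen', so the first comparison
 * above forces a fresh ASID (cases (c)/(d)), and on hardware without
 * flush-by-asid the wrap itself triggers VMCB_TLB_FLUSH_ALL.
 */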
1906 */ 1907 if (flush_by_asid()) 1908 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1909 } 1910 vcpustate->eptgen = eptgen; 1911 1912 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1913 KASSERT(ctrl->asid == vcpustate->asid.num, 1914 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1915 } 1916 1917 static void 1918 svm_pmap_deactivate(pmap_t pmap) 1919 { 1920 smr_exit(pmap->pm_eptsmr); 1921 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 1922 } 1923 1924 static __inline void 1925 disable_gintr(void) 1926 { 1927 1928 __asm __volatile("clgi"); 1929 } 1930 1931 static __inline void 1932 enable_gintr(void) 1933 { 1934 1935 __asm __volatile("stgi"); 1936 } 1937 1938 static __inline void 1939 svm_dr_enter_guest(struct svm_regctx *gctx) 1940 { 1941 1942 /* Save host control debug registers. */ 1943 gctx->host_dr7 = rdr7(); 1944 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1945 1946 /* 1947 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1948 * exceptions in the host based on the guest DRx values. The 1949 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1950 * VMCB. 1951 */ 1952 load_dr7(0); 1953 wrmsr(MSR_DEBUGCTLMSR, 0); 1954 1955 /* Save host debug registers. */ 1956 gctx->host_dr0 = rdr0(); 1957 gctx->host_dr1 = rdr1(); 1958 gctx->host_dr2 = rdr2(); 1959 gctx->host_dr3 = rdr3(); 1960 gctx->host_dr6 = rdr6(); 1961 1962 /* Restore guest debug registers. */ 1963 load_dr0(gctx->sctx_dr0); 1964 load_dr1(gctx->sctx_dr1); 1965 load_dr2(gctx->sctx_dr2); 1966 load_dr3(gctx->sctx_dr3); 1967 } 1968 1969 static __inline void 1970 svm_dr_leave_guest(struct svm_regctx *gctx) 1971 { 1972 1973 /* Save guest debug registers. */ 1974 gctx->sctx_dr0 = rdr0(); 1975 gctx->sctx_dr1 = rdr1(); 1976 gctx->sctx_dr2 = rdr2(); 1977 gctx->sctx_dr3 = rdr3(); 1978 1979 /* 1980 * Restore host debug registers. Restore DR7 and DEBUGCTL 1981 * last. 1982 */ 1983 load_dr0(gctx->host_dr0); 1984 load_dr1(gctx->host_dr1); 1985 load_dr2(gctx->host_dr2); 1986 load_dr3(gctx->host_dr3); 1987 load_dr6(gctx->host_dr6); 1988 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1989 load_dr7(gctx->host_dr7); 1990 } 1991 1992 /* 1993 * Start vcpu with specified RIP. 1994 */ 1995 static int 1996 svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 1997 struct vm_eventinfo *evinfo) 1998 { 1999 struct svm_regctx *gctx; 2000 struct svm_softc *svm_sc; 2001 struct svm_vcpu *vcpustate; 2002 struct vmcb_state *state; 2003 struct vmcb_ctrl *ctrl; 2004 struct vm_exit *vmexit; 2005 struct vlapic *vlapic; 2006 struct vm *vm; 2007 uint64_t vmcb_pa; 2008 int handled; 2009 uint16_t ldt_sel; 2010 2011 svm_sc = arg; 2012 vm = svm_sc->vm; 2013 2014 vcpustate = svm_get_vcpu(svm_sc, vcpu); 2015 state = svm_get_vmcb_state(svm_sc, vcpu); 2016 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 2017 vmexit = vm_exitinfo(vm, vcpu); 2018 vlapic = vm_lapic(vm, vcpu); 2019 2020 gctx = svm_get_guest_regctx(svm_sc, vcpu); 2021 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 2022 2023 if (vcpustate->lastcpu != curcpu) { 2024 /* 2025 * Force new ASID allocation by invalidating the generation. 2026 */ 2027 vcpustate->asid.gen = 0; 2028 2029 /* 2030 * Invalidate the VMCB state cache by marking all fields dirty. 2031 */ 2032 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 2033 2034 /* 2035 * XXX 2036 * Setting 'vcpustate->lastcpu' here is bit premature because 2037 * we may return from this function without actually executing 2038 * the VMRUN instruction. This could happen if a rendezvous 2039 * or an AST is pending on the first time through the loop. 
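 * (For example, vcpu_rendezvous_pending() or vcpu_should_yield()
 * below can make the loop break out and return before svm_launch()
 * is ever reached on this cpu.)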
2040 * 2041 * This works for now but any new side-effects of vcpu 2042 * migration should take this case into account. 2043 */ 2044 vcpustate->lastcpu = curcpu; 2045 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 2046 } 2047 2048 svm_msr_guest_enter(svm_sc, vcpu); 2049 2050 /* Update Guest RIP */ 2051 state->rip = rip; 2052 2053 do { 2054 /* 2055 * Disable global interrupts to guarantee atomicity during 2056 * loading of guest state. This includes not only the state 2057 * loaded by the "vmrun" instruction but also software state 2058 * maintained by the hypervisor: suspended and rendezvous 2059 * state, NPT generation number, vlapic interrupts etc. 2060 */ 2061 disable_gintr(); 2062 2063 if (vcpu_suspended(evinfo)) { 2064 enable_gintr(); 2065 vm_exit_suspended(vm, vcpu, state->rip); 2066 break; 2067 } 2068 2069 if (vcpu_rendezvous_pending(evinfo)) { 2070 enable_gintr(); 2071 vm_exit_rendezvous(vm, vcpu, state->rip); 2072 break; 2073 } 2074 2075 if (vcpu_reqidle(evinfo)) { 2076 enable_gintr(); 2077 vm_exit_reqidle(vm, vcpu, state->rip); 2078 break; 2079 } 2080 2081 /* The scheduler has asked us to yield the cpu. */ 2082 if (vcpu_should_yield(vm, vcpu)) { 2083 enable_gintr(); 2084 vm_exit_astpending(vm, vcpu, state->rip); 2085 break; 2086 } 2087 2088 if (vcpu_debugged(vm, vcpu)) { 2089 enable_gintr(); 2090 vm_exit_debug(vm, vcpu, state->rip); 2091 break; 2092 } 2093 2094 /* 2095 * #VMEXIT resumes the host with the guest LDTR, so 2096 * save the current LDT selector so it can be restored 2097 * after an exit. The userspace hypervisor probably 2098 * doesn't use an LDT, but save and restore it to be 2099 * safe. 2100 */ 2101 ldt_sel = sldt(); 2102 2103 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2104 2105 /* 2106 * Check the pmap generation and the ASID generation to 2107 * ensure that the vcpu does not use stale TLB mappings. 2108 */ 2109 svm_pmap_activate(svm_sc, vcpu, pmap); 2110 2111 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 2112 vcpustate->dirty = 0; 2113 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2114 2115 /* Launch Virtual Machine. */ 2116 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 2117 svm_dr_enter_guest(gctx); 2118 svm_launch(vmcb_pa, gctx, get_pcpu()); 2119 svm_dr_leave_guest(gctx); 2120 2121 svm_pmap_deactivate(pmap); 2122 2123 /* 2124 * The host GDTR and IDTR are saved by VMRUN and restored 2125 * automatically on #VMEXIT. However, the host TSS needs 2126 * to be restored explicitly. 2127 */ 2128 restore_host_tss(); 2129 2130 /* Restore host LDTR. */ 2131 lldt(ldt_sel); 2132 2133 /* #VMEXIT disables interrupts so re-enable them here. */ 2134 enable_gintr(); 2135 2136 /* Update 'nextrip' */ 2137 vcpustate->nextrip = state->rip; 2138 2139 /* Handle #VMEXIT and, if required, return to user space.
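 * svm_vmexit() returns non-zero when the exit was handled entirely
 * in the kernel, in which case the loop resumes the guest; a return
 * of zero ends the loop and the details recorded in 'vmexit' are
 * handed back to the userspace hypervisor.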
*/ 2140 handled = svm_vmexit(svm_sc, vcpu, vmexit); 2141 } while (handled); 2142 2143 svm_msr_guest_exit(svm_sc, vcpu); 2144 2145 return (0); 2146 } 2147 2148 static void 2149 svm_cleanup(void *arg) 2150 { 2151 struct svm_softc *sc = arg; 2152 2153 contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM); 2154 contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM); 2155 free(sc, M_SVM); 2156 } 2157 2158 static register_t * 2159 swctx_regptr(struct svm_regctx *regctx, int reg) 2160 { 2161 2162 switch (reg) { 2163 case VM_REG_GUEST_RBX: 2164 return (&regctx->sctx_rbx); 2165 case VM_REG_GUEST_RCX: 2166 return (&regctx->sctx_rcx); 2167 case VM_REG_GUEST_RDX: 2168 return (&regctx->sctx_rdx); 2169 case VM_REG_GUEST_RDI: 2170 return (&regctx->sctx_rdi); 2171 case VM_REG_GUEST_RSI: 2172 return (&regctx->sctx_rsi); 2173 case VM_REG_GUEST_RBP: 2174 return (&regctx->sctx_rbp); 2175 case VM_REG_GUEST_R8: 2176 return (&regctx->sctx_r8); 2177 case VM_REG_GUEST_R9: 2178 return (&regctx->sctx_r9); 2179 case VM_REG_GUEST_R10: 2180 return (&regctx->sctx_r10); 2181 case VM_REG_GUEST_R11: 2182 return (&regctx->sctx_r11); 2183 case VM_REG_GUEST_R12: 2184 return (&regctx->sctx_r12); 2185 case VM_REG_GUEST_R13: 2186 return (&regctx->sctx_r13); 2187 case VM_REG_GUEST_R14: 2188 return (&regctx->sctx_r14); 2189 case VM_REG_GUEST_R15: 2190 return (&regctx->sctx_r15); 2191 case VM_REG_GUEST_DR0: 2192 return (&regctx->sctx_dr0); 2193 case VM_REG_GUEST_DR1: 2194 return (&regctx->sctx_dr1); 2195 case VM_REG_GUEST_DR2: 2196 return (&regctx->sctx_dr2); 2197 case VM_REG_GUEST_DR3: 2198 return (&regctx->sctx_dr3); 2199 default: 2200 return (NULL); 2201 } 2202 } 2203 2204 static int 2205 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 2206 { 2207 struct svm_softc *svm_sc; 2208 register_t *reg; 2209 2210 svm_sc = arg; 2211 2212 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2213 return (svm_get_intr_shadow(svm_sc, vcpu, val)); 2214 } 2215 2216 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) { 2217 return (0); 2218 } 2219 2220 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2221 2222 if (reg != NULL) { 2223 *val = *reg; 2224 return (0); 2225 } 2226 2227 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident); 2228 return (EINVAL); 2229 } 2230 2231 static int 2232 svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 2233 { 2234 struct svm_softc *svm_sc; 2235 register_t *reg; 2236 2237 svm_sc = arg; 2238 2239 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2240 return (svm_modify_intr_shadow(svm_sc, vcpu, val)); 2241 } 2242 2243 /* Do not permit user write access to VMCB fields by offset. */ 2244 if (!VMCB_ACCESS_OK(ident)) { 2245 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) { 2246 return (0); 2247 } 2248 } 2249 2250 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2251 2252 if (reg != NULL) { 2253 *reg = val; 2254 return (0); 2255 } 2256 2257 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) { 2258 /* Ignore. */ 2259 return (0); 2260 } 2261 2262 /* 2263 * XXX deal with CR3 and invalidate TLB entries tagged with the 2264 * vcpu's ASID. This needs to be treated differently depending on 2265 * whether 'running' is true/false.
2266 */ 2267 2268 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); 2269 return (EINVAL); 2270 } 2271 2272 static int 2273 svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2274 { 2275 return (vmcb_getdesc(arg, vcpu, reg, desc)); 2276 } 2277 2278 static int 2279 svm_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2280 { 2281 return (vmcb_setdesc(arg, vcpu, reg, desc)); 2282 } 2283 2284 #ifdef BHYVE_SNAPSHOT 2285 static int 2286 svm_snapshot_reg(void *arg, int vcpu, int ident, 2287 struct vm_snapshot_meta *meta) 2288 { 2289 int ret; 2290 uint64_t val; 2291 2292 if (meta->op == VM_SNAPSHOT_SAVE) { 2293 ret = svm_getreg(arg, vcpu, ident, &val); 2294 if (ret != 0) 2295 goto done; 2296 2297 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2298 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2299 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2300 2301 ret = svm_setreg(arg, vcpu, ident, val); 2302 if (ret != 0) 2303 goto done; 2304 } else { 2305 ret = EINVAL; 2306 goto done; 2307 } 2308 2309 done: 2310 return (ret); 2311 } 2312 #endif 2313 2314 static int 2315 svm_setcap(void *arg, int vcpu, int type, int val) 2316 { 2317 struct svm_softc *sc; 2318 int error; 2319 2320 sc = arg; 2321 error = 0; 2322 switch (type) { 2323 case VM_CAP_HALT_EXIT: 2324 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2325 VMCB_INTCPT_HLT, val); 2326 break; 2327 case VM_CAP_PAUSE_EXIT: 2328 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2329 VMCB_INTCPT_PAUSE, val); 2330 break; 2331 case VM_CAP_UNRESTRICTED_GUEST: 2332 /* Unrestricted guest execution cannot be disabled in SVM */ 2333 if (val == 0) 2334 error = EINVAL; 2335 break; 2336 default: 2337 error = ENOENT; 2338 break; 2339 } 2340 return (error); 2341 } 2342 2343 static int 2344 svm_getcap(void *arg, int vcpu, int type, int *retval) 2345 { 2346 struct svm_softc *sc; 2347 int error; 2348 2349 sc = arg; 2350 error = 0; 2351 2352 switch (type) { 2353 case VM_CAP_HALT_EXIT: 2354 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2355 VMCB_INTCPT_HLT); 2356 break; 2357 case VM_CAP_PAUSE_EXIT: 2358 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2359 VMCB_INTCPT_PAUSE); 2360 break; 2361 case VM_CAP_UNRESTRICTED_GUEST: 2362 *retval = 1; /* unrestricted guest is always enabled */ 2363 break; 2364 default: 2365 error = ENOENT; 2366 break; 2367 } 2368 return (error); 2369 } 2370 2371 static struct vmspace * 2372 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) 2373 { 2374 return (svm_npt_alloc(min, max)); 2375 } 2376 2377 static void 2378 svm_vmspace_free(struct vmspace *vmspace) 2379 { 2380 svm_npt_free(vmspace); 2381 } 2382 2383 static struct vlapic * 2384 svm_vlapic_init(void *arg, int vcpuid) 2385 { 2386 struct svm_softc *svm_sc; 2387 struct vlapic *vlapic; 2388 2389 svm_sc = arg; 2390 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2391 vlapic->vm = svm_sc->vm; 2392 vlapic->vcpuid = vcpuid; 2393 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2394 2395 vlapic_init(vlapic); 2396 2397 return (vlapic); 2398 } 2399 2400 static void 2401 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2402 { 2403 2404 vlapic_cleanup(vlapic); 2405 free(vlapic, M_SVM_VLAPIC); 2406 } 2407 2408 #ifdef BHYVE_SNAPSHOT 2409 static int 2410 svm_snapshot(void *arg, struct vm_snapshot_meta *meta) 2411 { 2412 /* struct svm_softc is AMD's representation for SVM softc */ 2413 struct svm_softc *sc; 2414 struct svm_vcpu *vcpu; 2415 struct vmcb *vmcb; 2416 uint64_t val; 2417 int i; 2418 int ret; 2419 
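/*
 * Save or restore the softc-wide state first; the loop below then
 * handles, for each virtual cpu, the VMCB control and save-state
 * fields followed by the software register context that is kept
 * outside of the VMCB.
 */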
2420 sc = arg; 2421 2422 KASSERT(sc != NULL, ("%s: arg was NULL", __func__)); 2423 2424 SNAPSHOT_VAR_OR_LEAVE(sc->nptp, meta, ret, done); 2425 2426 for (i = 0; i < VM_MAXCPU; i++) { 2427 vcpu = &sc->vcpu[i]; 2428 vmcb = &vcpu->vmcb; 2429 2430 /* VMCB fields for virtual cpu i */ 2431 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.v_tpr, meta, ret, done); 2432 val = vmcb->ctrl.v_tpr; 2433 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2434 vmcb->ctrl.v_tpr = val; 2435 2436 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.asid, meta, ret, done); 2437 val = vmcb->ctrl.np_enable; 2438 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2439 vmcb->ctrl.np_enable = val; 2440 2441 val = vmcb->ctrl.intr_shadow; 2442 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2443 vmcb->ctrl.intr_shadow = val; 2444 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.tlb_ctrl, meta, ret, done); 2445 2446 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad1, 2447 sizeof(vmcb->state.pad1), 2448 meta, ret, done); 2449 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cpl, meta, ret, done); 2450 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad2, 2451 sizeof(vmcb->state.pad2), 2452 meta, ret, done); 2453 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.efer, meta, ret, done); 2454 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad3, 2455 sizeof(vmcb->state.pad3), 2456 meta, ret, done); 2457 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr4, meta, ret, done); 2458 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr3, meta, ret, done); 2459 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr0, meta, ret, done); 2460 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr7, meta, ret, done); 2461 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr6, meta, ret, done); 2462 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rflags, meta, ret, done); 2463 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rip, meta, ret, done); 2464 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad4, 2465 sizeof(vmcb->state.pad4), 2466 meta, ret, done); 2467 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rsp, meta, ret, done); 2468 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad5, 2469 sizeof(vmcb->state.pad5), 2470 meta, ret, done); 2471 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rax, meta, ret, done); 2472 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.star, meta, ret, done); 2473 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.lstar, meta, ret, done); 2474 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cstar, meta, ret, done); 2475 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sfmask, meta, ret, done); 2476 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.kernelgsbase, 2477 meta, ret, done); 2478 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_cs, meta, ret, done); 2479 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_esp, 2480 meta, ret, done); 2481 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_eip, 2482 meta, ret, done); 2483 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr2, meta, ret, done); 2484 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad6, 2485 sizeof(vmcb->state.pad6), 2486 meta, ret, done); 2487 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.g_pat, meta, ret, done); 2488 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dbgctl, meta, ret, done); 2489 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_from, meta, ret, done); 2490 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_to, meta, ret, done); 2491 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_from, meta, ret, done); 2492 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_to, meta, ret, done); 2493 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad7, 2494 sizeof(vmcb->state.pad7), 2495 meta, ret, done); 2496 2497 /* Snapshot swctx for virtual cpu i */ 2498 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, ret, done); 2499 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, ret, done); 2500 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, ret, done); 2501 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, ret, done); 2502 
SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, ret, done); 2503 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, ret, done); 2504 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, ret, done); 2505 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, ret, done); 2506 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, ret, done); 2507 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, ret, done); 2508 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, ret, done); 2509 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, ret, done); 2510 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, ret, done); 2511 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, ret, done); 2512 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, ret, done); 2513 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, ret, done); 2514 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, ret, done); 2515 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, ret, done); 2516 2517 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr0, meta, ret, done); 2518 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr1, meta, ret, done); 2519 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr2, meta, ret, done); 2520 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr3, meta, ret, done); 2521 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr6, meta, ret, done); 2522 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr7, meta, ret, done); 2523 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_debugctl, meta, ret, 2524 done); 2525 2526 /* Restore other svm_vcpu struct fields */ 2527 2528 /* Restore NEXTRIP field */ 2529 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); 2530 2531 /* Restore lastcpu field */ 2532 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, ret, done); 2533 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, ret, done); 2534 2535 /* Restore EPTGEN field - EPT is Extended Page Table */ 2536 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, ret, done); 2537 2538 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, ret, done); 2539 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, ret, done); 2540 2541 /* Set all caches dirty */ 2542 if (meta->op == VM_SNAPSHOT_RESTORE) { 2543 svm_set_dirty(sc, i, VMCB_CACHE_ASID); 2544 svm_set_dirty(sc, i, VMCB_CACHE_IOPM); 2545 svm_set_dirty(sc, i, VMCB_CACHE_I); 2546 svm_set_dirty(sc, i, VMCB_CACHE_TPR); 2547 svm_set_dirty(sc, i, VMCB_CACHE_CR2); 2548 svm_set_dirty(sc, i, VMCB_CACHE_CR); 2549 svm_set_dirty(sc, i, VMCB_CACHE_DT); 2550 svm_set_dirty(sc, i, VMCB_CACHE_SEG); 2551 svm_set_dirty(sc, i, VMCB_CACHE_NP); 2552 } 2553 } 2554 2555 if (meta->op == VM_SNAPSHOT_RESTORE) 2556 flush_by_asid(); 2557 2558 done: 2559 return (ret); 2560 } 2561 2562 static int 2563 svm_vmcx_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpu) 2564 { 2565 struct svm_softc *sc; 2566 int err, running, hostcpu; 2567 2568 sc = (struct svm_softc *)arg; 2569 err = 0; 2570 2571 KASSERT(arg != NULL, ("%s: arg was NULL", __func__)); 2572 2573 running = vcpu_is_running(sc->vm, vcpu, &hostcpu); 2574 if (running && hostcpu != curcpu) { 2575 printf("%s: %s%d is running", __func__, vm_name(sc->vm), vcpu); 2576 return (EINVAL); 2577 } 2578 2579 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta); 2580 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta); 2581 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta); 2582 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta); 2583 2584 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta); 2585 2586 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta); 2587 2588 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta); 2589 err += svm_snapshot_reg(sc, vcpu,
VM_REG_GUEST_RIP, meta); 2590 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta); 2591 2592 /* Guest segments */ 2593 /* ES */ 2594 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta); 2595 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta); 2596 2597 /* CS */ 2598 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta); 2599 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta); 2600 2601 /* SS */ 2602 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta); 2603 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta); 2604 2605 /* DS */ 2606 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta); 2607 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta); 2608 2609 /* FS */ 2610 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta); 2611 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta); 2612 2613 /* GS */ 2614 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta); 2615 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta); 2616 2617 /* TR */ 2618 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta); 2619 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta); 2620 2621 /* LDTR */ 2622 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta); 2623 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta); 2624 2625 /* EFER */ 2626 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta); 2627 2628 /* IDTR and GDTR */ 2629 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta); 2630 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta); 2631 2632 /* Specific AMD registers */ 2633 err += vmcb_snapshot_any(sc, vcpu, 2634 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2635 err += vmcb_snapshot_any(sc, vcpu, 2636 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2637 err += vmcb_snapshot_any(sc, vcpu, 2638 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2639 2640 err += vmcb_snapshot_any(sc, vcpu, 2641 VMCB_ACCESS(VMCB_OFF_NPT_BASE, 8), meta); 2642 2643 err += vmcb_snapshot_any(sc, vcpu, 2644 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2645 err += vmcb_snapshot_any(sc, vcpu, 2646 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2647 err += vmcb_snapshot_any(sc, vcpu, 2648 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2649 err += vmcb_snapshot_any(sc, vcpu, 2650 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2651 err += vmcb_snapshot_any(sc, vcpu, 2652 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2653 2654 err += vmcb_snapshot_any(sc, vcpu, 2655 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2656 2657 err += vmcb_snapshot_any(sc, vcpu, 2658 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2659 err += vmcb_snapshot_any(sc, vcpu, 2660 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2661 err += vmcb_snapshot_any(sc, vcpu, 2662 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2663 2664 err += vmcb_snapshot_any(sc, vcpu, 2665 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2666 2667 err += vmcb_snapshot_any(sc, vcpu, 2668 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2669 2670 err += vmcb_snapshot_any(sc, vcpu, 2671 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2672 err += vmcb_snapshot_any(sc, vcpu, 2673 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2674 err += vmcb_snapshot_any(sc, vcpu, 2675 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2676 err += vmcb_snapshot_any(sc, vcpu, 2677 VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2678 2679 err += vmcb_snapshot_any(sc, vcpu, 2680 VMCB_ACCESS(VMCB_OFF_IO_PERM, 8), meta); 2681 err += vmcb_snapshot_any(sc, vcpu, 2682 VMCB_ACCESS(VMCB_OFF_MSR_PERM, 8), meta); 2683 2684 err += vmcb_snapshot_any(sc, vcpu, 2685 
VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2686 2687 err += vmcb_snapshot_any(sc, vcpu, 2688 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2689 2690 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2691 2692 return (err); 2693 } 2694 2695 static int 2696 svm_restore_tsc(void *arg, int vcpu, uint64_t offset) 2697 { 2698 int err; 2699 2700 err = svm_set_tsc_offset(arg, vcpu, offset); 2701 2702 return (err); 2703 } 2704 #endif 2705 2706 const struct vmm_ops vmm_ops_amd = { 2707 .modinit = svm_modinit, 2708 .modcleanup = svm_modcleanup, 2709 .modresume = svm_modresume, 2710 .init = svm_init, 2711 .run = svm_run, 2712 .cleanup = svm_cleanup, 2713 .getreg = svm_getreg, 2714 .setreg = svm_setreg, 2715 .getdesc = svm_getdesc, 2716 .setdesc = svm_setdesc, 2717 .getcap = svm_getcap, 2718 .setcap = svm_setcap, 2719 .vmspace_alloc = svm_vmspace_alloc, 2720 .vmspace_free = svm_vmspace_free, 2721 .vlapic_init = svm_vlapic_init, 2722 .vlapic_cleanup = svm_vlapic_cleanup, 2723 #ifdef BHYVE_SNAPSHOT 2724 .snapshot = svm_snapshot, 2725 .vmcx_snapshot = svm_vmcx_snapshot, 2726 .restore_tsc = svm_restore_tsc, 2727 #endif 2728 }; 2729
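
/*
 * vmm_ops_amd is the SVM backend's entry-point table. The
 * machine-independent vmm(4) layer is expected to select this table
 * (rather than the Intel/VMX equivalent) when it initializes on
 * SVM-capable AMD hardware and to invoke the routines above only
 * through these pointers.
 */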