1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_bhyve_snapshot.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/smp.h> 37 #include <sys/kernel.h> 38 #include <sys/malloc.h> 39 #include <sys/pcpu.h> 40 #include <sys/proc.h> 41 #include <sys/smr.h> 42 #include <sys/sysctl.h> 43 44 #include <vm/vm.h> 45 #include <vm/pmap.h> 46 47 #include <machine/cpufunc.h> 48 #include <machine/psl.h> 49 #include <machine/md_var.h> 50 #include <machine/reg.h> 51 #include <machine/specialreg.h> 52 #include <machine/smp.h> 53 #include <machine/vmm.h> 54 #include <machine/vmm_dev.h> 55 #include <machine/vmm_instruction_emul.h> 56 #include <machine/vmm_snapshot.h> 57 58 #include "vmm_lapic.h" 59 #include "vmm_stat.h" 60 #include "vmm_ktr.h" 61 #include "vmm_ioport.h" 62 #include "vatpic.h" 63 #include "vlapic.h" 64 #include "vlapic_priv.h" 65 66 #include "x86.h" 67 #include "vmcb.h" 68 #include "svm.h" 69 #include "svm_softc.h" 70 #include "svm_msr.h" 71 #include "npt.h" 72 73 SYSCTL_DECL(_hw_vmm); 74 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 75 NULL); 76 77 /* 78 * SVM CPUID function 0x8000_000A, edx bit decoding. 79 */ 80 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 81 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 82 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 83 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 84 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 85 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 86 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 87 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 88 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 89 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 90 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 91 92 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 93 VMCB_CACHE_IOPM | \ 94 VMCB_CACHE_I | \ 95 VMCB_CACHE_TPR | \ 96 VMCB_CACHE_CR2 | \ 97 VMCB_CACHE_CR | \ 98 VMCB_CACHE_DR | \ 99 VMCB_CACHE_DT | \ 100 VMCB_CACHE_SEG | \ 101 VMCB_CACHE_NP) 102 103 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 104 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 105 0, NULL); 106 107 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 108 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 109 110 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 111 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 112 "SVM features advertised by CPUID.8000000AH:EDX"); 113 114 static int disable_npf_assist; 115 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 116 &disable_npf_assist, 0, NULL); 117 118 /* Maximum ASIDs supported by the processor */ 119 static uint32_t nasid; 120 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 121 "Number of ASIDs supported by this processor"); 122 123 /* Current ASID generation for each host cpu */ 124 static struct asid asid[MAXCPU]; 125 126 /* 127 * SVM host state saved area of size 4KB for each core. 128 */ 129 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 130 131 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 132 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 133 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 134 135 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 136 137 static __inline int 138 flush_by_asid(void) 139 { 140 141 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 142 } 143 144 static __inline int 145 decode_assist(void) 146 { 147 148 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 149 } 150 151 static void 152 svm_disable(void *arg __unused) 153 { 154 uint64_t efer; 155 156 efer = rdmsr(MSR_EFER); 157 efer &= ~EFER_SVM; 158 wrmsr(MSR_EFER, efer); 159 } 160 161 /* 162 * Disable SVM on all CPUs. 163 */ 164 static int 165 svm_cleanup(void) 166 { 167 168 smp_rendezvous(NULL, svm_disable, NULL, NULL); 169 return (0); 170 } 171 172 /* 173 * Verify that all the features required by bhyve are available. 174 */ 175 static int 176 check_svm_features(void) 177 { 178 u_int regs[4]; 179 180 /* CPUID Fn8000_000A is for SVM */ 181 do_cpuid(0x8000000A, regs); 182 svm_feature &= regs[3]; 183 184 /* 185 * The number of ASIDs can be configured to be less than what is 186 * supported by the hardware but not more. 
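	 * The hardware limit is reported in CPUID Fn8000_000A EBX (regs[1]
	 * from the do_cpuid() call above); the hw.vmm.svm.num_asids tunable
	 * can only lower it, never raise it.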
187 */ 188 if (nasid == 0 || nasid > regs[1]) 189 nasid = regs[1]; 190 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 191 192 /* bhyve requires the Nested Paging feature */ 193 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 194 printf("SVM: Nested Paging feature not available.\n"); 195 return (ENXIO); 196 } 197 198 /* bhyve requires the NRIP Save feature */ 199 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 200 printf("SVM: NRIP Save feature not available.\n"); 201 return (ENXIO); 202 } 203 204 return (0); 205 } 206 207 static void 208 svm_enable(void *arg __unused) 209 { 210 uint64_t efer; 211 212 efer = rdmsr(MSR_EFER); 213 efer |= EFER_SVM; 214 wrmsr(MSR_EFER, efer); 215 216 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); 217 } 218 219 /* 220 * Return 1 if SVM is enabled on this processor and 0 otherwise. 221 */ 222 static int 223 svm_available(void) 224 { 225 uint64_t msr; 226 227 /* Section 15.4 Enabling SVM from APM2. */ 228 if ((amd_feature2 & AMDID2_SVM) == 0) { 229 printf("SVM: not available.\n"); 230 return (0); 231 } 232 233 msr = rdmsr(MSR_VM_CR); 234 if ((msr & VM_CR_SVMDIS) != 0) { 235 printf("SVM: disabled by BIOS.\n"); 236 return (0); 237 } 238 239 return (1); 240 } 241 242 static int 243 svm_init(int ipinum) 244 { 245 int error, cpu; 246 247 if (!svm_available()) 248 return (ENXIO); 249 250 error = check_svm_features(); 251 if (error) 252 return (error); 253 254 vmcb_clean &= VMCB_CACHE_DEFAULT; 255 256 for (cpu = 0; cpu < MAXCPU; cpu++) { 257 /* 258 * Initialize the host ASIDs to their "highest" valid values. 259 * 260 * The next ASID allocation will rollover both 'gen' and 'num' 261 * and start off the sequence at {1,1}. 262 */ 263 asid[cpu].gen = ~0UL; 264 asid[cpu].num = nasid - 1; 265 } 266 267 svm_msr_init(); 268 svm_npt_init(ipinum); 269 270 /* Enable SVM on all CPUs */ 271 smp_rendezvous(NULL, svm_enable, NULL, NULL); 272 273 return (0); 274 } 275 276 static void 277 svm_restore(void) 278 { 279 280 svm_enable(NULL); 281 } 282 283 #ifdef BHYVE_SNAPSHOT 284 int 285 svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset) 286 { 287 int error; 288 struct vmcb_ctrl *ctrl; 289 290 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 291 ctrl->tsc_offset = offset; 292 293 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 294 VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset); 295 296 error = vm_set_tsc_offset(sc->vm, vcpu, offset); 297 298 return (error); 299 } 300 #endif 301 302 /* Pentium compatible MSRs */ 303 #define MSR_PENTIUM_START 0 304 #define MSR_PENTIUM_END 0x1FFF 305 /* AMD 6th generation and Intel compatible MSRs */ 306 #define MSR_AMD6TH_START 0xC0000000UL 307 #define MSR_AMD6TH_END 0xC0001FFFUL 308 /* AMD 7th and 8th generation compatible MSRs */ 309 #define MSR_AMD7TH_START 0xC0010000UL 310 #define MSR_AMD7TH_END 0xC0011FFFUL 311 312 /* 313 * Get the index and bit position for a MSR in permission bitmap. 314 * Two bits are used for each MSR: lower bit for read and higher bit for write. 
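 *
 * Worked example (derived from the ranges above): MSR_GSBASE (0xC0000101)
 * falls in the AMD 6th-generation range, so off = 0x101, base = 0x2000,
 * *index = (0x101 + 0x2000) / 4 = 0x840 and *bit = (0x101 % 4) * 2 = 2;
 * bit 2 of that byte then gates reads and bit 3 gates writes.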
315 */ 316 static int 317 svm_msr_index(uint64_t msr, int *index, int *bit) 318 { 319 uint32_t base, off; 320 321 *index = -1; 322 *bit = (msr % 4) * 2; 323 base = 0; 324 325 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 326 *index = msr / 4; 327 return (0); 328 } 329 330 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 331 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 332 off = (msr - MSR_AMD6TH_START); 333 *index = (off + base) / 4; 334 return (0); 335 } 336 337 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 338 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 339 off = (msr - MSR_AMD7TH_START); 340 *index = (off + base) / 4; 341 return (0); 342 } 343 344 return (EINVAL); 345 } 346 347 /* 348 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 349 */ 350 static void 351 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 352 { 353 int index, bit, error; 354 355 error = svm_msr_index(msr, &index, &bit); 356 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 357 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 358 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 359 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 360 "msr %#lx", __func__, bit, msr)); 361 362 if (read) 363 perm_bitmap[index] &= ~(1UL << bit); 364 365 if (write) 366 perm_bitmap[index] &= ~(2UL << bit); 367 } 368 369 static void 370 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 371 { 372 373 svm_msr_perm(perm_bitmap, msr, true, true); 374 } 375 376 static void 377 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 378 { 379 380 svm_msr_perm(perm_bitmap, msr, true, false); 381 } 382 383 static __inline int 384 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 385 { 386 struct vmcb_ctrl *ctrl; 387 388 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 389 390 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 391 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 392 } 393 394 static __inline void 395 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 396 int enabled) 397 { 398 struct vmcb_ctrl *ctrl; 399 uint32_t oldval; 400 401 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 402 403 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 404 oldval = ctrl->intercept[idx]; 405 406 if (enabled) 407 ctrl->intercept[idx] |= bitmask; 408 else 409 ctrl->intercept[idx] &= ~bitmask; 410 411 if (ctrl->intercept[idx] != oldval) { 412 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 413 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 414 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 415 } 416 } 417 418 static __inline void 419 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 420 { 421 422 svm_set_intercept(sc, vcpu, off, bitmask, 0); 423 } 424 425 static __inline void 426 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 427 { 428 429 svm_set_intercept(sc, vcpu, off, bitmask, 1); 430 } 431 432 static void 433 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 434 uint64_t msrpm_base_pa, uint64_t np_pml4) 435 { 436 struct vmcb_ctrl *ctrl; 437 struct vmcb_state *state; 438 uint32_t mask; 439 int n; 440 441 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 442 state = svm_get_vmcb_state(sc, vcpu); 443 444 ctrl->iopm_base_pa = iopm_base_pa; 445 ctrl->msrpm_base_pa = msrpm_base_pa; 446 447 /* Enable nested paging */ 448 ctrl->np_enable = 1; 449 ctrl->n_cr3 = np_pml4; 450 451 /* 452 * Intercept accesses to the control registers that are not shadowed 453 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 454 */ 455 for (n = 0; n < 16; n++) { 456 mask = (BIT(n) << 16) | BIT(n); 457 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 458 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 459 else 460 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 461 } 462 463 /* 464 * Intercept everything when tracing guest exceptions otherwise 465 * just intercept machine check exception. 466 */ 467 if (vcpu_trace_exceptions(sc->vm, vcpu)) { 468 for (n = 0; n < 32; n++) { 469 /* 470 * Skip unimplemented vectors in the exception bitmap. 471 */ 472 if (n == 2 || n == 9) { 473 continue; 474 } 475 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); 476 } 477 } else { 478 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 479 } 480 481 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 482 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 483 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 484 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 485 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 486 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 487 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 488 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 489 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 490 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 491 VMCB_INTCPT_FERR_FREEZE); 492 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 493 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 494 495 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 496 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 497 498 /* 499 * Intercept SVM instructions since AMD enables them in guests otherwise. 
500 * Non-intercepted VMMCALL causes #UD, skip it. 501 */ 502 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 503 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 504 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 505 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 506 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 507 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 508 509 /* 510 * From section "Canonicalization and Consistency Checks" in APMv2 511 * the VMRUN intercept bit must be set to pass the consistency check. 512 */ 513 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 514 515 /* 516 * The ASID will be set to a non-zero value just before VMRUN. 517 */ 518 ctrl->asid = 0; 519 520 /* 521 * Section 15.21.1, Interrupt Masking in EFLAGS 522 * Section 15.21.2, Virtualizing APIC.TPR 523 * 524 * This must be set for %rflag and %cr8 isolation of guest and host. 525 */ 526 ctrl->v_intr_masking = 1; 527 528 /* Enable Last Branch Record aka LBR for debugging */ 529 ctrl->lbr_virt_en = 1; 530 state->dbgctl = BIT(0); 531 532 /* EFER_SVM must always be set when the guest is executing */ 533 state->efer = EFER_SVM; 534 535 /* Set up the PAT to power-on state */ 536 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 537 PAT_VALUE(1, PAT_WRITE_THROUGH) | 538 PAT_VALUE(2, PAT_UNCACHED) | 539 PAT_VALUE(3, PAT_UNCACHEABLE) | 540 PAT_VALUE(4, PAT_WRITE_BACK) | 541 PAT_VALUE(5, PAT_WRITE_THROUGH) | 542 PAT_VALUE(6, PAT_UNCACHED) | 543 PAT_VALUE(7, PAT_UNCACHEABLE); 544 545 /* Set up DR6/7 to power-on state */ 546 state->dr6 = DBREG_DR6_RESERVED1; 547 state->dr7 = DBREG_DR7_RESERVED1; 548 } 549 550 /* 551 * Initialize a virtual machine. 552 */ 553 static void * 554 svm_vminit(struct vm *vm, pmap_t pmap) 555 { 556 struct svm_softc *svm_sc; 557 struct svm_vcpu *vcpu; 558 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 559 int i; 560 uint16_t maxcpus; 561 562 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 563 if (((uintptr_t)svm_sc & PAGE_MASK) != 0) 564 panic("malloc of svm_softc not aligned on page boundary"); 565 566 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 567 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 568 if (svm_sc->msr_bitmap == NULL) 569 panic("contigmalloc of SVM MSR bitmap failed"); 570 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 571 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 572 if (svm_sc->iopm_bitmap == NULL) 573 panic("contigmalloc of SVM IO bitmap failed"); 574 575 svm_sc->vm = vm; 576 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pmltop); 577 578 /* 579 * Intercept read and write accesses to all MSRs. 580 */ 581 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 582 583 /* 584 * Access to the following MSRs is redirected to the VMCB when the 585 * guest is executing. Therefore it is safe to allow the guest to 586 * read/write these MSRs directly without hypervisor involvement. 
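	 * (Most of these are swapped by VMLOAD/VMSAVE; PAT is kept in the
	 * VMCB state save area.)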
587 */ 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 591 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 600 601 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 602 603 /* 604 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 605 */ 606 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 607 608 /* Intercept access to all I/O ports. */ 609 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 610 611 iopm_pa = vtophys(svm_sc->iopm_bitmap); 612 msrpm_pa = vtophys(svm_sc->msr_bitmap); 613 pml4_pa = svm_sc->nptp; 614 maxcpus = vm_get_maxcpus(svm_sc->vm); 615 for (i = 0; i < maxcpus; i++) { 616 vcpu = svm_get_vcpu(svm_sc, i); 617 vcpu->nextrip = ~0; 618 vcpu->lastcpu = NOCPU; 619 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 620 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 621 svm_msr_guest_init(svm_sc, i); 622 } 623 return (svm_sc); 624 } 625 626 /* 627 * Collateral for a generic SVM VM-exit. 628 */ 629 static void 630 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 631 { 632 633 vme->exitcode = VM_EXITCODE_SVM; 634 vme->u.svm.exitcode = code; 635 vme->u.svm.exitinfo1 = info1; 636 vme->u.svm.exitinfo2 = info2; 637 } 638 639 static int 640 svm_cpl(struct vmcb_state *state) 641 { 642 643 /* 644 * From APMv2: 645 * "Retrieve the CPL from the CPL field in the VMCB, not 646 * from any segment DPL" 647 */ 648 return (state->cpl); 649 } 650 651 static enum vm_cpu_mode 652 svm_vcpu_mode(struct vmcb *vmcb) 653 { 654 struct vmcb_segment seg; 655 struct vmcb_state *state; 656 int error; 657 658 state = &vmcb->state; 659 660 if (state->efer & EFER_LMA) { 661 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 662 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 663 error)); 664 665 /* 666 * Section 4.8.1 for APM2, check if Code Segment has 667 * Long attribute set in descriptor. 668 */ 669 if (seg.attrib & VMCB_CS_ATTRIB_L) 670 return (CPU_MODE_64BIT); 671 else 672 return (CPU_MODE_COMPATIBILITY); 673 } else if (state->cr0 & CR0_PE) { 674 return (CPU_MODE_PROTECTED); 675 } else { 676 return (CPU_MODE_REAL); 677 } 678 } 679 680 static enum vm_paging_mode 681 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 682 { 683 684 if ((cr0 & CR0_PG) == 0) 685 return (PAGING_MODE_FLAT); 686 if ((cr4 & CR4_PAE) == 0) 687 return (PAGING_MODE_32); 688 if (efer & EFER_LME) 689 return (PAGING_MODE_64); 690 else 691 return (PAGING_MODE_PAE); 692 } 693 694 /* 695 * ins/outs utility routines 696 */ 697 static uint64_t 698 svm_inout_str_index(struct svm_regctx *regs, int in) 699 { 700 uint64_t val; 701 702 val = in ? regs->sctx_rdi : regs->sctx_rsi; 703 704 return (val); 705 } 706 707 static uint64_t 708 svm_inout_str_count(struct svm_regctx *regs, int rep) 709 { 710 uint64_t val; 711 712 val = rep ? 
regs->sctx_rcx : 1; 713 714 return (val); 715 } 716 717 static void 718 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 719 int in, struct vm_inout_str *vis) 720 { 721 int error, s; 722 723 if (in) { 724 vis->seg_name = VM_REG_GUEST_ES; 725 } else { 726 /* The segment field has standard encoding */ 727 s = (info1 >> 10) & 0x7; 728 vis->seg_name = vm_segment_name(s); 729 } 730 731 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 732 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 733 } 734 735 static int 736 svm_inout_str_addrsize(uint64_t info1) 737 { 738 uint32_t size; 739 740 size = (info1 >> 7) & 0x7; 741 switch (size) { 742 case 1: 743 return (2); /* 16 bit */ 744 case 2: 745 return (4); /* 32 bit */ 746 case 4: 747 return (8); /* 64 bit */ 748 default: 749 panic("%s: invalid size encoding %d", __func__, size); 750 } 751 } 752 753 static void 754 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 755 { 756 struct vmcb_state *state; 757 758 state = &vmcb->state; 759 paging->cr3 = state->cr3; 760 paging->cpl = svm_cpl(state); 761 paging->cpu_mode = svm_vcpu_mode(vmcb); 762 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 763 state->efer); 764 } 765 766 #define UNHANDLED 0 767 768 /* 769 * Handle guest I/O intercept. 770 */ 771 static int 772 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 773 { 774 struct vmcb_ctrl *ctrl; 775 struct vmcb_state *state; 776 struct svm_regctx *regs; 777 struct vm_inout_str *vis; 778 uint64_t info1; 779 int inout_string; 780 781 state = svm_get_vmcb_state(svm_sc, vcpu); 782 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 783 regs = svm_get_guest_regctx(svm_sc, vcpu); 784 785 info1 = ctrl->exitinfo1; 786 inout_string = info1 & BIT(2) ? 1 : 0; 787 788 /* 789 * The effective segment number in EXITINFO1[12:10] is populated 790 * only if the processor has the DecodeAssist capability. 791 * 792 * XXX this is not specified explicitly in APMv2 but can be verified 793 * empirically. 794 */ 795 if (inout_string && !decode_assist()) 796 return (UNHANDLED); 797 798 vmexit->exitcode = VM_EXITCODE_INOUT; 799 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 800 vmexit->u.inout.string = inout_string; 801 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 802 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 803 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 804 vmexit->u.inout.eax = (uint32_t)(state->rax); 805 806 if (inout_string) { 807 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 808 vis = &vmexit->u.inout_str; 809 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 810 vis->rflags = state->rflags; 811 vis->cr0 = state->cr0; 812 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 813 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 814 vis->addrsize = svm_inout_str_addrsize(info1); 815 svm_inout_str_seginfo(svm_sc, vcpu, info1, 816 vmexit->u.inout.in, vis); 817 } 818 819 return (UNHANDLED); 820 } 821 822 static int 823 npf_fault_type(uint64_t exitinfo1) 824 { 825 826 if (exitinfo1 & VMCB_NPF_INFO1_W) 827 return (VM_PROT_WRITE); 828 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 829 return (VM_PROT_EXECUTE); 830 else 831 return (VM_PROT_READ); 832 } 833 834 static bool 835 svm_npf_emul_fault(uint64_t exitinfo1) 836 { 837 838 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 839 return (false); 840 } 841 842 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 843 return (false); 844 } 845 846 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 847 return (false); 848 } 849 850 return (true); 851 } 852 853 static void 854 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 855 { 856 struct vm_guest_paging *paging; 857 struct vmcb_segment seg; 858 struct vmcb_ctrl *ctrl; 859 char *inst_bytes; 860 int error, inst_len; 861 862 ctrl = &vmcb->ctrl; 863 paging = &vmexit->u.inst_emul.paging; 864 865 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 866 vmexit->u.inst_emul.gpa = gpa; 867 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 868 svm_paging_info(vmcb, paging); 869 870 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 871 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 872 873 switch(paging->cpu_mode) { 874 case CPU_MODE_REAL: 875 vmexit->u.inst_emul.cs_base = seg.base; 876 vmexit->u.inst_emul.cs_d = 0; 877 break; 878 case CPU_MODE_PROTECTED: 879 case CPU_MODE_COMPATIBILITY: 880 vmexit->u.inst_emul.cs_base = seg.base; 881 882 /* 883 * Section 4.8.1 of APM2, Default Operand Size or D bit. 884 */ 885 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 886 1 : 0; 887 break; 888 default: 889 vmexit->u.inst_emul.cs_base = 0; 890 vmexit->u.inst_emul.cs_d = 0; 891 break; 892 } 893 894 /* 895 * Copy the instruction bytes into 'vie' if available. 896 */ 897 if (decode_assist() && !disable_npf_assist) { 898 inst_len = ctrl->inst_len; 899 inst_bytes = ctrl->inst_bytes; 900 } else { 901 inst_len = 0; 902 inst_bytes = NULL; 903 } 904 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 905 } 906 907 #ifdef KTR 908 static const char * 909 intrtype_to_str(int intr_type) 910 { 911 switch (intr_type) { 912 case VMCB_EVENTINJ_TYPE_INTR: 913 return ("hwintr"); 914 case VMCB_EVENTINJ_TYPE_NMI: 915 return ("nmi"); 916 case VMCB_EVENTINJ_TYPE_INTn: 917 return ("swintr"); 918 case VMCB_EVENTINJ_TYPE_EXCEPTION: 919 return ("exception"); 920 default: 921 panic("%s: unknown intr_type %d", __func__, intr_type); 922 } 923 } 924 #endif 925 926 /* 927 * Inject an event to vcpu as described in section 15.20, "Event injection". 
928 */ 929 static void 930 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 931 uint32_t error, bool ec_valid) 932 { 933 struct vmcb_ctrl *ctrl; 934 935 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 936 937 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 938 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 939 940 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 941 __func__, vector)); 942 943 switch (intr_type) { 944 case VMCB_EVENTINJ_TYPE_INTR: 945 case VMCB_EVENTINJ_TYPE_NMI: 946 case VMCB_EVENTINJ_TYPE_INTn: 947 break; 948 case VMCB_EVENTINJ_TYPE_EXCEPTION: 949 if (vector >= 0 && vector <= 31 && vector != 2) 950 break; 951 /* FALLTHROUGH */ 952 default: 953 panic("%s: invalid intr_type/vector: %d/%d", __func__, 954 intr_type, vector); 955 } 956 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 957 if (ec_valid) { 958 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 959 ctrl->eventinj |= (uint64_t)error << 32; 960 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 961 intrtype_to_str(intr_type), vector, error); 962 } else { 963 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 964 intrtype_to_str(intr_type), vector); 965 } 966 } 967 968 static void 969 svm_update_virqinfo(struct svm_softc *sc, int vcpu) 970 { 971 struct vm *vm; 972 struct vlapic *vlapic; 973 struct vmcb_ctrl *ctrl; 974 975 vm = sc->vm; 976 vlapic = vm_lapic(vm, vcpu); 977 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 978 979 /* Update %cr8 in the emulated vlapic */ 980 vlapic_set_cr8(vlapic, ctrl->v_tpr); 981 982 /* Virtual interrupt injection is not used. */ 983 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 984 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 985 } 986 987 static void 988 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 989 { 990 struct vmcb_ctrl *ctrl; 991 uint64_t intinfo; 992 993 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 994 intinfo = ctrl->exitintinfo; 995 if (!VMCB_EXITINTINFO_VALID(intinfo)) 996 return; 997 998 /* 999 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1000 * 1001 * If a #VMEXIT happened during event delivery then record the event 1002 * that was being delivered. 
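	 * The recorded event is re-injected on the next VM entry via
	 * svm_inj_intinfo().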
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next.
However, it is possible to inject 1122 * another NMI into the vcpu before the "iret" has actually executed. 1123 * 1124 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1125 * it will trap back into the hypervisor. If an NMI is pending for 1126 * the vcpu it will be injected into the guest. 1127 * 1128 * XXX this needs to be fixed 1129 */ 1130 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1131 1132 /* 1133 * Set 'intr_shadow' to prevent an NMI from being injected on the 1134 * immediate VMRUN. 1135 */ 1136 error = svm_modify_intr_shadow(sc, vcpu, 1); 1137 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1138 } 1139 1140 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1141 1142 static int 1143 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu) 1144 { 1145 struct vm_exit *vme; 1146 struct vmcb_state *state; 1147 uint64_t changed, lma, oldval; 1148 int error; 1149 1150 state = svm_get_vmcb_state(sc, vcpu); 1151 1152 oldval = state->efer; 1153 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1154 1155 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1156 changed = oldval ^ newval; 1157 1158 if (newval & EFER_MBZ_BITS) 1159 goto gpf; 1160 1161 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1162 if (changed & EFER_LME) { 1163 if (state->cr0 & CR0_PG) 1164 goto gpf; 1165 } 1166 1167 /* EFER.LMA = EFER.LME & CR0.PG */ 1168 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1169 lma = EFER_LMA; 1170 else 1171 lma = 0; 1172 1173 if ((newval & EFER_LMA) != lma) 1174 goto gpf; 1175 1176 if (newval & EFER_NXE) { 1177 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) 1178 goto gpf; 1179 } 1180 1181 /* 1182 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1183 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 
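	 * (LMSLE is the EFER Long Mode Segment Limit Enable bit.)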
1184 */ 1185 if (newval & EFER_LMSLE) { 1186 vme = vm_exitinfo(sc->vm, vcpu); 1187 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1188 *retu = true; 1189 return (0); 1190 } 1191 1192 if (newval & EFER_FFXSR) { 1193 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) 1194 goto gpf; 1195 } 1196 1197 if (newval & EFER_TCE) { 1198 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) 1199 goto gpf; 1200 } 1201 1202 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1203 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1204 return (0); 1205 gpf: 1206 vm_inject_gp(sc->vm, vcpu); 1207 return (0); 1208 } 1209 1210 static int 1211 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1212 bool *retu) 1213 { 1214 int error; 1215 1216 if (lapic_msr(num)) 1217 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1218 else if (num == MSR_EFER) 1219 error = svm_write_efer(sc, vcpu, val, retu); 1220 else 1221 error = svm_wrmsr(sc, vcpu, num, val, retu); 1222 1223 return (error); 1224 } 1225 1226 static int 1227 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1228 { 1229 struct vmcb_state *state; 1230 struct svm_regctx *ctx; 1231 uint64_t result; 1232 int error; 1233 1234 if (lapic_msr(num)) 1235 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1236 else 1237 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1238 1239 if (error == 0) { 1240 state = svm_get_vmcb_state(sc, vcpu); 1241 ctx = svm_get_guest_regctx(sc, vcpu); 1242 state->rax = result & 0xffffffff; 1243 ctx->sctx_rdx = result >> 32; 1244 } 1245 1246 return (error); 1247 } 1248 1249 #ifdef KTR 1250 static const char * 1251 exit_reason_to_str(uint64_t reason) 1252 { 1253 int i; 1254 static char reasonbuf[32]; 1255 static const struct { 1256 int reason; 1257 const char *str; 1258 } reasons[] = { 1259 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1260 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1261 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1262 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1263 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1264 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1265 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1266 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1267 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1268 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1269 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1270 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1271 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1272 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1273 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1274 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1275 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1276 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1277 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1278 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1279 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1280 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1281 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1282 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1283 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1284 }; 1285 1286 for (i = 0; i < nitems(reasons); i++) { 1287 if (reasons[i].reason == reason) 1288 return (reasons[i].str); 1289 } 1290 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1291 return (reasonbuf); 1292 } 1293 #endif /* KTR */ 1294 1295 /* 1296 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1297 * that are due to instruction intercepts as well as MSR and IOIO intercepts 
1298 * and exceptions caused by INT3, INTO and BOUND instructions. 1299 * 1300 * Return 1 if the nRIP is valid and 0 otherwise. 1301 */ 1302 static int 1303 nrip_valid(uint64_t exitcode) 1304 { 1305 switch (exitcode) { 1306 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1307 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1308 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1309 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1310 case 0x43: /* INT3 */ 1311 case 0x44: /* INTO */ 1312 case 0x45: /* BOUND */ 1313 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1314 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1315 return (1); 1316 default: 1317 return (0); 1318 } 1319 } 1320 1321 static int 1322 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1323 { 1324 struct vmcb *vmcb; 1325 struct vmcb_state *state; 1326 struct vmcb_ctrl *ctrl; 1327 struct svm_regctx *ctx; 1328 uint64_t code, info1, info2, val; 1329 uint32_t eax, ecx, edx; 1330 int error, errcode_valid, handled, idtvec, reflect; 1331 bool retu; 1332 1333 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1334 vmcb = svm_get_vmcb(svm_sc, vcpu); 1335 state = &vmcb->state; 1336 ctrl = &vmcb->ctrl; 1337 1338 handled = 0; 1339 code = ctrl->exitcode; 1340 info1 = ctrl->exitinfo1; 1341 info2 = ctrl->exitinfo2; 1342 1343 vmexit->exitcode = VM_EXITCODE_BOGUS; 1344 vmexit->rip = state->rip; 1345 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1346 1347 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1348 1349 /* 1350 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1351 * in an inconsistent state and can trigger assertions that would 1352 * never happen otherwise. 1353 */ 1354 if (code == VMCB_EXIT_INVALID) { 1355 vm_exit_svm(vmexit, code, info1, info2); 1356 return (0); 1357 } 1358 1359 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1360 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1361 1362 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1363 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1364 vmexit->inst_length, code, info1, info2)); 1365 1366 svm_update_virqinfo(svm_sc, vcpu); 1367 svm_save_intinfo(svm_sc, vcpu); 1368 1369 switch (code) { 1370 case VMCB_EXIT_IRET: 1371 /* 1372 * Restart execution at "iret" but with the intercept cleared. 1373 */ 1374 vmexit->inst_length = 0; 1375 clear_nmi_blocking(svm_sc, vcpu); 1376 handled = 1; 1377 break; 1378 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1379 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1380 handled = 1; 1381 break; 1382 case VMCB_EXIT_INTR: /* external interrupt */ 1383 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1384 handled = 1; 1385 break; 1386 case VMCB_EXIT_NMI: /* external NMI */ 1387 handled = 1; 1388 break; 1389 case 0x40 ... 0x5F: 1390 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1391 reflect = 1; 1392 idtvec = code - 0x40; 1393 switch (idtvec) { 1394 case IDT_MC: 1395 /* 1396 * Call the machine check handler by hand. Also don't 1397 * reflect the machine check back into the guest. 
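			 * ("int $18" below enters the host's #MC handler
			 * through the host IDT; vector 18 is IDT_MC.)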
1398 */ 1399 reflect = 0; 1400 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1401 __asm __volatile("int $18"); 1402 break; 1403 case IDT_PF: 1404 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1405 info2); 1406 KASSERT(error == 0, ("%s: error %d updating cr2", 1407 __func__, error)); 1408 /* fallthru */ 1409 case IDT_NP: 1410 case IDT_SS: 1411 case IDT_GP: 1412 case IDT_AC: 1413 case IDT_TS: 1414 errcode_valid = 1; 1415 break; 1416 1417 case IDT_DF: 1418 errcode_valid = 1; 1419 info1 = 0; 1420 break; 1421 1422 case IDT_BP: 1423 case IDT_OF: 1424 case IDT_BR: 1425 /* 1426 * The 'nrip' field is populated for INT3, INTO and 1427 * BOUND exceptions and this also implies that 1428 * 'inst_length' is non-zero. 1429 * 1430 * Reset 'inst_length' to zero so the guest %rip at 1431 * event injection is identical to what it was when 1432 * the exception originally happened. 1433 */ 1434 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1435 "to zero before injecting exception %d", 1436 vmexit->inst_length, idtvec); 1437 vmexit->inst_length = 0; 1438 /* fallthru */ 1439 default: 1440 errcode_valid = 0; 1441 info1 = 0; 1442 break; 1443 } 1444 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1445 "when reflecting exception %d into guest", 1446 vmexit->inst_length, idtvec)); 1447 1448 if (reflect) { 1449 /* Reflect the exception back into the guest */ 1450 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1451 "%d/%#x into the guest", idtvec, (int)info1); 1452 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1453 errcode_valid, info1, 0); 1454 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1455 __func__, error)); 1456 } 1457 handled = 1; 1458 break; 1459 case VMCB_EXIT_MSR: /* MSR access. */ 1460 eax = state->rax; 1461 ecx = ctx->sctx_rcx; 1462 edx = ctx->sctx_rdx; 1463 retu = false; 1464 1465 if (info1) { 1466 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1467 val = (uint64_t)edx << 32 | eax; 1468 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1469 ecx, val); 1470 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1471 vmexit->exitcode = VM_EXITCODE_WRMSR; 1472 vmexit->u.msr.code = ecx; 1473 vmexit->u.msr.wval = val; 1474 } else if (!retu) { 1475 handled = 1; 1476 } else { 1477 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1478 ("emulate_wrmsr retu with bogus exitcode")); 1479 } 1480 } else { 1481 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1482 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1483 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1484 vmexit->exitcode = VM_EXITCODE_RDMSR; 1485 vmexit->u.msr.code = ecx; 1486 } else if (!retu) { 1487 handled = 1; 1488 } else { 1489 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1490 ("emulate_rdmsr retu with bogus exitcode")); 1491 } 1492 } 1493 break; 1494 case VMCB_EXIT_IO: 1495 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1496 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1497 break; 1498 case VMCB_EXIT_CPUID: 1499 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1500 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax, 1501 &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx); 1502 break; 1503 case VMCB_EXIT_HLT: 1504 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1505 vmexit->exitcode = VM_EXITCODE_HLT; 1506 vmexit->u.hlt.rflags = state->rflags; 1507 break; 1508 case VMCB_EXIT_PAUSE: 1509 vmexit->exitcode = VM_EXITCODE_PAUSE; 1510 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1511 break; 1512 case VMCB_EXIT_NPF: 1513 /* EXITINFO2 contains the faulting guest physical address 
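		   and EXITINFO1 holds the nested page fault error code that
		   npf_fault_type() and svm_npf_emul_fault() decode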
*/ 1514 if (info1 & VMCB_NPF_INFO1_RSV) { 1515 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1516 "reserved bits set: info1(%#lx) info2(%#lx)", 1517 info1, info2); 1518 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { 1519 vmexit->exitcode = VM_EXITCODE_PAGING; 1520 vmexit->u.paging.gpa = info2; 1521 vmexit->u.paging.fault_type = npf_fault_type(info1); 1522 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1523 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1524 "on gpa %#lx/%#lx at rip %#lx", 1525 info2, info1, state->rip); 1526 } else if (svm_npf_emul_fault(info1)) { 1527 svm_handle_inst_emul(vmcb, info2, vmexit); 1528 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1529 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1530 "for gpa %#lx/%#lx at rip %#lx", 1531 info2, info1, state->rip); 1532 } 1533 break; 1534 case VMCB_EXIT_MONITOR: 1535 vmexit->exitcode = VM_EXITCODE_MONITOR; 1536 break; 1537 case VMCB_EXIT_MWAIT: 1538 vmexit->exitcode = VM_EXITCODE_MWAIT; 1539 break; 1540 case VMCB_EXIT_SHUTDOWN: 1541 case VMCB_EXIT_VMRUN: 1542 case VMCB_EXIT_VMMCALL: 1543 case VMCB_EXIT_VMLOAD: 1544 case VMCB_EXIT_VMSAVE: 1545 case VMCB_EXIT_STGI: 1546 case VMCB_EXIT_CLGI: 1547 case VMCB_EXIT_SKINIT: 1548 case VMCB_EXIT_ICEBP: 1549 case VMCB_EXIT_INVD: 1550 case VMCB_EXIT_INVLPGA: 1551 vm_inject_ud(svm_sc->vm, vcpu); 1552 handled = 1; 1553 break; 1554 default: 1555 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1556 break; 1557 } 1558 1559 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1560 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1561 vmexit->rip, vmexit->inst_length); 1562 1563 if (handled) { 1564 vmexit->rip += vmexit->inst_length; 1565 vmexit->inst_length = 0; 1566 state->rip = vmexit->rip; 1567 } else { 1568 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1569 /* 1570 * If this VM exit was not claimed by anybody then 1571 * treat it as a generic SVM exit. 1572 */ 1573 vm_exit_svm(vmexit, code, info1, info2); 1574 } else { 1575 /* 1576 * The exitcode and collateral have been populated. 1577 * The VM exit will be processed further in userland. 1578 */ 1579 } 1580 } 1581 return (handled); 1582 } 1583 1584 static void 1585 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1586 { 1587 uint64_t intinfo; 1588 1589 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1590 return; 1591 1592 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1593 "valid: %#lx", __func__, intinfo)); 1594 1595 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1596 VMCB_EXITINTINFO_VECTOR(intinfo), 1597 VMCB_EXITINTINFO_EC(intinfo), 1598 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1599 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1600 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1601 } 1602 1603 /* 1604 * Inject event to virtual cpu. 
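 *
 * Events are considered in priority order: pending exit-time intinfo is
 * re-injected first, then an NMI if one is pending and not blocked, then
 * an ExtInt or local APIC vector. When an event cannot be injected
 * immediately, an interrupt window exit is requested instead.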
1605 */ 1606 static void 1607 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1608 { 1609 struct vmcb_ctrl *ctrl; 1610 struct vmcb_state *state; 1611 struct svm_vcpu *vcpustate; 1612 uint8_t v_tpr; 1613 int vector, need_intr_window; 1614 int extint_pending; 1615 1616 state = svm_get_vmcb_state(sc, vcpu); 1617 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1618 vcpustate = svm_get_vcpu(sc, vcpu); 1619 1620 need_intr_window = 0; 1621 1622 if (vcpustate->nextrip != state->rip) { 1623 ctrl->intr_shadow = 0; 1624 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1625 "cleared due to rip change: %#lx/%#lx", 1626 vcpustate->nextrip, state->rip); 1627 } 1628 1629 /* 1630 * Inject pending events or exceptions for this vcpu. 1631 * 1632 * An event might be pending because the previous #VMEXIT happened 1633 * during event delivery (i.e. ctrl->exitintinfo). 1634 * 1635 * An event might also be pending because an exception was injected 1636 * by the hypervisor (e.g. #PF during instruction emulation). 1637 */ 1638 svm_inj_intinfo(sc, vcpu); 1639 1640 /* NMI event has priority over interrupts. */ 1641 if (vm_nmi_pending(sc->vm, vcpu)) { 1642 if (nmi_blocked(sc, vcpu)) { 1643 /* 1644 * Can't inject another NMI if the guest has not 1645 * yet executed an "iret" after the last NMI. 1646 */ 1647 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1648 "to NMI-blocking"); 1649 } else if (ctrl->intr_shadow) { 1650 /* 1651 * Can't inject an NMI if the vcpu is in an intr_shadow. 1652 */ 1653 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1654 "interrupt shadow"); 1655 need_intr_window = 1; 1656 goto done; 1657 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1658 /* 1659 * If there is already an exception/interrupt pending 1660 * then defer the NMI until after that. 1661 */ 1662 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1663 "eventinj %#lx", ctrl->eventinj); 1664 1665 /* 1666 * Use self-IPI to trigger a VM-exit as soon as 1667 * possible after the event injection is completed. 1668 * 1669 * This works only if the external interrupt exiting 1670 * is at a lower priority than the event injection. 1671 * 1672 * Although not explicitly specified in APMv2 the 1673 * relative priorities were verified empirically. 1674 */ 1675 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1676 } else { 1677 vm_nmi_clear(sc->vm, vcpu); 1678 1679 /* Inject NMI, vector number is not used */ 1680 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1681 IDT_NMI, 0, false); 1682 1683 /* virtual NMI blocking is now in effect */ 1684 enable_nmi_blocking(sc, vcpu); 1685 1686 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1687 } 1688 } 1689 1690 extint_pending = vm_extint_pending(sc->vm, vcpu); 1691 if (!extint_pending) { 1692 if (!vlapic_pending_intr(vlapic, &vector)) 1693 goto done; 1694 KASSERT(vector >= 16 && vector <= 255, 1695 ("invalid vector %d from local APIC", vector)); 1696 } else { 1697 /* Ask the legacy pic for a vector to inject */ 1698 vatpic_pending_intr(sc->vm, &vector); 1699 KASSERT(vector >= 0 && vector <= 255, 1700 ("invalid vector %d from INTR", vector)); 1701 } 1702 1703 /* 1704 * If the guest has disabled interrupts or is in an interrupt shadow 1705 * then we cannot inject the pending interrupt. 
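	 * In either case request an interrupt window exit so the injection
	 * can be retried as soon as it becomes possible.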
1706 */ 1707 if ((state->rflags & PSL_I) == 0) { 1708 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1709 "rflags %#lx", vector, state->rflags); 1710 need_intr_window = 1; 1711 goto done; 1712 } 1713 1714 if (ctrl->intr_shadow) { 1715 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1716 "interrupt shadow", vector); 1717 need_intr_window = 1; 1718 goto done; 1719 } 1720 1721 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1722 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1723 "eventinj %#lx", vector, ctrl->eventinj); 1724 need_intr_window = 1; 1725 goto done; 1726 } 1727 1728 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1729 1730 if (!extint_pending) { 1731 vlapic_intr_accepted(vlapic, vector); 1732 } else { 1733 vm_extint_clear(sc->vm, vcpu); 1734 vatpic_intr_accepted(sc->vm, vector); 1735 } 1736 1737 /* 1738 * Force a VM-exit as soon as the vcpu is ready to accept another 1739 * interrupt. This is done because the PIC might have another vector 1740 * that it wants to inject. Also, if the APIC has a pending interrupt 1741 * that was preempted by the ExtInt then it allows us to inject the 1742 * APIC vector as soon as possible. 1743 */ 1744 need_intr_window = 1; 1745 done: 1746 /* 1747 * The guest can modify the TPR by writing to %CR8. In guest mode 1748 * the processor reflects this write to V_TPR without hypervisor 1749 * intervention. 1750 * 1751 * The guest can also modify the TPR by writing to it via the memory 1752 * mapped APIC page. In this case, the write will be emulated by the 1753 * hypervisor. For this reason V_TPR must be updated before every 1754 * VMRUN. 1755 */ 1756 v_tpr = vlapic_get_cr8(vlapic); 1757 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1758 if (ctrl->v_tpr != v_tpr) { 1759 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1760 ctrl->v_tpr, v_tpr); 1761 ctrl->v_tpr = v_tpr; 1762 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1763 } 1764 1765 if (need_intr_window) { 1766 /* 1767 * We use V_IRQ in conjunction with the VINTR intercept to 1768 * trap into the hypervisor as soon as a virtual interrupt 1769 * can be delivered. 1770 * 1771 * Since injected events are not subject to intercept checks 1772 * we need to ensure that the V_IRQ is not actually going to 1773 * be delivered on VM entry. The KASSERT below enforces this. 1774 */ 1775 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1776 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1777 ("Bogus intr_window_exiting: eventinj (%#lx), " 1778 "intr_shadow (%u), rflags (%#lx)", 1779 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1780 enable_intr_window_exiting(sc, vcpu); 1781 } else { 1782 disable_intr_window_exiting(sc, vcpu); 1783 } 1784 } 1785 1786 static __inline void 1787 restore_host_tss(void) 1788 { 1789 struct system_segment_descriptor *tss_sd; 1790 1791 /* 1792 * The TSS descriptor was in use prior to launching the guest so it 1793 * has been marked busy. 1794 * 1795 * 'ltr' requires the descriptor to be marked available so change the 1796 * type to "64-bit available TSS". 
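	 * (Loading %tr with 'ltr' below marks the descriptor busy again.)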
1797 */ 1798 tss_sd = PCPU_GET(tss); 1799 tss_sd->sd_type = SDT_SYSTSS; 1800 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1801 } 1802 1803 static void 1804 svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap) 1805 { 1806 struct svm_vcpu *vcpustate; 1807 struct vmcb_ctrl *ctrl; 1808 long eptgen; 1809 int cpu; 1810 bool alloc_asid; 1811 1812 cpu = curcpu; 1813 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1814 smr_enter(pmap->pm_eptsmr); 1815 1816 vcpustate = svm_get_vcpu(sc, vcpuid); 1817 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1818 1819 /* 1820 * The TLB entries associated with the vcpu's ASID are not valid 1821 * if either of the following conditions is true: 1822 * 1823 * 1. The vcpu's ASID generation is different than the host cpu's 1824 * ASID generation. This happens when the vcpu migrates to a new 1825 * host cpu. It can also happen when the number of vcpus executing 1826 * on a host cpu is greater than the number of ASIDs available. 1827 * 1828 * 2. The pmap generation number is different than the value cached in 1829 * the 'vcpustate'. This happens when the host invalidates pages 1830 * belonging to the guest. 1831 * 1832 * asidgen eptgen Action 1833 * mismatch mismatch 1834 * 0 0 (a) 1835 * 0 1 (b1) or (b2) 1836 * 1 0 (c) 1837 * 1 1 (d) 1838 * 1839 * (a) There is no mismatch in eptgen or ASID generation and therefore 1840 * no further action is needed. 1841 * 1842 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1843 * retained and the TLB entries associated with this ASID 1844 * are flushed by VMRUN. 1845 * 1846 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1847 * allocated. 1848 * 1849 * (c) A new ASID is allocated. 1850 * 1851 * (d) A new ASID is allocated. 1852 */ 1853 1854 alloc_asid = false; 1855 eptgen = atomic_load_long(&pmap->pm_eptgen); 1856 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1857 1858 if (vcpustate->asid.gen != asid[cpu].gen) { 1859 alloc_asid = true; /* (c) and (d) */ 1860 } else if (vcpustate->eptgen != eptgen) { 1861 if (flush_by_asid()) 1862 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1863 else 1864 alloc_asid = true; /* (b2) */ 1865 } else { 1866 /* 1867 * This is the common case (a). 1868 */ 1869 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1870 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1871 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1872 } 1873 1874 if (alloc_asid) { 1875 if (++asid[cpu].num >= nasid) { 1876 asid[cpu].num = 1; 1877 if (++asid[cpu].gen == 0) 1878 asid[cpu].gen = 1; 1879 /* 1880 * If this cpu does not support "flush-by-asid" 1881 * then flush the entire TLB on a generation 1882 * bump. Subsequent ASID allocation in this 1883 * generation can be done without a TLB flush. 1884 */ 1885 if (!flush_by_asid()) 1886 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1887 } 1888 vcpustate->asid.gen = asid[cpu].gen; 1889 vcpustate->asid.num = asid[cpu].num; 1890 1891 ctrl->asid = vcpustate->asid.num; 1892 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1893 /* 1894 * If this cpu supports "flush-by-asid" then the TLB 1895 * was not flushed after the generation bump. The TLB 1896 * is flushed selectively after every new ASID allocation. 
1897 */ 1898 if (flush_by_asid()) 1899 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1900 } 1901 vcpustate->eptgen = eptgen; 1902 1903 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1904 KASSERT(ctrl->asid == vcpustate->asid.num, 1905 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1906 } 1907 1908 static void 1909 svm_pmap_deactivate(pmap_t pmap) 1910 { 1911 smr_exit(pmap->pm_eptsmr); 1912 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 1913 } 1914 1915 static __inline void 1916 disable_gintr(void) 1917 { 1918 1919 __asm __volatile("clgi"); 1920 } 1921 1922 static __inline void 1923 enable_gintr(void) 1924 { 1925 1926 __asm __volatile("stgi"); 1927 } 1928 1929 static __inline void 1930 svm_dr_enter_guest(struct svm_regctx *gctx) 1931 { 1932 1933 /* Save host control debug registers. */ 1934 gctx->host_dr7 = rdr7(); 1935 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1936 1937 /* 1938 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1939 * exceptions in the host based on the guest DRx values. The 1940 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1941 * VMCB. 1942 */ 1943 load_dr7(0); 1944 wrmsr(MSR_DEBUGCTLMSR, 0); 1945 1946 /* Save host debug registers. */ 1947 gctx->host_dr0 = rdr0(); 1948 gctx->host_dr1 = rdr1(); 1949 gctx->host_dr2 = rdr2(); 1950 gctx->host_dr3 = rdr3(); 1951 gctx->host_dr6 = rdr6(); 1952 1953 /* Restore guest debug registers. */ 1954 load_dr0(gctx->sctx_dr0); 1955 load_dr1(gctx->sctx_dr1); 1956 load_dr2(gctx->sctx_dr2); 1957 load_dr3(gctx->sctx_dr3); 1958 } 1959 1960 static __inline void 1961 svm_dr_leave_guest(struct svm_regctx *gctx) 1962 { 1963 1964 /* Save guest debug registers. */ 1965 gctx->sctx_dr0 = rdr0(); 1966 gctx->sctx_dr1 = rdr1(); 1967 gctx->sctx_dr2 = rdr2(); 1968 gctx->sctx_dr3 = rdr3(); 1969 1970 /* 1971 * Restore host debug registers. Restore DR7 and DEBUGCTL 1972 * last. 1973 */ 1974 load_dr0(gctx->host_dr0); 1975 load_dr1(gctx->host_dr1); 1976 load_dr2(gctx->host_dr2); 1977 load_dr3(gctx->host_dr3); 1978 load_dr6(gctx->host_dr6); 1979 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1980 load_dr7(gctx->host_dr7); 1981 } 1982 1983 /* 1984 * Start vcpu with specified RIP. 1985 */ 1986 static int 1987 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1988 struct vm_eventinfo *evinfo) 1989 { 1990 struct svm_regctx *gctx; 1991 struct svm_softc *svm_sc; 1992 struct svm_vcpu *vcpustate; 1993 struct vmcb_state *state; 1994 struct vmcb_ctrl *ctrl; 1995 struct vm_exit *vmexit; 1996 struct vlapic *vlapic; 1997 struct vm *vm; 1998 uint64_t vmcb_pa; 1999 int handled; 2000 uint16_t ldt_sel; 2001 2002 svm_sc = arg; 2003 vm = svm_sc->vm; 2004 2005 vcpustate = svm_get_vcpu(svm_sc, vcpu); 2006 state = svm_get_vmcb_state(svm_sc, vcpu); 2007 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 2008 vmexit = vm_exitinfo(vm, vcpu); 2009 vlapic = vm_lapic(vm, vcpu); 2010 2011 gctx = svm_get_guest_regctx(svm_sc, vcpu); 2012 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 2013 2014 if (vcpustate->lastcpu != curcpu) { 2015 /* 2016 * Force new ASID allocation by invalidating the generation. 2017 */ 2018 vcpustate->asid.gen = 0; 2019 2020 /* 2021 * Invalidate the VMCB state cache by marking all fields dirty. 2022 */ 2023 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 2024 2025 /* 2026 * XXX 2027 * Setting 'vcpustate->lastcpu' here is bit premature because 2028 * we may return from this function without actually executing 2029 * the VMRUN instruction. This could happen if a rendezvous 2030 * or an AST is pending on the first time through the loop. 
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to yield the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vm, vcpu)) {
			enable_gintr();
			vm_exit_debug(vm, vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit. The userspace hypervisor probably
		 * doesn't use an LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		svm_pmap_activate(svm_sc, vcpu, pmap);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);

		svm_pmap_deactivate(pmap);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/*
		 * Handle #VMEXIT and, if required, return to user space.
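		 * A non-zero return from svm_vmexit() means the exit was
		 * handled in the kernel and the loop re-enters the guest;
		 * zero ends the loop so the exit can be reported to the
		 * userspace hypervisor via 'vmexit'.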
		 */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	/* Do not permit user write access to VMCB fields by offset. */
	if (!VMCB_ACCESS_OK(ident)) {
		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
			return (0);
		}
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
		/* Ignore. */
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_reg(void *arg, int vcpu, int ident,
    struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = svm_getreg(arg, vcpu, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = svm_setreg(arg, vcpu, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta)
{
	/* struct svm_softc is the SVM backend's per-VM software state */
	struct svm_softc *sc;
	struct svm_vcpu *vcpu;
	struct vmcb *vmcb;
	uint64_t val;
	int i;
	int ret;

	sc = arg;

	KASSERT(sc != NULL, ("%s: arg was NULL", __func__));

	SNAPSHOT_VAR_OR_LEAVE(sc->nptp, meta, ret, done);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = &sc->vcpu[i];
		vmcb = &vcpu->vmcb;

		/* VMCB fields for virtual cpu i */
		SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.v_tpr, meta, ret, done);
		val = vmcb->ctrl.v_tpr;
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
		vmcb->ctrl.v_tpr = val;

		SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.asid, meta, ret, done);
		val = vmcb->ctrl.np_enable;
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
		vmcb->ctrl.np_enable = val;

		val = vmcb->ctrl.intr_shadow;
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
		vmcb->ctrl.intr_shadow = val;
		SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.tlb_ctrl, meta, ret, done);

		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad1,
		    sizeof(vmcb->state.pad1),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cpl, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad2,
		    sizeof(vmcb->state.pad2),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.efer, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad3,
		    sizeof(vmcb->state.pad3),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr4, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr3, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr0, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr7, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr6, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rflags, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rip, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad4,
		    sizeof(vmcb->state.pad4),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rsp, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad5,
		    sizeof(vmcb->state.pad5),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rax, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.star, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.lstar, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cstar, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sfmask, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.kernelgsbase,
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_cs, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_esp,
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_eip,
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr2, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad6,
		    sizeof(vmcb->state.pad6),
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.g_pat, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dbgctl, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_from, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_to, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_from, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_to, meta, ret, done);
		SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad7,
		    sizeof(vmcb->state.pad7),
		    meta, ret, done);

		/* Snapshot swctx for virtual cpu i */
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr0, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr1, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr2, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr3, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr6, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr7, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_debugctl, meta, ret,
		    done);

		/* Snapshot other svm_vcpu struct fields */

		/* Snapshot the NEXTRIP field */
		SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);

		/* Snapshot the lastcpu field */
		SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, ret, done);

		/* Snapshot the EPTGEN field - EPT is the Extended Page Table */
		SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, ret, done);

		/* Mark all VMCB caches dirty when restoring */
		if (meta->op == VM_SNAPSHOT_RESTORE) {
			svm_set_dirty(sc, i, VMCB_CACHE_ASID);
			svm_set_dirty(sc, i, VMCB_CACHE_IOPM);
			svm_set_dirty(sc, i, VMCB_CACHE_I);
			svm_set_dirty(sc, i, VMCB_CACHE_TPR);
			svm_set_dirty(sc, i, VMCB_CACHE_CR2);
			svm_set_dirty(sc, i, VMCB_CACHE_CR);
			svm_set_dirty(sc, i, VMCB_CACHE_DT);
			svm_set_dirty(sc, i, VMCB_CACHE_SEG);
			svm_set_dirty(sc, i, VMCB_CACHE_NP);
		}
	}

	if (meta->op == VM_SNAPSHOT_RESTORE)
		flush_by_asid();

done:
	return (ret);
}

static int
svm_snapshot_vmcx(void *arg, struct vm_snapshot_meta *meta, int vcpu)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	int err, running, hostcpu;

	sc = (struct svm_softc *)arg;
	err = 0;

	KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
	vmcb = svm_get_vmcb(sc, vcpu);

	running = vcpu_is_running(sc->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu) {
		printf("%s: %s%d is running", __func__, vm_name(sc->vm), vcpu);
		return (EINVAL);
	}

	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta);
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta);
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta);
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta);

	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta);

	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta);

	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta);
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta);
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta);

	/* Guest segments */
	/* ES */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta);

	/* CS */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta);

	/* SS */
	err += svm_snapshot_reg(sc, vcpu,
	    VM_REG_GUEST_SS, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta);

	/* DS */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta);

	/* FS */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta);

	/* GS */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta);

	/* TR */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta);

	/* LDTR */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta);

	/* EFER */
	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta);

	/* IDTR and GDTR */
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta);
	err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta);

	/* Specific AMD registers */
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_NPT_BASE, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_IO_PERM, 8), meta);
	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_MSR_PERM, 8), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);

	err += vmcb_snapshot_any(sc, vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);

	err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta);

	return (err);
}

static int
svm_restore_tsc(void *arg, int vcpu, uint64_t offset)
{
	int err;

	err = svm_set_tsc_offset(arg, vcpu, offset);

	return (err);
}
#endif

struct vmm_ops vmm_ops_amd = {
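	/*
	 * Entry points through which the machine-independent vmm(4) layer
	 * drives the AMD SVM backend.
	 */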
	.init = svm_init,
	.cleanup = svm_cleanup,
	.resume = svm_restore,
	.vminit = svm_vminit,
	.vmrun = svm_vmrun,
	.vmcleanup = svm_vmcleanup,
	.vmgetreg = svm_getreg,
	.vmsetreg = svm_setreg,
	.vmgetdesc = vmcb_getdesc,
	.vmsetdesc = vmcb_setdesc,
	.vmgetcap = svm_getcap,
	.vmsetcap = svm_setcap,
	.vmspace_alloc = svm_npt_alloc,
	.vmspace_free = svm_npt_free,
	.vlapic_init = svm_vlapic_init,
	.vlapic_cleanup = svm_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
	.vmsnapshot = svm_snapshot_vmi,
	.vmcx_snapshot = svm_snapshot_vmcx,
	.vm_restore_tsc = svm_restore_tsc,
#endif
};