1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_bhyve_snapshot.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/smp.h> 37 #include <sys/kernel.h> 38 #include <sys/malloc.h> 39 #include <sys/pcpu.h> 40 #include <sys/proc.h> 41 #include <sys/sysctl.h> 42 43 #include <vm/vm.h> 44 #include <vm/pmap.h> 45 46 #include <machine/cpufunc.h> 47 #include <machine/psl.h> 48 #include <machine/md_var.h> 49 #include <machine/reg.h> 50 #include <machine/specialreg.h> 51 #include <machine/smp.h> 52 #include <machine/vmm.h> 53 #include <machine/vmm_dev.h> 54 #include <machine/vmm_instruction_emul.h> 55 #include <machine/vmm_snapshot.h> 56 57 #include "vmm_lapic.h" 58 #include "vmm_stat.h" 59 #include "vmm_ktr.h" 60 #include "vmm_ioport.h" 61 #include "vatpic.h" 62 #include "vlapic.h" 63 #include "vlapic_priv.h" 64 65 #include "x86.h" 66 #include "vmcb.h" 67 #include "svm.h" 68 #include "svm_softc.h" 69 #include "svm_msr.h" 70 #include "npt.h" 71 72 SYSCTL_DECL(_hw_vmm); 73 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 74 NULL); 75 76 /* 77 * SVM CPUID function 0x8000_000A, edx bit decoding. 78 */ 79 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 80 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 81 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 82 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 83 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 84 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 85 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 86 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 87 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 88 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 89 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 90 91 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 92 VMCB_CACHE_IOPM | \ 93 VMCB_CACHE_I | \ 94 VMCB_CACHE_TPR | \ 95 VMCB_CACHE_CR2 | \ 96 VMCB_CACHE_CR | \ 97 VMCB_CACHE_DR | \ 98 VMCB_CACHE_DT | \ 99 VMCB_CACHE_SEG | \ 100 VMCB_CACHE_NP) 101 102 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 103 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 104 0, NULL); 105 106 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 107 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 108 109 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 110 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 111 "SVM features advertised by CPUID.8000000AH:EDX"); 112 113 static int disable_npf_assist; 114 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 115 &disable_npf_assist, 0, NULL); 116 117 /* Maximum ASIDs supported by the processor */ 118 static uint32_t nasid; 119 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 120 "Number of ASIDs supported by this processor"); 121 122 /* Current ASID generation for each host cpu */ 123 static struct asid asid[MAXCPU]; 124 125 /* 126 * SVM host state saved area of size 4KB for each core. 127 */ 128 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 129 130 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 131 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 132 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 133 134 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 135 136 static __inline int 137 flush_by_asid(void) 138 { 139 140 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 141 } 142 143 static __inline int 144 decode_assist(void) 145 { 146 147 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 148 } 149 150 static void 151 svm_disable(void *arg __unused) 152 { 153 uint64_t efer; 154 155 efer = rdmsr(MSR_EFER); 156 efer &= ~EFER_SVM; 157 wrmsr(MSR_EFER, efer); 158 } 159 160 /* 161 * Disable SVM on all CPUs. 162 */ 163 static int 164 svm_cleanup(void) 165 { 166 167 smp_rendezvous(NULL, svm_disable, NULL, NULL); 168 return (0); 169 } 170 171 /* 172 * Verify that all the features required by bhyve are available. 173 */ 174 static int 175 check_svm_features(void) 176 { 177 u_int regs[4]; 178 179 /* CPUID Fn8000_000A is for SVM */ 180 do_cpuid(0x8000000A, regs); 181 svm_feature &= regs[3]; 182 183 /* 184 * The number of ASIDs can be configured to be less than what is 185 * supported by the hardware but not more. 186 */ 187 if (nasid == 0 || nasid > regs[1]) 188 nasid = regs[1]; 189 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 190 191 /* bhyve requires the Nested Paging feature */ 192 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 193 printf("SVM: Nested Paging feature not available.\n"); 194 return (ENXIO); 195 } 196 197 /* bhyve requires the NRIP Save feature */ 198 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 199 printf("SVM: NRIP Save feature not available.\n"); 200 return (ENXIO); 201 } 202 203 return (0); 204 } 205 206 static void 207 svm_enable(void *arg __unused) 208 { 209 uint64_t efer; 210 211 efer = rdmsr(MSR_EFER); 212 efer |= EFER_SVM; 213 wrmsr(MSR_EFER, efer); 214 215 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); 216 } 217 218 /* 219 * Return 1 if SVM is enabled on this processor and 0 otherwise. 220 */ 221 static int 222 svm_available(void) 223 { 224 uint64_t msr; 225 226 /* Section 15.4 Enabling SVM from APM2. */ 227 if ((amd_feature2 & AMDID2_SVM) == 0) { 228 printf("SVM: not available.\n"); 229 return (0); 230 } 231 232 msr = rdmsr(MSR_VM_CR); 233 if ((msr & VM_CR_SVMDIS) != 0) { 234 printf("SVM: disabled by BIOS.\n"); 235 return (0); 236 } 237 238 return (1); 239 } 240 241 static int 242 svm_init(int ipinum) 243 { 244 int error, cpu; 245 246 if (!svm_available()) 247 return (ENXIO); 248 249 error = check_svm_features(); 250 if (error) 251 return (error); 252 253 vmcb_clean &= VMCB_CACHE_DEFAULT; 254 255 for (cpu = 0; cpu < MAXCPU; cpu++) { 256 /* 257 * Initialize the host ASIDs to their "highest" valid values. 258 * 259 * The next ASID allocation will rollover both 'gen' and 'num' 260 * and start off the sequence at {1,1}. 261 */ 262 asid[cpu].gen = ~0UL; 263 asid[cpu].num = nasid - 1; 264 } 265 266 svm_msr_init(); 267 svm_npt_init(ipinum); 268 269 /* Enable SVM on all CPUs */ 270 smp_rendezvous(NULL, svm_enable, NULL, NULL); 271 272 return (0); 273 } 274 275 static void 276 svm_restore(void) 277 { 278 279 svm_enable(NULL); 280 } 281 282 #ifdef BHYVE_SNAPSHOT 283 int 284 svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset) 285 { 286 int error; 287 struct vmcb_ctrl *ctrl; 288 289 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 290 ctrl->tsc_offset = offset; 291 292 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 293 VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset); 294 295 error = vm_set_tsc_offset(sc->vm, vcpu, offset); 296 297 return (error); 298 } 299 #endif 300 301 /* Pentium compatible MSRs */ 302 #define MSR_PENTIUM_START 0 303 #define MSR_PENTIUM_END 0x1FFF 304 /* AMD 6th generation and Intel compatible MSRs */ 305 #define MSR_AMD6TH_START 0xC0000000UL 306 #define MSR_AMD6TH_END 0xC0001FFFUL 307 /* AMD 7th and 8th generation compatible MSRs */ 308 #define MSR_AMD7TH_START 0xC0010000UL 309 #define MSR_AMD7TH_END 0xC0011FFFUL 310 311 /* 312 * Get the index and bit position for a MSR in permission bitmap. 313 * Two bits are used for each MSR: lower bit for read and higher bit for write. 314 */ 315 static int 316 svm_msr_index(uint64_t msr, int *index, int *bit) 317 { 318 uint32_t base, off; 319 320 *index = -1; 321 *bit = (msr % 4) * 2; 322 base = 0; 323 324 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 325 *index = msr / 4; 326 return (0); 327 } 328 329 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 330 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 331 off = (msr - MSR_AMD6TH_START); 332 *index = (off + base) / 4; 333 return (0); 334 } 335 336 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 337 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 338 off = (msr - MSR_AMD7TH_START); 339 *index = (off + base) / 4; 340 return (0); 341 } 342 343 return (EINVAL); 344 } 345 346 /* 347 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 348 */ 349 static void 350 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 351 { 352 int index, bit, error; 353 354 error = svm_msr_index(msr, &index, &bit); 355 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 356 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 357 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 358 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 359 "msr %#lx", __func__, bit, msr)); 360 361 if (read) 362 perm_bitmap[index] &= ~(1UL << bit); 363 364 if (write) 365 perm_bitmap[index] &= ~(2UL << bit); 366 } 367 368 static void 369 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 370 { 371 372 svm_msr_perm(perm_bitmap, msr, true, true); 373 } 374 375 static void 376 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 377 { 378 379 svm_msr_perm(perm_bitmap, msr, true, false); 380 } 381 382 static __inline int 383 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 384 { 385 struct vmcb_ctrl *ctrl; 386 387 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 388 389 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 390 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 391 } 392 393 static __inline void 394 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 395 int enabled) 396 { 397 struct vmcb_ctrl *ctrl; 398 uint32_t oldval; 399 400 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 401 402 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 403 oldval = ctrl->intercept[idx]; 404 405 if (enabled) 406 ctrl->intercept[idx] |= bitmask; 407 else 408 ctrl->intercept[idx] &= ~bitmask; 409 410 if (ctrl->intercept[idx] != oldval) { 411 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 412 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 413 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 414 } 415 } 416 417 static __inline void 418 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 419 { 420 421 svm_set_intercept(sc, vcpu, off, bitmask, 0); 422 } 423 424 static __inline void 425 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 426 { 427 428 svm_set_intercept(sc, vcpu, off, bitmask, 1); 429 } 430 431 static void 432 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 433 uint64_t msrpm_base_pa, uint64_t np_pml4) 434 { 435 struct vmcb_ctrl *ctrl; 436 struct vmcb_state *state; 437 uint32_t mask; 438 int n; 439 440 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 441 state = svm_get_vmcb_state(sc, vcpu); 442 443 ctrl->iopm_base_pa = iopm_base_pa; 444 ctrl->msrpm_base_pa = msrpm_base_pa; 445 446 /* Enable nested paging */ 447 ctrl->np_enable = 1; 448 ctrl->n_cr3 = np_pml4; 449 450 /* 451 * Intercept accesses to the control registers that are not shadowed 452 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 453 */ 454 for (n = 0; n < 16; n++) { 455 mask = (BIT(n) << 16) | BIT(n); 456 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 457 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 458 else 459 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 460 } 461 462 /* 463 * Intercept everything when tracing guest exceptions otherwise 464 * just intercept machine check exception. 465 */ 466 if (vcpu_trace_exceptions(sc->vm, vcpu)) { 467 for (n = 0; n < 32; n++) { 468 /* 469 * Skip unimplemented vectors in the exception bitmap. 470 */ 471 if (n == 2 || n == 9) { 472 continue; 473 } 474 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); 475 } 476 } else { 477 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 478 } 479 480 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 481 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 482 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 483 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 484 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 485 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 486 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 487 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 488 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 489 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 490 VMCB_INTCPT_FERR_FREEZE); 491 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 492 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 493 494 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 495 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 496 497 /* 498 * Intercept SVM instructions since AMD enables them in guests otherwise. 499 * Non-intercepted VMMCALL causes #UD, skip it. 500 */ 501 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 502 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 503 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 504 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 505 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 506 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 507 508 /* 509 * From section "Canonicalization and Consistency Checks" in APMv2 510 * the VMRUN intercept bit must be set to pass the consistency check. 511 */ 512 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 513 514 /* 515 * The ASID will be set to a non-zero value just before VMRUN. 516 */ 517 ctrl->asid = 0; 518 519 /* 520 * Section 15.21.1, Interrupt Masking in EFLAGS 521 * Section 15.21.2, Virtualizing APIC.TPR 522 * 523 * This must be set for %rflag and %cr8 isolation of guest and host. 524 */ 525 ctrl->v_intr_masking = 1; 526 527 /* Enable Last Branch Record aka LBR for debugging */ 528 ctrl->lbr_virt_en = 1; 529 state->dbgctl = BIT(0); 530 531 /* EFER_SVM must always be set when the guest is executing */ 532 state->efer = EFER_SVM; 533 534 /* Set up the PAT to power-on state */ 535 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 536 PAT_VALUE(1, PAT_WRITE_THROUGH) | 537 PAT_VALUE(2, PAT_UNCACHED) | 538 PAT_VALUE(3, PAT_UNCACHEABLE) | 539 PAT_VALUE(4, PAT_WRITE_BACK) | 540 PAT_VALUE(5, PAT_WRITE_THROUGH) | 541 PAT_VALUE(6, PAT_UNCACHED) | 542 PAT_VALUE(7, PAT_UNCACHEABLE); 543 544 /* Set up DR6/7 to power-on state */ 545 state->dr6 = DBREG_DR6_RESERVED1; 546 state->dr7 = DBREG_DR7_RESERVED1; 547 } 548 549 /* 550 * Initialize a virtual machine. 551 */ 552 static void * 553 svm_vminit(struct vm *vm, pmap_t pmap) 554 { 555 struct svm_softc *svm_sc; 556 struct svm_vcpu *vcpu; 557 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 558 int i; 559 uint16_t maxcpus; 560 561 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 562 if (((uintptr_t)svm_sc & PAGE_MASK) != 0) 563 panic("malloc of svm_softc not aligned on page boundary"); 564 565 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 566 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 567 if (svm_sc->msr_bitmap == NULL) 568 panic("contigmalloc of SVM MSR bitmap failed"); 569 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 570 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 571 if (svm_sc->iopm_bitmap == NULL) 572 panic("contigmalloc of SVM IO bitmap failed"); 573 574 svm_sc->vm = vm; 575 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pmltop); 576 577 /* 578 * Intercept read and write accesses to all MSRs. 579 */ 580 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 581 582 /* 583 * Access to the following MSRs is redirected to the VMCB when the 584 * guest is executing. Therefore it is safe to allow the guest to 585 * read/write these MSRs directly without hypervisor involvement. 586 */ 587 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 590 591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 599 600 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 601 602 /* 603 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 604 */ 605 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 606 607 /* Intercept access to all I/O ports. */ 608 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 609 610 iopm_pa = vtophys(svm_sc->iopm_bitmap); 611 msrpm_pa = vtophys(svm_sc->msr_bitmap); 612 pml4_pa = svm_sc->nptp; 613 maxcpus = vm_get_maxcpus(svm_sc->vm); 614 for (i = 0; i < maxcpus; i++) { 615 vcpu = svm_get_vcpu(svm_sc, i); 616 vcpu->nextrip = ~0; 617 vcpu->lastcpu = NOCPU; 618 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 619 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 620 svm_msr_guest_init(svm_sc, i); 621 } 622 return (svm_sc); 623 } 624 625 /* 626 * Collateral for a generic SVM VM-exit. 627 */ 628 static void 629 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 630 { 631 632 vme->exitcode = VM_EXITCODE_SVM; 633 vme->u.svm.exitcode = code; 634 vme->u.svm.exitinfo1 = info1; 635 vme->u.svm.exitinfo2 = info2; 636 } 637 638 static int 639 svm_cpl(struct vmcb_state *state) 640 { 641 642 /* 643 * From APMv2: 644 * "Retrieve the CPL from the CPL field in the VMCB, not 645 * from any segment DPL" 646 */ 647 return (state->cpl); 648 } 649 650 static enum vm_cpu_mode 651 svm_vcpu_mode(struct vmcb *vmcb) 652 { 653 struct vmcb_segment seg; 654 struct vmcb_state *state; 655 int error; 656 657 state = &vmcb->state; 658 659 if (state->efer & EFER_LMA) { 660 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 661 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 662 error)); 663 664 /* 665 * Section 4.8.1 for APM2, check if Code Segment has 666 * Long attribute set in descriptor. 667 */ 668 if (seg.attrib & VMCB_CS_ATTRIB_L) 669 return (CPU_MODE_64BIT); 670 else 671 return (CPU_MODE_COMPATIBILITY); 672 } else if (state->cr0 & CR0_PE) { 673 return (CPU_MODE_PROTECTED); 674 } else { 675 return (CPU_MODE_REAL); 676 } 677 } 678 679 static enum vm_paging_mode 680 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 681 { 682 683 if ((cr0 & CR0_PG) == 0) 684 return (PAGING_MODE_FLAT); 685 if ((cr4 & CR4_PAE) == 0) 686 return (PAGING_MODE_32); 687 if (efer & EFER_LME) 688 return (PAGING_MODE_64); 689 else 690 return (PAGING_MODE_PAE); 691 } 692 693 /* 694 * ins/outs utility routines 695 */ 696 static uint64_t 697 svm_inout_str_index(struct svm_regctx *regs, int in) 698 { 699 uint64_t val; 700 701 val = in ? regs->sctx_rdi : regs->sctx_rsi; 702 703 return (val); 704 } 705 706 static uint64_t 707 svm_inout_str_count(struct svm_regctx *regs, int rep) 708 { 709 uint64_t val; 710 711 val = rep ? regs->sctx_rcx : 1; 712 713 return (val); 714 } 715 716 static void 717 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 718 int in, struct vm_inout_str *vis) 719 { 720 int error, s; 721 722 if (in) { 723 vis->seg_name = VM_REG_GUEST_ES; 724 } else { 725 /* The segment field has standard encoding */ 726 s = (info1 >> 10) & 0x7; 727 vis->seg_name = vm_segment_name(s); 728 } 729 730 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 731 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 732 } 733 734 static int 735 svm_inout_str_addrsize(uint64_t info1) 736 { 737 uint32_t size; 738 739 size = (info1 >> 7) & 0x7; 740 switch (size) { 741 case 1: 742 return (2); /* 16 bit */ 743 case 2: 744 return (4); /* 32 bit */ 745 case 4: 746 return (8); /* 64 bit */ 747 default: 748 panic("%s: invalid size encoding %d", __func__, size); 749 } 750 } 751 752 static void 753 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 754 { 755 struct vmcb_state *state; 756 757 state = &vmcb->state; 758 paging->cr3 = state->cr3; 759 paging->cpl = svm_cpl(state); 760 paging->cpu_mode = svm_vcpu_mode(vmcb); 761 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 762 state->efer); 763 } 764 765 #define UNHANDLED 0 766 767 /* 768 * Handle guest I/O intercept. 769 */ 770 static int 771 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 772 { 773 struct vmcb_ctrl *ctrl; 774 struct vmcb_state *state; 775 struct svm_regctx *regs; 776 struct vm_inout_str *vis; 777 uint64_t info1; 778 int inout_string; 779 780 state = svm_get_vmcb_state(svm_sc, vcpu); 781 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 782 regs = svm_get_guest_regctx(svm_sc, vcpu); 783 784 info1 = ctrl->exitinfo1; 785 inout_string = info1 & BIT(2) ? 1 : 0; 786 787 /* 788 * The effective segment number in EXITINFO1[12:10] is populated 789 * only if the processor has the DecodeAssist capability. 790 * 791 * XXX this is not specified explicitly in APMv2 but can be verified 792 * empirically. 793 */ 794 if (inout_string && !decode_assist()) 795 return (UNHANDLED); 796 797 vmexit->exitcode = VM_EXITCODE_INOUT; 798 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 799 vmexit->u.inout.string = inout_string; 800 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 801 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 802 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 803 vmexit->u.inout.eax = (uint32_t)(state->rax); 804 805 if (inout_string) { 806 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 807 vis = &vmexit->u.inout_str; 808 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 809 vis->rflags = state->rflags; 810 vis->cr0 = state->cr0; 811 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 812 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 813 vis->addrsize = svm_inout_str_addrsize(info1); 814 svm_inout_str_seginfo(svm_sc, vcpu, info1, 815 vmexit->u.inout.in, vis); 816 } 817 818 return (UNHANDLED); 819 } 820 821 static int 822 npf_fault_type(uint64_t exitinfo1) 823 { 824 825 if (exitinfo1 & VMCB_NPF_INFO1_W) 826 return (VM_PROT_WRITE); 827 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 828 return (VM_PROT_EXECUTE); 829 else 830 return (VM_PROT_READ); 831 } 832 833 static bool 834 svm_npf_emul_fault(uint64_t exitinfo1) 835 { 836 837 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 838 return (false); 839 } 840 841 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 842 return (false); 843 } 844 845 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 846 return (false); 847 } 848 849 return (true); 850 } 851 852 static void 853 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 854 { 855 struct vm_guest_paging *paging; 856 struct vmcb_segment seg; 857 struct vmcb_ctrl *ctrl; 858 char *inst_bytes; 859 int error, inst_len; 860 861 ctrl = &vmcb->ctrl; 862 paging = &vmexit->u.inst_emul.paging; 863 864 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 865 vmexit->u.inst_emul.gpa = gpa; 866 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 867 svm_paging_info(vmcb, paging); 868 869 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 870 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 871 872 switch(paging->cpu_mode) { 873 case CPU_MODE_REAL: 874 vmexit->u.inst_emul.cs_base = seg.base; 875 vmexit->u.inst_emul.cs_d = 0; 876 break; 877 case CPU_MODE_PROTECTED: 878 case CPU_MODE_COMPATIBILITY: 879 vmexit->u.inst_emul.cs_base = seg.base; 880 881 /* 882 * Section 4.8.1 of APM2, Default Operand Size or D bit. 883 */ 884 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 885 1 : 0; 886 break; 887 default: 888 vmexit->u.inst_emul.cs_base = 0; 889 vmexit->u.inst_emul.cs_d = 0; 890 break; 891 } 892 893 /* 894 * Copy the instruction bytes into 'vie' if available. 895 */ 896 if (decode_assist() && !disable_npf_assist) { 897 inst_len = ctrl->inst_len; 898 inst_bytes = ctrl->inst_bytes; 899 } else { 900 inst_len = 0; 901 inst_bytes = NULL; 902 } 903 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 904 } 905 906 #ifdef KTR 907 static const char * 908 intrtype_to_str(int intr_type) 909 { 910 switch (intr_type) { 911 case VMCB_EVENTINJ_TYPE_INTR: 912 return ("hwintr"); 913 case VMCB_EVENTINJ_TYPE_NMI: 914 return ("nmi"); 915 case VMCB_EVENTINJ_TYPE_INTn: 916 return ("swintr"); 917 case VMCB_EVENTINJ_TYPE_EXCEPTION: 918 return ("exception"); 919 default: 920 panic("%s: unknown intr_type %d", __func__, intr_type); 921 } 922 } 923 #endif 924 925 /* 926 * Inject an event to vcpu as described in section 15.20, "Event injection". 927 */ 928 static void 929 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 930 uint32_t error, bool ec_valid) 931 { 932 struct vmcb_ctrl *ctrl; 933 934 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 935 936 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 937 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 938 939 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 940 __func__, vector)); 941 942 switch (intr_type) { 943 case VMCB_EVENTINJ_TYPE_INTR: 944 case VMCB_EVENTINJ_TYPE_NMI: 945 case VMCB_EVENTINJ_TYPE_INTn: 946 break; 947 case VMCB_EVENTINJ_TYPE_EXCEPTION: 948 if (vector >= 0 && vector <= 31 && vector != 2) 949 break; 950 /* FALLTHROUGH */ 951 default: 952 panic("%s: invalid intr_type/vector: %d/%d", __func__, 953 intr_type, vector); 954 } 955 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 956 if (ec_valid) { 957 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 958 ctrl->eventinj |= (uint64_t)error << 32; 959 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 960 intrtype_to_str(intr_type), vector, error); 961 } else { 962 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 963 intrtype_to_str(intr_type), vector); 964 } 965 } 966 967 static void 968 svm_update_virqinfo(struct svm_softc *sc, int vcpu) 969 { 970 struct vm *vm; 971 struct vlapic *vlapic; 972 struct vmcb_ctrl *ctrl; 973 974 vm = sc->vm; 975 vlapic = vm_lapic(vm, vcpu); 976 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 977 978 /* Update %cr8 in the emulated vlapic */ 979 vlapic_set_cr8(vlapic, ctrl->v_tpr); 980 981 /* Virtual interrupt injection is not used. */ 982 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 983 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 984 } 985 986 static void 987 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 988 { 989 struct vmcb_ctrl *ctrl; 990 uint64_t intinfo; 991 992 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 993 intinfo = ctrl->exitintinfo; 994 if (!VMCB_EXITINTINFO_VALID(intinfo)) 995 return; 996 997 /* 998 * From APMv2, Section "Intercepts during IDT interrupt delivery" 999 * 1000 * If a #VMEXIT happened during event delivery then record the event 1001 * that was being delivered. 1002 */ 1003 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 1004 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 1005 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 1006 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 1007 } 1008 1009 #ifdef INVARIANTS 1010 static __inline int 1011 vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 1012 { 1013 1014 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1015 VMCB_INTCPT_VINTR)); 1016 } 1017 #endif 1018 1019 static __inline void 1020 enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1021 { 1022 struct vmcb_ctrl *ctrl; 1023 1024 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1025 1026 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1027 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1028 KASSERT(vintr_intercept_enabled(sc, vcpu), 1029 ("%s: vintr intercept should be enabled", __func__)); 1030 return; 1031 } 1032 1033 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 1034 ctrl->v_irq = 1; 1035 ctrl->v_ign_tpr = 1; 1036 ctrl->v_intr_vector = 0; 1037 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1038 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1039 } 1040 1041 static __inline void 1042 disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1043 { 1044 struct vmcb_ctrl *ctrl; 1045 1046 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1047 1048 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1049 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1050 ("%s: vintr intercept should be disabled", __func__)); 1051 return; 1052 } 1053 1054 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1055 ctrl->v_irq = 0; 1056 ctrl->v_intr_vector = 0; 1057 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1058 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1059 } 1060 1061 static int 1062 svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 1063 { 1064 struct vmcb_ctrl *ctrl; 1065 int oldval, newval; 1066 1067 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1068 oldval = ctrl->intr_shadow; 1069 newval = val ? 1 : 0; 1070 if (newval != oldval) { 1071 ctrl->intr_shadow = newval; 1072 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1073 } 1074 return (0); 1075 } 1076 1077 static int 1078 svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1079 { 1080 struct vmcb_ctrl *ctrl; 1081 1082 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1083 *val = ctrl->intr_shadow; 1084 return (0); 1085 } 1086 1087 /* 1088 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1089 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1090 * to track when the vcpu is done handling the NMI. 1091 */ 1092 static int 1093 nmi_blocked(struct svm_softc *sc, int vcpu) 1094 { 1095 int blocked; 1096 1097 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1098 VMCB_INTCPT_IRET); 1099 return (blocked); 1100 } 1101 1102 static void 1103 enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1104 { 1105 1106 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1107 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1108 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1109 } 1110 1111 static void 1112 clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1113 { 1114 int error; 1115 1116 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1117 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1118 /* 1119 * When the IRET intercept is cleared the vcpu will attempt to execute 1120 * the "iret" when it runs next. However, it is possible to inject 1121 * another NMI into the vcpu before the "iret" has actually executed. 1122 * 1123 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1124 * it will trap back into the hypervisor. If an NMI is pending for 1125 * the vcpu it will be injected into the guest. 1126 * 1127 * XXX this needs to be fixed 1128 */ 1129 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1130 1131 /* 1132 * Set 'intr_shadow' to prevent an NMI from being injected on the 1133 * immediate VMRUN. 1134 */ 1135 error = svm_modify_intr_shadow(sc, vcpu, 1); 1136 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1137 } 1138 1139 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1140 1141 static int 1142 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu) 1143 { 1144 struct vm_exit *vme; 1145 struct vmcb_state *state; 1146 uint64_t changed, lma, oldval; 1147 int error; 1148 1149 state = svm_get_vmcb_state(sc, vcpu); 1150 1151 oldval = state->efer; 1152 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1153 1154 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1155 changed = oldval ^ newval; 1156 1157 if (newval & EFER_MBZ_BITS) 1158 goto gpf; 1159 1160 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1161 if (changed & EFER_LME) { 1162 if (state->cr0 & CR0_PG) 1163 goto gpf; 1164 } 1165 1166 /* EFER.LMA = EFER.LME & CR0.PG */ 1167 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1168 lma = EFER_LMA; 1169 else 1170 lma = 0; 1171 1172 if ((newval & EFER_LMA) != lma) 1173 goto gpf; 1174 1175 if (newval & EFER_NXE) { 1176 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) 1177 goto gpf; 1178 } 1179 1180 /* 1181 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1182 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1183 */ 1184 if (newval & EFER_LMSLE) { 1185 vme = vm_exitinfo(sc->vm, vcpu); 1186 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1187 *retu = true; 1188 return (0); 1189 } 1190 1191 if (newval & EFER_FFXSR) { 1192 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) 1193 goto gpf; 1194 } 1195 1196 if (newval & EFER_TCE) { 1197 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) 1198 goto gpf; 1199 } 1200 1201 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1202 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1203 return (0); 1204 gpf: 1205 vm_inject_gp(sc->vm, vcpu); 1206 return (0); 1207 } 1208 1209 static int 1210 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1211 bool *retu) 1212 { 1213 int error; 1214 1215 if (lapic_msr(num)) 1216 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1217 else if (num == MSR_EFER) 1218 error = svm_write_efer(sc, vcpu, val, retu); 1219 else 1220 error = svm_wrmsr(sc, vcpu, num, val, retu); 1221 1222 return (error); 1223 } 1224 1225 static int 1226 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1227 { 1228 struct vmcb_state *state; 1229 struct svm_regctx *ctx; 1230 uint64_t result; 1231 int error; 1232 1233 if (lapic_msr(num)) 1234 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1235 else 1236 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1237 1238 if (error == 0) { 1239 state = svm_get_vmcb_state(sc, vcpu); 1240 ctx = svm_get_guest_regctx(sc, vcpu); 1241 state->rax = result & 0xffffffff; 1242 ctx->sctx_rdx = result >> 32; 1243 } 1244 1245 return (error); 1246 } 1247 1248 #ifdef KTR 1249 static const char * 1250 exit_reason_to_str(uint64_t reason) 1251 { 1252 int i; 1253 static char reasonbuf[32]; 1254 static const struct { 1255 int reason; 1256 const char *str; 1257 } reasons[] = { 1258 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1259 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1260 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1261 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1262 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1263 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1264 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1265 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1266 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1267 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1268 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1269 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1270 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1271 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1272 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1273 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1274 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1275 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1276 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1277 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1278 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1279 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1280 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1281 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1282 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1283 }; 1284 1285 for (i = 0; i < nitems(reasons); i++) { 1286 if (reasons[i].reason == reason) 1287 return (reasons[i].str); 1288 } 1289 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1290 return (reasonbuf); 1291 } 1292 #endif /* KTR */ 1293 1294 /* 1295 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1296 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1297 * and exceptions caused by INT3, INTO and BOUND instructions. 1298 * 1299 * Return 1 if the nRIP is valid and 0 otherwise. 1300 */ 1301 static int 1302 nrip_valid(uint64_t exitcode) 1303 { 1304 switch (exitcode) { 1305 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1306 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1307 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1308 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1309 case 0x43: /* INT3 */ 1310 case 0x44: /* INTO */ 1311 case 0x45: /* BOUND */ 1312 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1313 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1314 return (1); 1315 default: 1316 return (0); 1317 } 1318 } 1319 1320 static int 1321 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1322 { 1323 struct vmcb *vmcb; 1324 struct vmcb_state *state; 1325 struct vmcb_ctrl *ctrl; 1326 struct svm_regctx *ctx; 1327 uint64_t code, info1, info2, val; 1328 uint32_t eax, ecx, edx; 1329 int error, errcode_valid, handled, idtvec, reflect; 1330 bool retu; 1331 1332 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1333 vmcb = svm_get_vmcb(svm_sc, vcpu); 1334 state = &vmcb->state; 1335 ctrl = &vmcb->ctrl; 1336 1337 handled = 0; 1338 code = ctrl->exitcode; 1339 info1 = ctrl->exitinfo1; 1340 info2 = ctrl->exitinfo2; 1341 1342 vmexit->exitcode = VM_EXITCODE_BOGUS; 1343 vmexit->rip = state->rip; 1344 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1345 1346 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1347 1348 /* 1349 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1350 * in an inconsistent state and can trigger assertions that would 1351 * never happen otherwise. 1352 */ 1353 if (code == VMCB_EXIT_INVALID) { 1354 vm_exit_svm(vmexit, code, info1, info2); 1355 return (0); 1356 } 1357 1358 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1359 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1360 1361 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1362 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1363 vmexit->inst_length, code, info1, info2)); 1364 1365 svm_update_virqinfo(svm_sc, vcpu); 1366 svm_save_intinfo(svm_sc, vcpu); 1367 1368 switch (code) { 1369 case VMCB_EXIT_IRET: 1370 /* 1371 * Restart execution at "iret" but with the intercept cleared. 1372 */ 1373 vmexit->inst_length = 0; 1374 clear_nmi_blocking(svm_sc, vcpu); 1375 handled = 1; 1376 break; 1377 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1378 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1379 handled = 1; 1380 break; 1381 case VMCB_EXIT_INTR: /* external interrupt */ 1382 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1383 handled = 1; 1384 break; 1385 case VMCB_EXIT_NMI: /* external NMI */ 1386 handled = 1; 1387 break; 1388 case 0x40 ... 0x5F: 1389 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1390 reflect = 1; 1391 idtvec = code - 0x40; 1392 switch (idtvec) { 1393 case IDT_MC: 1394 /* 1395 * Call the machine check handler by hand. Also don't 1396 * reflect the machine check back into the guest. 1397 */ 1398 reflect = 0; 1399 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1400 __asm __volatile("int $18"); 1401 break; 1402 case IDT_PF: 1403 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1404 info2); 1405 KASSERT(error == 0, ("%s: error %d updating cr2", 1406 __func__, error)); 1407 /* fallthru */ 1408 case IDT_NP: 1409 case IDT_SS: 1410 case IDT_GP: 1411 case IDT_AC: 1412 case IDT_TS: 1413 errcode_valid = 1; 1414 break; 1415 1416 case IDT_DF: 1417 errcode_valid = 1; 1418 info1 = 0; 1419 break; 1420 1421 case IDT_BP: 1422 case IDT_OF: 1423 case IDT_BR: 1424 /* 1425 * The 'nrip' field is populated for INT3, INTO and 1426 * BOUND exceptions and this also implies that 1427 * 'inst_length' is non-zero. 1428 * 1429 * Reset 'inst_length' to zero so the guest %rip at 1430 * event injection is identical to what it was when 1431 * the exception originally happened. 1432 */ 1433 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1434 "to zero before injecting exception %d", 1435 vmexit->inst_length, idtvec); 1436 vmexit->inst_length = 0; 1437 /* fallthru */ 1438 default: 1439 errcode_valid = 0; 1440 info1 = 0; 1441 break; 1442 } 1443 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1444 "when reflecting exception %d into guest", 1445 vmexit->inst_length, idtvec)); 1446 1447 if (reflect) { 1448 /* Reflect the exception back into the guest */ 1449 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1450 "%d/%#x into the guest", idtvec, (int)info1); 1451 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1452 errcode_valid, info1, 0); 1453 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1454 __func__, error)); 1455 } 1456 handled = 1; 1457 break; 1458 case VMCB_EXIT_MSR: /* MSR access. */ 1459 eax = state->rax; 1460 ecx = ctx->sctx_rcx; 1461 edx = ctx->sctx_rdx; 1462 retu = false; 1463 1464 if (info1) { 1465 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1466 val = (uint64_t)edx << 32 | eax; 1467 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1468 ecx, val); 1469 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1470 vmexit->exitcode = VM_EXITCODE_WRMSR; 1471 vmexit->u.msr.code = ecx; 1472 vmexit->u.msr.wval = val; 1473 } else if (!retu) { 1474 handled = 1; 1475 } else { 1476 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1477 ("emulate_wrmsr retu with bogus exitcode")); 1478 } 1479 } else { 1480 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1481 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1482 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1483 vmexit->exitcode = VM_EXITCODE_RDMSR; 1484 vmexit->u.msr.code = ecx; 1485 } else if (!retu) { 1486 handled = 1; 1487 } else { 1488 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1489 ("emulate_rdmsr retu with bogus exitcode")); 1490 } 1491 } 1492 break; 1493 case VMCB_EXIT_IO: 1494 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1495 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1496 break; 1497 case VMCB_EXIT_CPUID: 1498 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1499 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax, 1500 &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx); 1501 break; 1502 case VMCB_EXIT_HLT: 1503 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1504 vmexit->exitcode = VM_EXITCODE_HLT; 1505 vmexit->u.hlt.rflags = state->rflags; 1506 break; 1507 case VMCB_EXIT_PAUSE: 1508 vmexit->exitcode = VM_EXITCODE_PAUSE; 1509 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1510 break; 1511 case VMCB_EXIT_NPF: 1512 /* EXITINFO2 contains the faulting guest physical address */ 1513 if (info1 & VMCB_NPF_INFO1_RSV) { 1514 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1515 "reserved bits set: info1(%#lx) info2(%#lx)", 1516 info1, info2); 1517 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { 1518 vmexit->exitcode = VM_EXITCODE_PAGING; 1519 vmexit->u.paging.gpa = info2; 1520 vmexit->u.paging.fault_type = npf_fault_type(info1); 1521 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1522 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1523 "on gpa %#lx/%#lx at rip %#lx", 1524 info2, info1, state->rip); 1525 } else if (svm_npf_emul_fault(info1)) { 1526 svm_handle_inst_emul(vmcb, info2, vmexit); 1527 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1528 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1529 "for gpa %#lx/%#lx at rip %#lx", 1530 info2, info1, state->rip); 1531 } 1532 break; 1533 case VMCB_EXIT_MONITOR: 1534 vmexit->exitcode = VM_EXITCODE_MONITOR; 1535 break; 1536 case VMCB_EXIT_MWAIT: 1537 vmexit->exitcode = VM_EXITCODE_MWAIT; 1538 break; 1539 case VMCB_EXIT_SHUTDOWN: 1540 case VMCB_EXIT_VMRUN: 1541 case VMCB_EXIT_VMMCALL: 1542 case VMCB_EXIT_VMLOAD: 1543 case VMCB_EXIT_VMSAVE: 1544 case VMCB_EXIT_STGI: 1545 case VMCB_EXIT_CLGI: 1546 case VMCB_EXIT_SKINIT: 1547 case VMCB_EXIT_ICEBP: 1548 case VMCB_EXIT_INVD: 1549 case VMCB_EXIT_INVLPGA: 1550 vm_inject_ud(svm_sc->vm, vcpu); 1551 handled = 1; 1552 break; 1553 default: 1554 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1555 break; 1556 } 1557 1558 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1559 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1560 vmexit->rip, vmexit->inst_length); 1561 1562 if (handled) { 1563 vmexit->rip += vmexit->inst_length; 1564 vmexit->inst_length = 0; 1565 state->rip = vmexit->rip; 1566 } else { 1567 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1568 /* 1569 * If this VM exit was not claimed by anybody then 1570 * treat it as a generic SVM exit. 1571 */ 1572 vm_exit_svm(vmexit, code, info1, info2); 1573 } else { 1574 /* 1575 * The exitcode and collateral have been populated. 1576 * The VM exit will be processed further in userland. 1577 */ 1578 } 1579 } 1580 return (handled); 1581 } 1582 1583 static void 1584 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1585 { 1586 uint64_t intinfo; 1587 1588 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1589 return; 1590 1591 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1592 "valid: %#lx", __func__, intinfo)); 1593 1594 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1595 VMCB_EXITINTINFO_VECTOR(intinfo), 1596 VMCB_EXITINTINFO_EC(intinfo), 1597 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1598 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1599 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1600 } 1601 1602 /* 1603 * Inject event to virtual cpu. 1604 */ 1605 static void 1606 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1607 { 1608 struct vmcb_ctrl *ctrl; 1609 struct vmcb_state *state; 1610 struct svm_vcpu *vcpustate; 1611 uint8_t v_tpr; 1612 int vector, need_intr_window; 1613 int extint_pending; 1614 1615 state = svm_get_vmcb_state(sc, vcpu); 1616 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1617 vcpustate = svm_get_vcpu(sc, vcpu); 1618 1619 need_intr_window = 0; 1620 1621 if (vcpustate->nextrip != state->rip) { 1622 ctrl->intr_shadow = 0; 1623 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1624 "cleared due to rip change: %#lx/%#lx", 1625 vcpustate->nextrip, state->rip); 1626 } 1627 1628 /* 1629 * Inject pending events or exceptions for this vcpu. 1630 * 1631 * An event might be pending because the previous #VMEXIT happened 1632 * during event delivery (i.e. ctrl->exitintinfo). 1633 * 1634 * An event might also be pending because an exception was injected 1635 * by the hypervisor (e.g. #PF during instruction emulation). 1636 */ 1637 svm_inj_intinfo(sc, vcpu); 1638 1639 /* NMI event has priority over interrupts. */ 1640 if (vm_nmi_pending(sc->vm, vcpu)) { 1641 if (nmi_blocked(sc, vcpu)) { 1642 /* 1643 * Can't inject another NMI if the guest has not 1644 * yet executed an "iret" after the last NMI. 1645 */ 1646 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1647 "to NMI-blocking"); 1648 } else if (ctrl->intr_shadow) { 1649 /* 1650 * Can't inject an NMI if the vcpu is in an intr_shadow. 1651 */ 1652 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1653 "interrupt shadow"); 1654 need_intr_window = 1; 1655 goto done; 1656 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1657 /* 1658 * If there is already an exception/interrupt pending 1659 * then defer the NMI until after that. 1660 */ 1661 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1662 "eventinj %#lx", ctrl->eventinj); 1663 1664 /* 1665 * Use self-IPI to trigger a VM-exit as soon as 1666 * possible after the event injection is completed. 1667 * 1668 * This works only if the external interrupt exiting 1669 * is at a lower priority than the event injection. 1670 * 1671 * Although not explicitly specified in APMv2 the 1672 * relative priorities were verified empirically. 1673 */ 1674 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1675 } else { 1676 vm_nmi_clear(sc->vm, vcpu); 1677 1678 /* Inject NMI, vector number is not used */ 1679 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1680 IDT_NMI, 0, false); 1681 1682 /* virtual NMI blocking is now in effect */ 1683 enable_nmi_blocking(sc, vcpu); 1684 1685 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1686 } 1687 } 1688 1689 extint_pending = vm_extint_pending(sc->vm, vcpu); 1690 if (!extint_pending) { 1691 if (!vlapic_pending_intr(vlapic, &vector)) 1692 goto done; 1693 KASSERT(vector >= 16 && vector <= 255, 1694 ("invalid vector %d from local APIC", vector)); 1695 } else { 1696 /* Ask the legacy pic for a vector to inject */ 1697 vatpic_pending_intr(sc->vm, &vector); 1698 KASSERT(vector >= 0 && vector <= 255, 1699 ("invalid vector %d from INTR", vector)); 1700 } 1701 1702 /* 1703 * If the guest has disabled interrupts or is in an interrupt shadow 1704 * then we cannot inject the pending interrupt. 1705 */ 1706 if ((state->rflags & PSL_I) == 0) { 1707 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1708 "rflags %#lx", vector, state->rflags); 1709 need_intr_window = 1; 1710 goto done; 1711 } 1712 1713 if (ctrl->intr_shadow) { 1714 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1715 "interrupt shadow", vector); 1716 need_intr_window = 1; 1717 goto done; 1718 } 1719 1720 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1721 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1722 "eventinj %#lx", vector, ctrl->eventinj); 1723 need_intr_window = 1; 1724 goto done; 1725 } 1726 1727 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1728 1729 if (!extint_pending) { 1730 vlapic_intr_accepted(vlapic, vector); 1731 } else { 1732 vm_extint_clear(sc->vm, vcpu); 1733 vatpic_intr_accepted(sc->vm, vector); 1734 } 1735 1736 /* 1737 * Force a VM-exit as soon as the vcpu is ready to accept another 1738 * interrupt. This is done because the PIC might have another vector 1739 * that it wants to inject. Also, if the APIC has a pending interrupt 1740 * that was preempted by the ExtInt then it allows us to inject the 1741 * APIC vector as soon as possible. 1742 */ 1743 need_intr_window = 1; 1744 done: 1745 /* 1746 * The guest can modify the TPR by writing to %CR8. In guest mode 1747 * the processor reflects this write to V_TPR without hypervisor 1748 * intervention. 1749 * 1750 * The guest can also modify the TPR by writing to it via the memory 1751 * mapped APIC page. In this case, the write will be emulated by the 1752 * hypervisor. For this reason V_TPR must be updated before every 1753 * VMRUN. 1754 */ 1755 v_tpr = vlapic_get_cr8(vlapic); 1756 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1757 if (ctrl->v_tpr != v_tpr) { 1758 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1759 ctrl->v_tpr, v_tpr); 1760 ctrl->v_tpr = v_tpr; 1761 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1762 } 1763 1764 if (need_intr_window) { 1765 /* 1766 * We use V_IRQ in conjunction with the VINTR intercept to 1767 * trap into the hypervisor as soon as a virtual interrupt 1768 * can be delivered. 1769 * 1770 * Since injected events are not subject to intercept checks 1771 * we need to ensure that the V_IRQ is not actually going to 1772 * be delivered on VM entry. The KASSERT below enforces this. 1773 */ 1774 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1775 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1776 ("Bogus intr_window_exiting: eventinj (%#lx), " 1777 "intr_shadow (%u), rflags (%#lx)", 1778 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1779 enable_intr_window_exiting(sc, vcpu); 1780 } else { 1781 disable_intr_window_exiting(sc, vcpu); 1782 } 1783 } 1784 1785 static __inline void 1786 restore_host_tss(void) 1787 { 1788 struct system_segment_descriptor *tss_sd; 1789 1790 /* 1791 * The TSS descriptor was in use prior to launching the guest so it 1792 * has been marked busy. 1793 * 1794 * 'ltr' requires the descriptor to be marked available so change the 1795 * type to "64-bit available TSS". 1796 */ 1797 tss_sd = PCPU_GET(tss); 1798 tss_sd->sd_type = SDT_SYSTSS; 1799 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1800 } 1801 1802 static void 1803 check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1804 { 1805 struct svm_vcpu *vcpustate; 1806 struct vmcb_ctrl *ctrl; 1807 long eptgen; 1808 bool alloc_asid; 1809 1810 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1811 "active on cpu %u", __func__, thiscpu)); 1812 1813 vcpustate = svm_get_vcpu(sc, vcpuid); 1814 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1815 1816 /* 1817 * The TLB entries associated with the vcpu's ASID are not valid 1818 * if either of the following conditions is true: 1819 * 1820 * 1. The vcpu's ASID generation is different than the host cpu's 1821 * ASID generation. This happens when the vcpu migrates to a new 1822 * host cpu. It can also happen when the number of vcpus executing 1823 * on a host cpu is greater than the number of ASIDs available. 1824 * 1825 * 2. The pmap generation number is different than the value cached in 1826 * the 'vcpustate'. This happens when the host invalidates pages 1827 * belonging to the guest. 1828 * 1829 * asidgen eptgen Action 1830 * mismatch mismatch 1831 * 0 0 (a) 1832 * 0 1 (b1) or (b2) 1833 * 1 0 (c) 1834 * 1 1 (d) 1835 * 1836 * (a) There is no mismatch in eptgen or ASID generation and therefore 1837 * no further action is needed. 1838 * 1839 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1840 * retained and the TLB entries associated with this ASID 1841 * are flushed by VMRUN. 1842 * 1843 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1844 * allocated. 1845 * 1846 * (c) A new ASID is allocated. 1847 * 1848 * (d) A new ASID is allocated. 1849 */ 1850 1851 alloc_asid = false; 1852 eptgen = pmap->pm_eptgen; 1853 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1854 1855 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1856 alloc_asid = true; /* (c) and (d) */ 1857 } else if (vcpustate->eptgen != eptgen) { 1858 if (flush_by_asid()) 1859 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1860 else 1861 alloc_asid = true; /* (b2) */ 1862 } else { 1863 /* 1864 * This is the common case (a). 1865 */ 1866 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1867 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1868 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1869 } 1870 1871 if (alloc_asid) { 1872 if (++asid[thiscpu].num >= nasid) { 1873 asid[thiscpu].num = 1; 1874 if (++asid[thiscpu].gen == 0) 1875 asid[thiscpu].gen = 1; 1876 /* 1877 * If this cpu does not support "flush-by-asid" 1878 * then flush the entire TLB on a generation 1879 * bump. Subsequent ASID allocation in this 1880 * generation can be done without a TLB flush. 1881 */ 1882 if (!flush_by_asid()) 1883 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1884 } 1885 vcpustate->asid.gen = asid[thiscpu].gen; 1886 vcpustate->asid.num = asid[thiscpu].num; 1887 1888 ctrl->asid = vcpustate->asid.num; 1889 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1890 /* 1891 * If this cpu supports "flush-by-asid" then the TLB 1892 * was not flushed after the generation bump. The TLB 1893 * is flushed selectively after every new ASID allocation. 1894 */ 1895 if (flush_by_asid()) 1896 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1897 } 1898 vcpustate->eptgen = eptgen; 1899 1900 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1901 KASSERT(ctrl->asid == vcpustate->asid.num, 1902 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1903 } 1904 1905 static __inline void 1906 disable_gintr(void) 1907 { 1908 1909 __asm __volatile("clgi"); 1910 } 1911 1912 static __inline void 1913 enable_gintr(void) 1914 { 1915 1916 __asm __volatile("stgi"); 1917 } 1918 1919 static __inline void 1920 svm_dr_enter_guest(struct svm_regctx *gctx) 1921 { 1922 1923 /* Save host control debug registers. */ 1924 gctx->host_dr7 = rdr7(); 1925 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1926 1927 /* 1928 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1929 * exceptions in the host based on the guest DRx values. The 1930 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1931 * VMCB. 1932 */ 1933 load_dr7(0); 1934 wrmsr(MSR_DEBUGCTLMSR, 0); 1935 1936 /* Save host debug registers. */ 1937 gctx->host_dr0 = rdr0(); 1938 gctx->host_dr1 = rdr1(); 1939 gctx->host_dr2 = rdr2(); 1940 gctx->host_dr3 = rdr3(); 1941 gctx->host_dr6 = rdr6(); 1942 1943 /* Restore guest debug registers. */ 1944 load_dr0(gctx->sctx_dr0); 1945 load_dr1(gctx->sctx_dr1); 1946 load_dr2(gctx->sctx_dr2); 1947 load_dr3(gctx->sctx_dr3); 1948 } 1949 1950 static __inline void 1951 svm_dr_leave_guest(struct svm_regctx *gctx) 1952 { 1953 1954 /* Save guest debug registers. */ 1955 gctx->sctx_dr0 = rdr0(); 1956 gctx->sctx_dr1 = rdr1(); 1957 gctx->sctx_dr2 = rdr2(); 1958 gctx->sctx_dr3 = rdr3(); 1959 1960 /* 1961 * Restore host debug registers. Restore DR7 and DEBUGCTL 1962 * last. 1963 */ 1964 load_dr0(gctx->host_dr0); 1965 load_dr1(gctx->host_dr1); 1966 load_dr2(gctx->host_dr2); 1967 load_dr3(gctx->host_dr3); 1968 load_dr6(gctx->host_dr6); 1969 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1970 load_dr7(gctx->host_dr7); 1971 } 1972 1973 /* 1974 * Start vcpu with specified RIP. 1975 */ 1976 static int 1977 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1978 struct vm_eventinfo *evinfo) 1979 { 1980 struct svm_regctx *gctx; 1981 struct svm_softc *svm_sc; 1982 struct svm_vcpu *vcpustate; 1983 struct vmcb_state *state; 1984 struct vmcb_ctrl *ctrl; 1985 struct vm_exit *vmexit; 1986 struct vlapic *vlapic; 1987 struct vm *vm; 1988 uint64_t vmcb_pa; 1989 int handled; 1990 uint16_t ldt_sel; 1991 1992 svm_sc = arg; 1993 vm = svm_sc->vm; 1994 1995 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1996 state = svm_get_vmcb_state(svm_sc, vcpu); 1997 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1998 vmexit = vm_exitinfo(vm, vcpu); 1999 vlapic = vm_lapic(vm, vcpu); 2000 2001 gctx = svm_get_guest_regctx(svm_sc, vcpu); 2002 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 2003 2004 if (vcpustate->lastcpu != curcpu) { 2005 /* 2006 * Force new ASID allocation by invalidating the generation. 2007 */ 2008 vcpustate->asid.gen = 0; 2009 2010 /* 2011 * Invalidate the VMCB state cache by marking all fields dirty. 2012 */ 2013 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 2014 2015 /* 2016 * XXX 2017 * Setting 'vcpustate->lastcpu' here is bit premature because 2018 * we may return from this function without actually executing 2019 * the VMRUN instruction. This could happen if a rendezvous 2020 * or an AST is pending on the first time through the loop. 2021 * 2022 * This works for now but any new side-effects of vcpu 2023 * migration should take this case into account. 2024 */ 2025 vcpustate->lastcpu = curcpu; 2026 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 2027 } 2028 2029 svm_msr_guest_enter(svm_sc, vcpu); 2030 2031 /* Update Guest RIP */ 2032 state->rip = rip; 2033 2034 do { 2035 /* 2036 * Disable global interrupts to guarantee atomicity during 2037 * loading of guest state. This includes not only the state 2038 * loaded by the "vmrun" instruction but also software state 2039 * maintained by the hypervisor: suspended and rendezvous 2040 * state, NPT generation number, vlapic interrupts etc. 2041 */ 2042 disable_gintr(); 2043 2044 if (vcpu_suspended(evinfo)) { 2045 enable_gintr(); 2046 vm_exit_suspended(vm, vcpu, state->rip); 2047 break; 2048 } 2049 2050 if (vcpu_rendezvous_pending(evinfo)) { 2051 enable_gintr(); 2052 vm_exit_rendezvous(vm, vcpu, state->rip); 2053 break; 2054 } 2055 2056 if (vcpu_reqidle(evinfo)) { 2057 enable_gintr(); 2058 vm_exit_reqidle(vm, vcpu, state->rip); 2059 break; 2060 } 2061 2062 /* We are asked to give the cpu by scheduler. */ 2063 if (vcpu_should_yield(vm, vcpu)) { 2064 enable_gintr(); 2065 vm_exit_astpending(vm, vcpu, state->rip); 2066 break; 2067 } 2068 2069 if (vcpu_debugged(vm, vcpu)) { 2070 enable_gintr(); 2071 vm_exit_debug(vm, vcpu, state->rip); 2072 break; 2073 } 2074 2075 /* 2076 * #VMEXIT resumes the host with the guest LDTR, so 2077 * save the current LDT selector so it can be restored 2078 * after an exit. The userspace hypervisor probably 2079 * doesn't use a LDT, but save and restore it to be 2080 * safe. 2081 */ 2082 ldt_sel = sldt(); 2083 2084 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2085 2086 /* Activate the nested pmap on 'curcpu' */ 2087 CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active); 2088 2089 /* 2090 * Check the pmap generation and the ASID generation to 2091 * ensure that the vcpu does not use stale TLB mappings. 2092 */ 2093 check_asid(svm_sc, vcpu, pmap, curcpu); 2094 2095 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 2096 vcpustate->dirty = 0; 2097 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2098 2099 /* Launch Virtual Machine. */ 2100 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 2101 svm_dr_enter_guest(gctx); 2102 svm_launch(vmcb_pa, gctx, get_pcpu()); 2103 svm_dr_leave_guest(gctx); 2104 2105 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2106 2107 /* 2108 * The host GDTR and IDTR is saved by VMRUN and restored 2109 * automatically on #VMEXIT. However, the host TSS needs 2110 * to be restored explicitly. 2111 */ 2112 restore_host_tss(); 2113 2114 /* Restore host LDTR. */ 2115 lldt(ldt_sel); 2116 2117 /* #VMEXIT disables interrupts so re-enable them here. */ 2118 enable_gintr(); 2119 2120 /* Update 'nextrip' */ 2121 vcpustate->nextrip = state->rip; 2122 2123 /* Handle #VMEXIT and if required return to user space. */ 2124 handled = svm_vmexit(svm_sc, vcpu, vmexit); 2125 } while (handled); 2126 2127 svm_msr_guest_exit(svm_sc, vcpu); 2128 2129 return (0); 2130 } 2131 2132 static void 2133 svm_vmcleanup(void *arg) 2134 { 2135 struct svm_softc *sc = arg; 2136 2137 contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM); 2138 contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM); 2139 free(sc, M_SVM); 2140 } 2141 2142 static register_t * 2143 swctx_regptr(struct svm_regctx *regctx, int reg) 2144 { 2145 2146 switch (reg) { 2147 case VM_REG_GUEST_RBX: 2148 return (®ctx->sctx_rbx); 2149 case VM_REG_GUEST_RCX: 2150 return (®ctx->sctx_rcx); 2151 case VM_REG_GUEST_RDX: 2152 return (®ctx->sctx_rdx); 2153 case VM_REG_GUEST_RDI: 2154 return (®ctx->sctx_rdi); 2155 case VM_REG_GUEST_RSI: 2156 return (®ctx->sctx_rsi); 2157 case VM_REG_GUEST_RBP: 2158 return (®ctx->sctx_rbp); 2159 case VM_REG_GUEST_R8: 2160 return (®ctx->sctx_r8); 2161 case VM_REG_GUEST_R9: 2162 return (®ctx->sctx_r9); 2163 case VM_REG_GUEST_R10: 2164 return (®ctx->sctx_r10); 2165 case VM_REG_GUEST_R11: 2166 return (®ctx->sctx_r11); 2167 case VM_REG_GUEST_R12: 2168 return (®ctx->sctx_r12); 2169 case VM_REG_GUEST_R13: 2170 return (®ctx->sctx_r13); 2171 case VM_REG_GUEST_R14: 2172 return (®ctx->sctx_r14); 2173 case VM_REG_GUEST_R15: 2174 return (®ctx->sctx_r15); 2175 case VM_REG_GUEST_DR0: 2176 return (®ctx->sctx_dr0); 2177 case VM_REG_GUEST_DR1: 2178 return (®ctx->sctx_dr1); 2179 case VM_REG_GUEST_DR2: 2180 return (®ctx->sctx_dr2); 2181 case VM_REG_GUEST_DR3: 2182 return (®ctx->sctx_dr3); 2183 default: 2184 return (NULL); 2185 } 2186 } 2187 2188 static int 2189 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 2190 { 2191 struct svm_softc *svm_sc; 2192 register_t *reg; 2193 2194 svm_sc = arg; 2195 2196 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2197 return (svm_get_intr_shadow(svm_sc, vcpu, val)); 2198 } 2199 2200 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) { 2201 return (0); 2202 } 2203 2204 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2205 2206 if (reg != NULL) { 2207 *val = *reg; 2208 return (0); 2209 } 2210 2211 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident); 2212 return (EINVAL); 2213 } 2214 2215 static int 2216 svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 2217 { 2218 struct svm_softc *svm_sc; 2219 register_t *reg; 2220 2221 svm_sc = arg; 2222 2223 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2224 return (svm_modify_intr_shadow(svm_sc, vcpu, val)); 2225 } 2226 2227 /* Do not permit user write access to VMCB fields by offset. */ 2228 if (!VMCB_ACCESS_OK(ident)) { 2229 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) { 2230 return (0); 2231 } 2232 } 2233 2234 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2235 2236 if (reg != NULL) { 2237 *reg = val; 2238 return (0); 2239 } 2240 2241 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) { 2242 /* Ignore. */ 2243 return (0); 2244 } 2245 2246 /* 2247 * XXX deal with CR3 and invalidate TLB entries tagged with the 2248 * vcpu's ASID. This needs to be treated differently depending on 2249 * whether 'running' is true/false. 2250 */ 2251 2252 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); 2253 return (EINVAL); 2254 } 2255 2256 #ifdef BHYVE_SNAPSHOT 2257 static int 2258 svm_snapshot_reg(void *arg, int vcpu, int ident, 2259 struct vm_snapshot_meta *meta) 2260 { 2261 int ret; 2262 uint64_t val; 2263 2264 if (meta->op == VM_SNAPSHOT_SAVE) { 2265 ret = svm_getreg(arg, vcpu, ident, &val); 2266 if (ret != 0) 2267 goto done; 2268 2269 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2270 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2271 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2272 2273 ret = svm_setreg(arg, vcpu, ident, val); 2274 if (ret != 0) 2275 goto done; 2276 } else { 2277 ret = EINVAL; 2278 goto done; 2279 } 2280 2281 done: 2282 return (ret); 2283 } 2284 #endif 2285 2286 static int 2287 svm_setcap(void *arg, int vcpu, int type, int val) 2288 { 2289 struct svm_softc *sc; 2290 int error; 2291 2292 sc = arg; 2293 error = 0; 2294 switch (type) { 2295 case VM_CAP_HALT_EXIT: 2296 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2297 VMCB_INTCPT_HLT, val); 2298 break; 2299 case VM_CAP_PAUSE_EXIT: 2300 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2301 VMCB_INTCPT_PAUSE, val); 2302 break; 2303 case VM_CAP_UNRESTRICTED_GUEST: 2304 /* Unrestricted guest execution cannot be disabled in SVM */ 2305 if (val == 0) 2306 error = EINVAL; 2307 break; 2308 default: 2309 error = ENOENT; 2310 break; 2311 } 2312 return (error); 2313 } 2314 2315 static int 2316 svm_getcap(void *arg, int vcpu, int type, int *retval) 2317 { 2318 struct svm_softc *sc; 2319 int error; 2320 2321 sc = arg; 2322 error = 0; 2323 2324 switch (type) { 2325 case VM_CAP_HALT_EXIT: 2326 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2327 VMCB_INTCPT_HLT); 2328 break; 2329 case VM_CAP_PAUSE_EXIT: 2330 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2331 VMCB_INTCPT_PAUSE); 2332 break; 2333 case VM_CAP_UNRESTRICTED_GUEST: 2334 *retval = 1; /* unrestricted guest is always enabled */ 2335 break; 2336 default: 2337 error = ENOENT; 2338 break; 2339 } 2340 return (error); 2341 } 2342 2343 static struct vlapic * 2344 svm_vlapic_init(void *arg, int vcpuid) 2345 { 2346 struct svm_softc *svm_sc; 2347 struct vlapic *vlapic; 2348 2349 svm_sc = arg; 2350 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2351 vlapic->vm = svm_sc->vm; 2352 vlapic->vcpuid = vcpuid; 2353 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2354 2355 vlapic_init(vlapic); 2356 2357 return (vlapic); 2358 } 2359 2360 static void 2361 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2362 { 2363 2364 vlapic_cleanup(vlapic); 2365 free(vlapic, M_SVM_VLAPIC); 2366 } 2367 2368 #ifdef BHYVE_SNAPSHOT 2369 static int 2370 svm_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta) 2371 { 2372 /* struct svm_softc is AMD's representation for SVM softc */ 2373 struct svm_softc *sc; 2374 struct svm_vcpu *vcpu; 2375 struct vmcb *vmcb; 2376 uint64_t val; 2377 int i; 2378 int ret; 2379 2380 sc = arg; 2381 2382 KASSERT(sc != NULL, ("%s: arg was NULL", __func__)); 2383 2384 SNAPSHOT_VAR_OR_LEAVE(sc->nptp, meta, ret, done); 2385 2386 for (i = 0; i < VM_MAXCPU; i++) { 2387 vcpu = &sc->vcpu[i]; 2388 vmcb = &vcpu->vmcb; 2389 2390 /* VMCB fields for virtual cpu i */ 2391 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.v_tpr, meta, ret, done); 2392 val = vmcb->ctrl.v_tpr; 2393 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2394 vmcb->ctrl.v_tpr = val; 2395 2396 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.asid, meta, ret, done); 2397 val = vmcb->ctrl.np_enable; 2398 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2399 vmcb->ctrl.np_enable = val; 2400 2401 val = vmcb->ctrl.intr_shadow; 2402 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2403 vmcb->ctrl.intr_shadow = val; 2404 SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.tlb_ctrl, meta, ret, done); 2405 2406 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad1, 2407 sizeof(vmcb->state.pad1), 2408 meta, ret, done); 2409 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cpl, meta, ret, done); 2410 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad2, 2411 sizeof(vmcb->state.pad2), 2412 meta, ret, done); 2413 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.efer, meta, ret, done); 2414 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad3, 2415 sizeof(vmcb->state.pad3), 2416 meta, ret, done); 2417 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr4, meta, ret, done); 2418 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr3, meta, ret, done); 2419 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr0, meta, ret, done); 2420 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr7, meta, ret, done); 2421 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr6, meta, ret, done); 2422 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rflags, meta, ret, done); 2423 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rip, meta, ret, done); 2424 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad4, 2425 sizeof(vmcb->state.pad4), 2426 meta, ret, done); 2427 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rsp, meta, ret, done); 2428 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad5, 2429 sizeof(vmcb->state.pad5), 2430 meta, ret, done); 2431 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rax, meta, ret, done); 2432 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.star, meta, ret, done); 2433 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.lstar, meta, ret, done); 2434 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cstar, meta, ret, done); 2435 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sfmask, meta, ret, done); 2436 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.kernelgsbase, 2437 meta, ret, done); 2438 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_cs, meta, ret, done); 2439 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_esp, 2440 meta, ret, done); 2441 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_eip, 2442 meta, ret, done); 2443 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr2, meta, ret, done); 2444 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad6, 2445 sizeof(vmcb->state.pad6), 2446 meta, ret, done); 2447 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.g_pat, meta, ret, done); 2448 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dbgctl, meta, ret, done); 2449 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_from, meta, ret, done); 2450 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_to, meta, ret, done); 2451 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_from, meta, ret, done); 2452 SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_to, meta, ret, done); 2453 SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad7, 2454 sizeof(vmcb->state.pad7), 2455 meta, ret, done); 2456 2457 /* Snapshot swctx for virtual cpu i */ 2458 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, ret, done); 2459 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, ret, done); 2460 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, ret, done); 2461 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, ret, done); 2462 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, ret, done); 2463 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, ret, done); 2464 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, ret, done); 2465 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, ret, done); 2466 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, ret, done); 2467 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, ret, done); 2468 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, ret, done); 2469 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, ret, done); 2470 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, ret, done); 2471 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, ret, done); 2472 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, ret, done); 2473 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, ret, done); 2474 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, ret, done); 2475 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, ret, done); 2476 2477 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr0, meta, ret, done); 2478 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr1, meta, ret, done); 2479 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr2, meta, ret, done); 2480 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr3, meta, ret, done); 2481 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr6, meta, ret, done); 2482 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr7, meta, ret, done); 2483 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_debugctl, meta, ret, 2484 done); 2485 2486 /* Restore other svm_vcpu struct fields */ 2487 2488 /* Restore NEXTRIP field */ 2489 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); 2490 2491 /* Restore lastcpu field */ 2492 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, ret, done); 2493 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, ret, done); 2494 2495 /* Restore EPTGEN field - EPT is Extended Page Tabel */ 2496 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, ret, done); 2497 2498 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, ret, done); 2499 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, ret, done); 2500 2501 /* Set all caches dirty */ 2502 if (meta->op == VM_SNAPSHOT_RESTORE) { 2503 svm_set_dirty(sc, i, VMCB_CACHE_ASID); 2504 svm_set_dirty(sc, i, VMCB_CACHE_IOPM); 2505 svm_set_dirty(sc, i, VMCB_CACHE_I); 2506 svm_set_dirty(sc, i, VMCB_CACHE_TPR); 2507 svm_set_dirty(sc, i, VMCB_CACHE_CR2); 2508 svm_set_dirty(sc, i, VMCB_CACHE_CR); 2509 svm_set_dirty(sc, i, VMCB_CACHE_DT); 2510 svm_set_dirty(sc, i, VMCB_CACHE_SEG); 2511 svm_set_dirty(sc, i, VMCB_CACHE_NP); 2512 } 2513 } 2514 2515 if (meta->op == VM_SNAPSHOT_RESTORE) 2516 flush_by_asid(); 2517 2518 done: 2519 return (ret); 2520 } 2521 2522 static int 2523 svm_snapshot_vmcx(void *arg, struct vm_snapshot_meta *meta, int vcpu) 2524 { 2525 struct vmcb *vmcb; 2526 struct svm_softc *sc; 2527 int err, running, hostcpu; 2528 2529 sc = (struct svm_softc *)arg; 2530 err = 0; 2531 2532 KASSERT(arg != NULL, ("%s: arg was NULL", __func__)); 2533 vmcb = svm_get_vmcb(sc, vcpu); 2534 2535 running = vcpu_is_running(sc->vm, vcpu, &hostcpu); 2536 if (running && hostcpu !=curcpu) { 2537 printf("%s: %s%d is running", __func__, vm_name(sc->vm), vcpu); 2538 return (EINVAL); 2539 } 2540 2541 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta); 2542 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta); 2543 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta); 2544 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta); 2545 2546 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta); 2547 2548 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta); 2549 2550 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta); 2551 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta); 2552 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta); 2553 2554 /* Guest segments */ 2555 /* ES */ 2556 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta); 2557 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta); 2558 2559 /* CS */ 2560 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta); 2561 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta); 2562 2563 /* SS */ 2564 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta); 2565 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta); 2566 2567 /* DS */ 2568 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta); 2569 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta); 2570 2571 /* FS */ 2572 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta); 2573 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta); 2574 2575 /* GS */ 2576 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta); 2577 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta); 2578 2579 /* TR */ 2580 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta); 2581 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta); 2582 2583 /* LDTR */ 2584 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta); 2585 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta); 2586 2587 /* EFER */ 2588 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta); 2589 2590 /* IDTR and GDTR */ 2591 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta); 2592 err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta); 2593 2594 /* Specific AMD registers */ 2595 err += vmcb_snapshot_any(sc, vcpu, 2596 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2597 err += vmcb_snapshot_any(sc, vcpu, 2598 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2599 err += vmcb_snapshot_any(sc, vcpu, 2600 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2601 2602 err += vmcb_snapshot_any(sc, vcpu, 2603 VMCB_ACCESS(VMCB_OFF_NPT_BASE, 8), meta); 2604 2605 err += vmcb_snapshot_any(sc, vcpu, 2606 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2607 err += vmcb_snapshot_any(sc, vcpu, 2608 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2609 err += vmcb_snapshot_any(sc, vcpu, 2610 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2611 err += vmcb_snapshot_any(sc, vcpu, 2612 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2613 err += vmcb_snapshot_any(sc, vcpu, 2614 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2615 2616 err += vmcb_snapshot_any(sc, vcpu, 2617 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2618 2619 err += vmcb_snapshot_any(sc, vcpu, 2620 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2621 err += vmcb_snapshot_any(sc, vcpu, 2622 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2623 err += vmcb_snapshot_any(sc, vcpu, 2624 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2625 2626 err += vmcb_snapshot_any(sc, vcpu, 2627 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2628 2629 err += vmcb_snapshot_any(sc, vcpu, 2630 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2631 2632 err += vmcb_snapshot_any(sc, vcpu, 2633 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2634 err += vmcb_snapshot_any(sc, vcpu, 2635 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2636 err += vmcb_snapshot_any(sc, vcpu, 2637 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2638 err += vmcb_snapshot_any(sc, vcpu, 2639 VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2640 2641 err += vmcb_snapshot_any(sc, vcpu, 2642 VMCB_ACCESS(VMCB_OFF_IO_PERM, 8), meta); 2643 err += vmcb_snapshot_any(sc, vcpu, 2644 VMCB_ACCESS(VMCB_OFF_MSR_PERM, 8), meta); 2645 2646 err += vmcb_snapshot_any(sc, vcpu, 2647 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2648 2649 err += vmcb_snapshot_any(sc, vcpu, 2650 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2651 2652 err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2653 2654 return (err); 2655 } 2656 2657 static int 2658 svm_restore_tsc(void *arg, int vcpu, uint64_t offset) 2659 { 2660 int err; 2661 2662 err = svm_set_tsc_offset(arg, vcpu, offset); 2663 2664 return (err); 2665 } 2666 #endif 2667 2668 struct vmm_ops vmm_ops_amd = { 2669 .init = svm_init, 2670 .cleanup = svm_cleanup, 2671 .resume = svm_restore, 2672 .vminit = svm_vminit, 2673 .vmrun = svm_vmrun, 2674 .vmcleanup = svm_vmcleanup, 2675 .vmgetreg = svm_getreg, 2676 .vmsetreg = svm_setreg, 2677 .vmgetdesc = vmcb_getdesc, 2678 .vmsetdesc = vmcb_setdesc, 2679 .vmgetcap = svm_getcap, 2680 .vmsetcap = svm_setcap, 2681 .vmspace_alloc = svm_npt_alloc, 2682 .vmspace_free = svm_npt_free, 2683 .vlapic_init = svm_vlapic_init, 2684 .vlapic_cleanup = svm_vlapic_cleanup, 2685 #ifdef BHYVE_SNAPSHOT 2686 .vmsnapshot = svm_snapshot_vmi, 2687 .vmcx_snapshot = svm_snapshot_vmcx, 2688 .vm_restore_tsc = svm_restore_tsc, 2689 #endif 2690 }; 2691