1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_bhyve_snapshot.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/smp.h> 37 #include <sys/kernel.h> 38 #include <sys/malloc.h> 39 #include <sys/pcpu.h> 40 #include <sys/proc.h> 41 #include <sys/reg.h> 42 #include <sys/smr.h> 43 #include <sys/sysctl.h> 44 45 #include <vm/vm.h> 46 #include <vm/vm_extern.h> 47 #include <vm/pmap.h> 48 49 #include <machine/cpufunc.h> 50 #include <machine/psl.h> 51 #include <machine/md_var.h> 52 #include <machine/specialreg.h> 53 #include <machine/smp.h> 54 #include <machine/vmm.h> 55 #include <machine/vmm_dev.h> 56 #include <machine/vmm_instruction_emul.h> 57 #include <machine/vmm_snapshot.h> 58 59 #include "vmm_lapic.h" 60 #include "vmm_stat.h" 61 #include "vmm_ktr.h" 62 #include "vmm_ioport.h" 63 #include "vatpic.h" 64 #include "vlapic.h" 65 #include "vlapic_priv.h" 66 67 #include "x86.h" 68 #include "vmcb.h" 69 #include "svm.h" 70 #include "svm_softc.h" 71 #include "svm_msr.h" 72 #include "npt.h" 73 74 SYSCTL_DECL(_hw_vmm); 75 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 76 NULL); 77 78 /* 79 * SVM CPUID function 0x8000_000A, edx bit decoding. 80 */ 81 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 82 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 83 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 84 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 85 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 86 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 87 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 88 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 89 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 90 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 91 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 92 93 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 94 VMCB_CACHE_IOPM | \ 95 VMCB_CACHE_I | \ 96 VMCB_CACHE_TPR | \ 97 VMCB_CACHE_CR2 | \ 98 VMCB_CACHE_CR | \ 99 VMCB_CACHE_DR | \ 100 VMCB_CACHE_DT | \ 101 VMCB_CACHE_SEG | \ 102 VMCB_CACHE_NP) 103 104 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 105 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 106 0, NULL); 107 108 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 109 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 110 111 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 112 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 113 "SVM features advertised by CPUID.8000000AH:EDX"); 114 115 static int disable_npf_assist; 116 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 117 &disable_npf_assist, 0, NULL); 118 119 /* Maximum ASIDs supported by the processor */ 120 static uint32_t nasid; 121 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 122 "Number of ASIDs supported by this processor"); 123 124 /* Current ASID generation for each host cpu */ 125 static struct asid asid[MAXCPU]; 126 127 /* SVM host state saved area of size 4KB for each physical core. */ 128 static uint8_t *hsave; 129 130 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 131 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 132 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 133 134 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); 135 static int svm_setreg(void *vcpui, int ident, uint64_t val); 136 137 static __inline int 138 flush_by_asid(void) 139 { 140 141 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 142 } 143 144 static __inline int 145 decode_assist(void) 146 { 147 148 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 149 } 150 151 static void 152 svm_disable(void *arg __unused) 153 { 154 uint64_t efer; 155 156 efer = rdmsr(MSR_EFER); 157 efer &= ~EFER_SVM; 158 wrmsr(MSR_EFER, efer); 159 } 160 161 /* 162 * Disable SVM on all CPUs. 163 */ 164 static int 165 svm_modcleanup(void) 166 { 167 168 smp_rendezvous(NULL, svm_disable, NULL, NULL); 169 170 if (hsave != NULL) 171 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); 172 173 return (0); 174 } 175 176 /* 177 * Verify that all the features required by bhyve are available. 178 */ 179 static int 180 check_svm_features(void) 181 { 182 u_int regs[4]; 183 184 /* CPUID Fn8000_000A is for SVM */ 185 do_cpuid(0x8000000A, regs); 186 svm_feature &= regs[3]; 187 188 /* 189 * The number of ASIDs can be configured to be less than what is 190 * supported by the hardware but not more. 
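 * For example (assuming the usual read-only tunable naming for the sysctl
 * declared above), setting hw.vmm.svm.num_asids=64 in loader.conf caps
 * 'nasid' at 64 even if CPUID.8000000AH:EBX reports more, while a value of
 * 0 or one larger than the CPUID count falls back to the hardware limit.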
191 */ 192 if (nasid == 0 || nasid > regs[1]) 193 nasid = regs[1]; 194 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 195 196 /* bhyve requires the Nested Paging feature */ 197 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 198 printf("SVM: Nested Paging feature not available.\n"); 199 return (ENXIO); 200 } 201 202 /* bhyve requires the NRIP Save feature */ 203 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 204 printf("SVM: NRIP Save feature not available.\n"); 205 return (ENXIO); 206 } 207 208 return (0); 209 } 210 211 static void 212 svm_enable(void *arg __unused) 213 { 214 uint64_t efer; 215 216 efer = rdmsr(MSR_EFER); 217 efer |= EFER_SVM; 218 wrmsr(MSR_EFER, efer); 219 220 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); 221 } 222 223 /* 224 * Return 1 if SVM is enabled on this processor and 0 otherwise. 225 */ 226 static int 227 svm_available(void) 228 { 229 uint64_t msr; 230 231 /* Section 15.4 Enabling SVM from APM2. */ 232 if ((amd_feature2 & AMDID2_SVM) == 0) { 233 printf("SVM: not available.\n"); 234 return (0); 235 } 236 237 msr = rdmsr(MSR_VM_CR); 238 if ((msr & VM_CR_SVMDIS) != 0) { 239 printf("SVM: disabled by BIOS.\n"); 240 return (0); 241 } 242 243 return (1); 244 } 245 246 static int 247 svm_modinit(int ipinum) 248 { 249 int error, cpu; 250 251 if (!svm_available()) 252 return (ENXIO); 253 254 error = check_svm_features(); 255 if (error) 256 return (error); 257 258 vmcb_clean &= VMCB_CACHE_DEFAULT; 259 260 for (cpu = 0; cpu < MAXCPU; cpu++) { 261 /* 262 * Initialize the host ASIDs to their "highest" valid values. 263 * 264 * The next ASID allocation will rollover both 'gen' and 'num' 265 * and start off the sequence at {1,1}. 266 */ 267 asid[cpu].gen = ~0UL; 268 asid[cpu].num = nasid - 1; 269 } 270 271 svm_msr_init(); 272 svm_npt_init(ipinum); 273 274 /* Enable SVM on all CPUs */ 275 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); 276 smp_rendezvous(NULL, svm_enable, NULL, NULL); 277 278 return (0); 279 } 280 281 static void 282 svm_modresume(void) 283 { 284 285 svm_enable(NULL); 286 } 287 288 #ifdef BHYVE_SNAPSHOT 289 void 290 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) 291 { 292 struct vmcb_ctrl *ctrl; 293 294 ctrl = svm_get_vmcb_ctrl(vcpu); 295 ctrl->tsc_offset = offset; 296 297 svm_set_dirty(vcpu, VMCB_CACHE_I); 298 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); 299 300 vm_set_tsc_offset(vcpu->vcpu, offset); 301 } 302 #endif 303 304 /* Pentium compatible MSRs */ 305 #define MSR_PENTIUM_START 0 306 #define MSR_PENTIUM_END 0x1FFF 307 /* AMD 6th generation and Intel compatible MSRs */ 308 #define MSR_AMD6TH_START 0xC0000000UL 309 #define MSR_AMD6TH_END 0xC0001FFFUL 310 /* AMD 7th and 8th generation compatible MSRs */ 311 #define MSR_AMD7TH_START 0xC0010000UL 312 #define MSR_AMD7TH_END 0xC0011FFFUL 313 314 /* 315 * Get the index and bit position for a MSR in permission bitmap. 316 * Two bits are used for each MSR: lower bit for read and higher bit for write. 
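 * As a worked example, MSR_LSTAR (0xC0000082) falls in the AMD 6th
 * generation range: base = 0x2000 (the Pentium-range MSRs that precede it),
 * off = 0x82, so *index = (0x2000 + 0x82) / 4 = 0x820 and
 * *bit = (0x82 % 4) * 2 = 4.  Clearing bits 4 and 5 of perm_bitmap[0x820]
 * then grants the guest direct read and write access to that MSR.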
317 */ 318 static int 319 svm_msr_index(uint64_t msr, int *index, int *bit) 320 { 321 uint32_t base, off; 322 323 *index = -1; 324 *bit = (msr % 4) * 2; 325 base = 0; 326 327 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 328 *index = msr / 4; 329 return (0); 330 } 331 332 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 333 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 334 off = (msr - MSR_AMD6TH_START); 335 *index = (off + base) / 4; 336 return (0); 337 } 338 339 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 340 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 341 off = (msr - MSR_AMD7TH_START); 342 *index = (off + base) / 4; 343 return (0); 344 } 345 346 return (EINVAL); 347 } 348 349 /* 350 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 351 */ 352 static void 353 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 354 { 355 int index, bit, error __diagused; 356 357 error = svm_msr_index(msr, &index, &bit); 358 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 359 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 360 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 361 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 362 "msr %#lx", __func__, bit, msr)); 363 364 if (read) 365 perm_bitmap[index] &= ~(1UL << bit); 366 367 if (write) 368 perm_bitmap[index] &= ~(2UL << bit); 369 } 370 371 static void 372 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 373 { 374 375 svm_msr_perm(perm_bitmap, msr, true, true); 376 } 377 378 static void 379 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 380 { 381 382 svm_msr_perm(perm_bitmap, msr, true, false); 383 } 384 385 static __inline int 386 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) 387 { 388 struct vmcb_ctrl *ctrl; 389 390 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 391 392 ctrl = svm_get_vmcb_ctrl(vcpu); 393 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 394 } 395 396 static __inline void 397 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) 398 { 399 struct vmcb_ctrl *ctrl; 400 uint32_t oldval; 401 402 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 403 404 ctrl = svm_get_vmcb_ctrl(vcpu); 405 oldval = ctrl->intercept[idx]; 406 407 if (enabled) 408 ctrl->intercept[idx] |= bitmask; 409 else 410 ctrl->intercept[idx] &= ~bitmask; 411 412 if (ctrl->intercept[idx] != oldval) { 413 svm_set_dirty(vcpu, VMCB_CACHE_I); 414 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, 415 oldval, ctrl->intercept[idx]); 416 } 417 } 418 419 static __inline void 420 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 421 { 422 423 svm_set_intercept(vcpu, off, bitmask, 0); 424 } 425 426 static __inline void 427 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 428 { 429 430 svm_set_intercept(vcpu, off, bitmask, 1); 431 } 432 433 static void 434 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, 435 uint64_t msrpm_base_pa, uint64_t np_pml4) 436 { 437 struct vmcb_ctrl *ctrl; 438 struct vmcb_state *state; 439 uint32_t mask; 440 int n; 441 442 ctrl = svm_get_vmcb_ctrl(vcpu); 443 state = svm_get_vmcb_state(vcpu); 444 445 ctrl->iopm_base_pa = iopm_base_pa; 446 ctrl->msrpm_base_pa = msrpm_base_pa; 447 448 /* Enable nested paging */ 449 ctrl->np_enable = 1; 450 ctrl->n_cr3 = np_pml4; 451 452 /* 453 * Intercept accesses to the control registers that are not shadowed 454 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 455 */ 456 for (n = 0; n < 16; n++) { 457 mask = (BIT(n) << 16) | BIT(n); 458 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 459 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); 460 else 461 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); 462 } 463 464 /* 465 * Intercept everything when tracing guest exceptions otherwise 466 * just intercept machine check exception. 467 */ 468 if (vcpu_trace_exceptions(vcpu->vcpu)) { 469 for (n = 0; n < 32; n++) { 470 /* 471 * Skip unimplemented vectors in the exception bitmap. 472 */ 473 if (n == 2 || n == 9) { 474 continue; 475 } 476 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); 477 } 478 } else { 479 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 480 } 481 482 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 483 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 484 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 485 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 486 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); 492 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 493 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 494 495 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 496 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 497 498 /* 499 * Intercept SVM instructions since AMD enables them in guests otherwise. 500 * Non-intercepted VMMCALL causes #UD, skip it. 
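 * (VMMCALL is deliberately left without an intercept: when it is not
 * intercepted the instruction raises #UD inside the guest on its own,
 * which is the behaviour we want, so there is nothing for the hypervisor
 * to handle.)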
501 */ 502 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 503 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 504 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 505 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 506 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 508 if (vcpu_trap_wbinvd(vcpu->vcpu)) { 509 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, 510 VMCB_INTCPT_WBINVD); 511 } 512 513 /* 514 * From section "Canonicalization and Consistency Checks" in APMv2 515 * the VMRUN intercept bit must be set to pass the consistency check. 516 */ 517 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 518 519 /* 520 * The ASID will be set to a non-zero value just before VMRUN. 521 */ 522 ctrl->asid = 0; 523 524 /* 525 * Section 15.21.1, Interrupt Masking in EFLAGS 526 * Section 15.21.2, Virtualizing APIC.TPR 527 * 528 * This must be set for %rflag and %cr8 isolation of guest and host. 529 */ 530 ctrl->v_intr_masking = 1; 531 532 /* Enable Last Branch Record aka LBR for debugging */ 533 ctrl->lbr_virt_en = 1; 534 state->dbgctl = BIT(0); 535 536 /* EFER_SVM must always be set when the guest is executing */ 537 state->efer = EFER_SVM; 538 539 /* Set up the PAT to power-on state */ 540 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 541 PAT_VALUE(1, PAT_WRITE_THROUGH) | 542 PAT_VALUE(2, PAT_UNCACHED) | 543 PAT_VALUE(3, PAT_UNCACHEABLE) | 544 PAT_VALUE(4, PAT_WRITE_BACK) | 545 PAT_VALUE(5, PAT_WRITE_THROUGH) | 546 PAT_VALUE(6, PAT_UNCACHED) | 547 PAT_VALUE(7, PAT_UNCACHEABLE); 548 549 /* Set up DR6/7 to power-on state */ 550 state->dr6 = DBREG_DR6_RESERVED1; 551 state->dr7 = DBREG_DR7_RESERVED1; 552 } 553 554 /* 555 * Initialize a virtual machine. 556 */ 557 static void * 558 svm_init(struct vm *vm, pmap_t pmap) 559 { 560 struct svm_softc *svm_sc; 561 562 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 563 564 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 565 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 566 if (svm_sc->msr_bitmap == NULL) 567 panic("contigmalloc of SVM MSR bitmap failed"); 568 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 569 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 570 if (svm_sc->iopm_bitmap == NULL) 571 panic("contigmalloc of SVM IO bitmap failed"); 572 573 svm_sc->vm = vm; 574 svm_sc->nptp = vtophys(pmap->pm_pmltop); 575 576 /* 577 * Intercept read and write accesses to all MSRs. 578 */ 579 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 580 581 /* 582 * Access to the following MSRs is redirected to the VMCB when the 583 * guest is executing. Therefore it is safe to allow the guest to 584 * read/write these MSRs directly without hypervisor involvement. 
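 * (The FS/GS/KernelGS base and SYSCALL/SYSENTER MSRs below are part of the
 * state swapped by VMLOAD/VMSAVE, and PAT is kept in the VMCB state save
 * area, so the guest's copies are context-switched with the vcpu rather
 * than trapped and emulated.)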
585 */ 586 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 587 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 589 590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 598 599 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 600 601 /* 602 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 603 */ 604 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 605 606 /* Intercept access to all I/O ports. */ 607 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 608 609 return (svm_sc); 610 } 611 612 static void * 613 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 614 { 615 struct svm_softc *sc = vmi; 616 struct svm_vcpu *vcpu; 617 618 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); 619 vcpu->sc = sc; 620 vcpu->vcpu = vcpu1; 621 vcpu->vcpuid = vcpuid; 622 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, 623 M_WAITOK | M_ZERO); 624 vcpu->nextrip = ~0; 625 vcpu->lastcpu = NOCPU; 626 vcpu->vmcb_pa = vtophys(vcpu->vmcb); 627 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), 628 sc->nptp); 629 svm_msr_guest_init(sc, vcpu); 630 return (vcpu); 631 } 632 633 /* 634 * Collateral for a generic SVM VM-exit. 635 */ 636 static void 637 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 638 { 639 640 vme->exitcode = VM_EXITCODE_SVM; 641 vme->u.svm.exitcode = code; 642 vme->u.svm.exitinfo1 = info1; 643 vme->u.svm.exitinfo2 = info2; 644 } 645 646 static int 647 svm_cpl(struct vmcb_state *state) 648 { 649 650 /* 651 * From APMv2: 652 * "Retrieve the CPL from the CPL field in the VMCB, not 653 * from any segment DPL" 654 */ 655 return (state->cpl); 656 } 657 658 static enum vm_cpu_mode 659 svm_vcpu_mode(struct vmcb *vmcb) 660 { 661 struct vmcb_segment seg; 662 struct vmcb_state *state; 663 int error __diagused; 664 665 state = &vmcb->state; 666 667 if (state->efer & EFER_LMA) { 668 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 669 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 670 error)); 671 672 /* 673 * Section 4.8.1 for APM2, check if Code Segment has 674 * Long attribute set in descriptor. 675 */ 676 if (seg.attrib & VMCB_CS_ATTRIB_L) 677 return (CPU_MODE_64BIT); 678 else 679 return (CPU_MODE_COMPATIBILITY); 680 } else if (state->cr0 & CR0_PE) { 681 return (CPU_MODE_PROTECTED); 682 } else { 683 return (CPU_MODE_REAL); 684 } 685 } 686 687 static enum vm_paging_mode 688 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 689 { 690 691 if ((cr0 & CR0_PG) == 0) 692 return (PAGING_MODE_FLAT); 693 if ((cr4 & CR4_PAE) == 0) 694 return (PAGING_MODE_32); 695 if (efer & EFER_LME) 696 return (PAGING_MODE_64); 697 else 698 return (PAGING_MODE_PAE); 699 } 700 701 /* 702 * ins/outs utility routines 703 */ 704 static uint64_t 705 svm_inout_str_index(struct svm_regctx *regs, int in) 706 { 707 uint64_t val; 708 709 val = in ? regs->sctx_rdi : regs->sctx_rsi; 710 711 return (val); 712 } 713 714 static uint64_t 715 svm_inout_str_count(struct svm_regctx *regs, int rep) 716 { 717 uint64_t val; 718 719 val = rep ? 
regs->sctx_rcx : 1; 720 721 return (val); 722 } 723 724 static void 725 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, 726 struct vm_inout_str *vis) 727 { 728 int error __diagused, s; 729 730 if (in) { 731 vis->seg_name = VM_REG_GUEST_ES; 732 } else { 733 /* The segment field has standard encoding */ 734 s = (info1 >> 10) & 0x7; 735 vis->seg_name = vm_segment_name(s); 736 } 737 738 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 739 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 740 } 741 742 static int 743 svm_inout_str_addrsize(uint64_t info1) 744 { 745 uint32_t size; 746 747 size = (info1 >> 7) & 0x7; 748 switch (size) { 749 case 1: 750 return (2); /* 16 bit */ 751 case 2: 752 return (4); /* 32 bit */ 753 case 4: 754 return (8); /* 64 bit */ 755 default: 756 panic("%s: invalid size encoding %d", __func__, size); 757 } 758 } 759 760 static void 761 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 762 { 763 struct vmcb_state *state; 764 765 state = &vmcb->state; 766 paging->cr3 = state->cr3; 767 paging->cpl = svm_cpl(state); 768 paging->cpu_mode = svm_vcpu_mode(vmcb); 769 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 770 state->efer); 771 } 772 773 #define UNHANDLED 0 774 775 /* 776 * Handle guest I/O intercept. 777 */ 778 static int 779 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) 780 { 781 struct vmcb_ctrl *ctrl; 782 struct vmcb_state *state; 783 struct svm_regctx *regs; 784 struct vm_inout_str *vis; 785 uint64_t info1; 786 int inout_string; 787 788 state = svm_get_vmcb_state(vcpu); 789 ctrl = svm_get_vmcb_ctrl(vcpu); 790 regs = svm_get_guest_regctx(vcpu); 791 792 info1 = ctrl->exitinfo1; 793 inout_string = info1 & BIT(2) ? 1 : 0; 794 795 /* 796 * The effective segment number in EXITINFO1[12:10] is populated 797 * only if the processor has the DecodeAssist capability. 798 * 799 * XXX this is not specified explicitly in APMv2 but can be verified 800 * empirically. 801 */ 802 if (inout_string && !decode_assist()) 803 return (UNHANDLED); 804 805 vmexit->exitcode = VM_EXITCODE_INOUT; 806 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 807 vmexit->u.inout.string = inout_string; 808 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 809 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 810 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 811 vmexit->u.inout.eax = (uint32_t)(state->rax); 812 813 if (inout_string) { 814 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 815 vis = &vmexit->u.inout_str; 816 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); 817 vis->rflags = state->rflags; 818 vis->cr0 = state->cr0; 819 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 820 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 821 vis->addrsize = svm_inout_str_addrsize(info1); 822 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); 823 } 824 825 return (UNHANDLED); 826 } 827 828 static int 829 npf_fault_type(uint64_t exitinfo1) 830 { 831 832 if (exitinfo1 & VMCB_NPF_INFO1_W) 833 return (VM_PROT_WRITE); 834 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 835 return (VM_PROT_EXECUTE); 836 else 837 return (VM_PROT_READ); 838 } 839 840 static bool 841 svm_npf_emul_fault(uint64_t exitinfo1) 842 { 843 844 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 845 return (false); 846 } 847 848 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 849 return (false); 850 } 851 852 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 853 return (false); 854 } 855 856 return (true); 857 } 858 859 static void 860 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 861 { 862 struct vm_guest_paging *paging; 863 struct vmcb_segment seg; 864 struct vmcb_ctrl *ctrl; 865 char *inst_bytes; 866 int error __diagused, inst_len; 867 868 ctrl = &vmcb->ctrl; 869 paging = &vmexit->u.inst_emul.paging; 870 871 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 872 vmexit->u.inst_emul.gpa = gpa; 873 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 874 svm_paging_info(vmcb, paging); 875 876 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 877 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 878 879 switch(paging->cpu_mode) { 880 case CPU_MODE_REAL: 881 vmexit->u.inst_emul.cs_base = seg.base; 882 vmexit->u.inst_emul.cs_d = 0; 883 break; 884 case CPU_MODE_PROTECTED: 885 case CPU_MODE_COMPATIBILITY: 886 vmexit->u.inst_emul.cs_base = seg.base; 887 888 /* 889 * Section 4.8.1 of APM2, Default Operand Size or D bit. 890 */ 891 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 892 1 : 0; 893 break; 894 default: 895 vmexit->u.inst_emul.cs_base = 0; 896 vmexit->u.inst_emul.cs_d = 0; 897 break; 898 } 899 900 /* 901 * Copy the instruction bytes into 'vie' if available. 902 */ 903 if (decode_assist() && !disable_npf_assist) { 904 inst_len = ctrl->inst_len; 905 inst_bytes = ctrl->inst_bytes; 906 } else { 907 inst_len = 0; 908 inst_bytes = NULL; 909 } 910 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 911 } 912 913 #ifdef KTR 914 static const char * 915 intrtype_to_str(int intr_type) 916 { 917 switch (intr_type) { 918 case VMCB_EVENTINJ_TYPE_INTR: 919 return ("hwintr"); 920 case VMCB_EVENTINJ_TYPE_NMI: 921 return ("nmi"); 922 case VMCB_EVENTINJ_TYPE_INTn: 923 return ("swintr"); 924 case VMCB_EVENTINJ_TYPE_EXCEPTION: 925 return ("exception"); 926 default: 927 panic("%s: unknown intr_type %d", __func__, intr_type); 928 } 929 } 930 #endif 931 932 /* 933 * Inject an event to vcpu as described in section 15.20, "Event injection". 
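 * The EVENTINJ field is assembled below as: the vector in bits 7:0, the
 * event type in bits 10:8, the error-code-valid and valid flags, and,
 * when applicable, the error code in bits 63:32.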
934 */ 935 static void 936 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, 937 uint32_t error, bool ec_valid) 938 { 939 struct vmcb_ctrl *ctrl; 940 941 ctrl = svm_get_vmcb_ctrl(vcpu); 942 943 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 944 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 945 946 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 947 __func__, vector)); 948 949 switch (intr_type) { 950 case VMCB_EVENTINJ_TYPE_INTR: 951 case VMCB_EVENTINJ_TYPE_NMI: 952 case VMCB_EVENTINJ_TYPE_INTn: 953 break; 954 case VMCB_EVENTINJ_TYPE_EXCEPTION: 955 if (vector >= 0 && vector <= 31 && vector != 2) 956 break; 957 /* FALLTHROUGH */ 958 default: 959 panic("%s: invalid intr_type/vector: %d/%d", __func__, 960 intr_type, vector); 961 } 962 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 963 if (ec_valid) { 964 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 965 ctrl->eventinj |= (uint64_t)error << 32; 966 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", 967 intrtype_to_str(intr_type), vector, error); 968 } else { 969 SVM_CTR2(vcpu, "Injecting %s at vector %d", 970 intrtype_to_str(intr_type), vector); 971 } 972 } 973 974 static void 975 svm_update_virqinfo(struct svm_vcpu *vcpu) 976 { 977 struct vlapic *vlapic; 978 struct vmcb_ctrl *ctrl; 979 980 vlapic = vm_lapic(vcpu->vcpu); 981 ctrl = svm_get_vmcb_ctrl(vcpu); 982 983 /* Update %cr8 in the emulated vlapic */ 984 vlapic_set_cr8(vlapic, ctrl->v_tpr); 985 986 /* Virtual interrupt injection is not used. */ 987 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 988 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 989 } 990 991 static void 992 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 993 { 994 struct vmcb_ctrl *ctrl; 995 uint64_t intinfo; 996 997 ctrl = svm_get_vmcb_ctrl(vcpu); 998 intinfo = ctrl->exitintinfo; 999 if (!VMCB_EXITINTINFO_VALID(intinfo)) 1000 return; 1001 1002 /* 1003 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1004 * 1005 * If a #VMEXIT happened during event delivery then record the event 1006 * that was being delivered. 
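 * For example, if delivering an external interrupt through the guest IDT
 * triggers a nested page fault #VMEXIT, EXITINTINFO identifies the
 * interrupt that was in flight so it can be re-injected (via
 * vm_exit_intinfo/svm_inj_intinfo) on a later VMRUN instead of being lost.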
1007 */ 1008 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, 1009 VMCB_EXITINTINFO_VECTOR(intinfo)); 1010 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); 1011 vm_exit_intinfo(vcpu->vcpu, intinfo); 1012 } 1013 1014 #ifdef INVARIANTS 1015 static __inline int 1016 vintr_intercept_enabled(struct svm_vcpu *vcpu) 1017 { 1018 1019 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); 1020 } 1021 #endif 1022 1023 static __inline void 1024 enable_intr_window_exiting(struct svm_vcpu *vcpu) 1025 { 1026 struct vmcb_ctrl *ctrl; 1027 1028 ctrl = svm_get_vmcb_ctrl(vcpu); 1029 1030 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1031 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1032 KASSERT(vintr_intercept_enabled(vcpu), 1033 ("%s: vintr intercept should be enabled", __func__)); 1034 return; 1035 } 1036 1037 SVM_CTR0(vcpu, "Enable intr window exiting"); 1038 ctrl->v_irq = 1; 1039 ctrl->v_ign_tpr = 1; 1040 ctrl->v_intr_vector = 0; 1041 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1042 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1043 } 1044 1045 static __inline void 1046 disable_intr_window_exiting(struct svm_vcpu *vcpu) 1047 { 1048 struct vmcb_ctrl *ctrl; 1049 1050 ctrl = svm_get_vmcb_ctrl(vcpu); 1051 1052 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1053 KASSERT(!vintr_intercept_enabled(vcpu), 1054 ("%s: vintr intercept should be disabled", __func__)); 1055 return; 1056 } 1057 1058 SVM_CTR0(vcpu, "Disable intr window exiting"); 1059 ctrl->v_irq = 0; 1060 ctrl->v_intr_vector = 0; 1061 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1062 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1063 } 1064 1065 static int 1066 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) 1067 { 1068 struct vmcb_ctrl *ctrl; 1069 int oldval, newval; 1070 1071 ctrl = svm_get_vmcb_ctrl(vcpu); 1072 oldval = ctrl->intr_shadow; 1073 newval = val ? 1 : 0; 1074 if (newval != oldval) { 1075 ctrl->intr_shadow = newval; 1076 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); 1077 } 1078 return (0); 1079 } 1080 1081 static int 1082 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) 1083 { 1084 struct vmcb_ctrl *ctrl; 1085 1086 ctrl = svm_get_vmcb_ctrl(vcpu); 1087 *val = ctrl->intr_shadow; 1088 return (0); 1089 } 1090 1091 /* 1092 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1093 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1094 * to track when the vcpu is done handling the NMI. 1095 */ 1096 static int 1097 nmi_blocked(struct svm_vcpu *vcpu) 1098 { 1099 int blocked; 1100 1101 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1102 return (blocked); 1103 } 1104 1105 static void 1106 enable_nmi_blocking(struct svm_vcpu *vcpu) 1107 { 1108 1109 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); 1110 SVM_CTR0(vcpu, "vNMI blocking enabled"); 1111 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1112 } 1113 1114 static void 1115 clear_nmi_blocking(struct svm_vcpu *vcpu) 1116 { 1117 int error __diagused; 1118 1119 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); 1120 SVM_CTR0(vcpu, "vNMI blocking cleared"); 1121 /* 1122 * When the IRET intercept is cleared the vcpu will attempt to execute 1123 * the "iret" when it runs next. However, it is possible to inject 1124 * another NMI into the vcpu before the "iret" has actually executed. 1125 * 1126 * For e.g. 
if the "iret" encounters a #NPF when accessing the stack 1127 * it will trap back into the hypervisor. If an NMI is pending for 1128 * the vcpu it will be injected into the guest. 1129 * 1130 * XXX this needs to be fixed 1131 */ 1132 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1133 1134 /* 1135 * Set 'intr_shadow' to prevent an NMI from being injected on the 1136 * immediate VMRUN. 1137 */ 1138 error = svm_modify_intr_shadow(vcpu, 1); 1139 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1140 } 1141 1142 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1143 1144 static int 1145 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, 1146 bool *retu) 1147 { 1148 struct vm_exit *vme; 1149 struct vmcb_state *state; 1150 uint64_t changed, lma, oldval; 1151 int error __diagused; 1152 1153 state = svm_get_vmcb_state(vcpu); 1154 1155 oldval = state->efer; 1156 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1157 1158 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1159 changed = oldval ^ newval; 1160 1161 if (newval & EFER_MBZ_BITS) 1162 goto gpf; 1163 1164 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1165 if (changed & EFER_LME) { 1166 if (state->cr0 & CR0_PG) 1167 goto gpf; 1168 } 1169 1170 /* EFER.LMA = EFER.LME & CR0.PG */ 1171 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1172 lma = EFER_LMA; 1173 else 1174 lma = 0; 1175 1176 if ((newval & EFER_LMA) != lma) 1177 goto gpf; 1178 1179 if (newval & EFER_NXE) { 1180 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) 1181 goto gpf; 1182 } 1183 1184 /* 1185 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1186 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1187 */ 1188 if (newval & EFER_LMSLE) { 1189 vme = vm_exitinfo(vcpu->vcpu); 1190 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1191 *retu = true; 1192 return (0); 1193 } 1194 1195 if (newval & EFER_FFXSR) { 1196 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) 1197 goto gpf; 1198 } 1199 1200 if (newval & EFER_TCE) { 1201 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) 1202 goto gpf; 1203 } 1204 1205 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); 1206 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1207 return (0); 1208 gpf: 1209 vm_inject_gp(vcpu->vcpu); 1210 return (0); 1211 } 1212 1213 static int 1214 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, 1215 uint64_t val, bool *retu) 1216 { 1217 int error; 1218 1219 if (lapic_msr(num)) 1220 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); 1221 else if (num == MSR_EFER) 1222 error = svm_write_efer(sc, vcpu, val, retu); 1223 else 1224 error = svm_wrmsr(vcpu, num, val, retu); 1225 1226 return (error); 1227 } 1228 1229 static int 1230 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) 1231 { 1232 struct vmcb_state *state; 1233 struct svm_regctx *ctx; 1234 uint64_t result; 1235 int error; 1236 1237 if (lapic_msr(num)) 1238 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); 1239 else 1240 error = svm_rdmsr(vcpu, num, &result, retu); 1241 1242 if (error == 0) { 1243 state = svm_get_vmcb_state(vcpu); 1244 ctx = svm_get_guest_regctx(vcpu); 1245 state->rax = result & 0xffffffff; 1246 ctx->sctx_rdx = result >> 32; 1247 } 1248 1249 return (error); 1250 } 1251 1252 #ifdef KTR 1253 static const char * 1254 exit_reason_to_str(uint64_t reason) 1255 { 1256 int i; 1257 static char reasonbuf[32]; 1258 static const struct { 1259 int reason; 1260 const char *str; 1261 } reasons[] 
= { 1262 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1263 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1264 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1265 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1266 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1267 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1268 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1269 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1270 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1271 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1272 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1273 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1274 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1275 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1276 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1277 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1278 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1279 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1280 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1281 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1282 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1283 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1284 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1285 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1286 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1287 }; 1288 1289 for (i = 0; i < nitems(reasons); i++) { 1290 if (reasons[i].reason == reason) 1291 return (reasons[i].str); 1292 } 1293 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1294 return (reasonbuf); 1295 } 1296 #endif /* KTR */ 1297 1298 /* 1299 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1300 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1301 * and exceptions caused by INT3, INTO and BOUND instructions. 1302 * 1303 * Return 1 if the nRIP is valid and 0 otherwise. 1304 */ 1305 static int 1306 nrip_valid(uint64_t exitcode) 1307 { 1308 switch (exitcode) { 1309 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1310 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1311 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1312 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1313 case 0x43: /* INT3 */ 1314 case 0x44: /* INTO */ 1315 case 0x45: /* BOUND */ 1316 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1317 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1318 return (1); 1319 default: 1320 return (0); 1321 } 1322 } 1323 1324 static int 1325 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, 1326 struct vm_exit *vmexit) 1327 { 1328 struct vmcb *vmcb; 1329 struct vmcb_state *state; 1330 struct vmcb_ctrl *ctrl; 1331 struct svm_regctx *ctx; 1332 uint64_t code, info1, info2, val; 1333 uint32_t eax, ecx, edx; 1334 int error __diagused, errcode_valid, handled, idtvec, reflect; 1335 bool retu; 1336 1337 ctx = svm_get_guest_regctx(vcpu); 1338 vmcb = svm_get_vmcb(vcpu); 1339 state = &vmcb->state; 1340 ctrl = &vmcb->ctrl; 1341 1342 handled = 0; 1343 code = ctrl->exitcode; 1344 info1 = ctrl->exitinfo1; 1345 info2 = ctrl->exitinfo2; 1346 1347 vmexit->exitcode = VM_EXITCODE_BOGUS; 1348 vmexit->rip = state->rip; 1349 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1350 1351 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); 1352 1353 /* 1354 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1355 * in an inconsistent state and can trigger assertions that would 1356 * never happen otherwise. 
1357 */ 1358 if (code == VMCB_EXIT_INVALID) { 1359 vm_exit_svm(vmexit, code, info1, info2); 1360 return (0); 1361 } 1362 1363 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1364 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1365 1366 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1367 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1368 vmexit->inst_length, code, info1, info2)); 1369 1370 svm_update_virqinfo(vcpu); 1371 svm_save_intinfo(svm_sc, vcpu); 1372 1373 switch (code) { 1374 case VMCB_EXIT_IRET: 1375 /* 1376 * Restart execution at "iret" but with the intercept cleared. 1377 */ 1378 vmexit->inst_length = 0; 1379 clear_nmi_blocking(vcpu); 1380 handled = 1; 1381 break; 1382 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1383 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); 1384 handled = 1; 1385 break; 1386 case VMCB_EXIT_INTR: /* external interrupt */ 1387 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); 1388 handled = 1; 1389 break; 1390 case VMCB_EXIT_NMI: /* external NMI */ 1391 handled = 1; 1392 break; 1393 case 0x40 ... 0x5F: 1394 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); 1395 reflect = 1; 1396 idtvec = code - 0x40; 1397 switch (idtvec) { 1398 case IDT_MC: 1399 /* 1400 * Call the machine check handler by hand. Also don't 1401 * reflect the machine check back into the guest. 1402 */ 1403 reflect = 0; 1404 SVM_CTR0(vcpu, "Vectoring to MCE handler"); 1405 __asm __volatile("int $18"); 1406 break; 1407 case IDT_PF: 1408 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); 1409 KASSERT(error == 0, ("%s: error %d updating cr2", 1410 __func__, error)); 1411 /* fallthru */ 1412 case IDT_NP: 1413 case IDT_SS: 1414 case IDT_GP: 1415 case IDT_AC: 1416 case IDT_TS: 1417 errcode_valid = 1; 1418 break; 1419 1420 case IDT_DF: 1421 errcode_valid = 1; 1422 info1 = 0; 1423 break; 1424 1425 case IDT_BP: 1426 case IDT_OF: 1427 case IDT_BR: 1428 /* 1429 * The 'nrip' field is populated for INT3, INTO and 1430 * BOUND exceptions and this also implies that 1431 * 'inst_length' is non-zero. 1432 * 1433 * Reset 'inst_length' to zero so the guest %rip at 1434 * event injection is identical to what it was when 1435 * the exception originally happened. 1436 */ 1437 SVM_CTR2(vcpu, "Reset inst_length from %d " 1438 "to zero before injecting exception %d", 1439 vmexit->inst_length, idtvec); 1440 vmexit->inst_length = 0; 1441 /* fallthru */ 1442 default: 1443 errcode_valid = 0; 1444 info1 = 0; 1445 break; 1446 } 1447 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1448 "when reflecting exception %d into guest", 1449 vmexit->inst_length, idtvec)); 1450 1451 if (reflect) { 1452 /* Reflect the exception back into the guest */ 1453 SVM_CTR2(vcpu, "Reflecting exception " 1454 "%d/%#x into the guest", idtvec, (int)info1); 1455 error = vm_inject_exception(vcpu->vcpu, idtvec, 1456 errcode_valid, info1, 0); 1457 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1458 __func__, error)); 1459 } 1460 handled = 1; 1461 break; 1462 case VMCB_EXIT_MSR: /* MSR access. 
*/ 1463 eax = state->rax; 1464 ecx = ctx->sctx_rcx; 1465 edx = ctx->sctx_rdx; 1466 retu = false; 1467 1468 if (info1) { 1469 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); 1470 val = (uint64_t)edx << 32 | eax; 1471 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); 1472 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1473 vmexit->exitcode = VM_EXITCODE_WRMSR; 1474 vmexit->u.msr.code = ecx; 1475 vmexit->u.msr.wval = val; 1476 } else if (!retu) { 1477 handled = 1; 1478 } else { 1479 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1480 ("emulate_wrmsr retu with bogus exitcode")); 1481 } 1482 } else { 1483 SVM_CTR1(vcpu, "rdmsr %#x", ecx); 1484 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); 1485 if (emulate_rdmsr(vcpu, ecx, &retu)) { 1486 vmexit->exitcode = VM_EXITCODE_RDMSR; 1487 vmexit->u.msr.code = ecx; 1488 } else if (!retu) { 1489 handled = 1; 1490 } else { 1491 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1492 ("emulate_rdmsr retu with bogus exitcode")); 1493 } 1494 } 1495 break; 1496 case VMCB_EXIT_IO: 1497 handled = svm_handle_io(vcpu, vmexit); 1498 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); 1499 break; 1500 case VMCB_EXIT_CPUID: 1501 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); 1502 handled = x86_emulate_cpuid(vcpu->vcpu, 1503 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, 1504 &ctx->sctx_rdx); 1505 break; 1506 case VMCB_EXIT_HLT: 1507 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); 1508 vmexit->exitcode = VM_EXITCODE_HLT; 1509 vmexit->u.hlt.rflags = state->rflags; 1510 break; 1511 case VMCB_EXIT_PAUSE: 1512 vmexit->exitcode = VM_EXITCODE_PAUSE; 1513 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); 1514 break; 1515 case VMCB_EXIT_NPF: 1516 /* EXITINFO2 contains the faulting guest physical address */ 1517 if (info1 & VMCB_NPF_INFO1_RSV) { 1518 SVM_CTR2(vcpu, "nested page fault with " 1519 "reserved bits set: info1(%#lx) info2(%#lx)", 1520 info1, info2); 1521 } else if (vm_mem_allocated(vcpu->vcpu, info2)) { 1522 vmexit->exitcode = VM_EXITCODE_PAGING; 1523 vmexit->u.paging.gpa = info2; 1524 vmexit->u.paging.fault_type = npf_fault_type(info1); 1525 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); 1526 SVM_CTR3(vcpu, "nested page fault " 1527 "on gpa %#lx/%#lx at rip %#lx", 1528 info2, info1, state->rip); 1529 } else if (svm_npf_emul_fault(info1)) { 1530 svm_handle_inst_emul(vmcb, info2, vmexit); 1531 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); 1532 SVM_CTR3(vcpu, "inst_emul fault " 1533 "for gpa %#lx/%#lx at rip %#lx", 1534 info2, info1, state->rip); 1535 } 1536 break; 1537 case VMCB_EXIT_MONITOR: 1538 vmexit->exitcode = VM_EXITCODE_MONITOR; 1539 break; 1540 case VMCB_EXIT_MWAIT: 1541 vmexit->exitcode = VM_EXITCODE_MWAIT; 1542 break; 1543 case VMCB_EXIT_SHUTDOWN: 1544 case VMCB_EXIT_VMRUN: 1545 case VMCB_EXIT_VMMCALL: 1546 case VMCB_EXIT_VMLOAD: 1547 case VMCB_EXIT_VMSAVE: 1548 case VMCB_EXIT_STGI: 1549 case VMCB_EXIT_CLGI: 1550 case VMCB_EXIT_SKINIT: 1551 case VMCB_EXIT_ICEBP: 1552 case VMCB_EXIT_INVLPGA: 1553 vm_inject_ud(vcpu->vcpu); 1554 handled = 1; 1555 break; 1556 case VMCB_EXIT_INVD: 1557 case VMCB_EXIT_WBINVD: 1558 /* ignore exit */ 1559 handled = 1; 1560 break; 1561 default: 1562 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); 1563 break; 1564 } 1565 1566 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", 1567 handled ? 
"handled" : "unhandled", exit_reason_to_str(code), 1568 vmexit->rip, vmexit->inst_length); 1569 1570 if (handled) { 1571 vmexit->rip += vmexit->inst_length; 1572 vmexit->inst_length = 0; 1573 state->rip = vmexit->rip; 1574 } else { 1575 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1576 /* 1577 * If this VM exit was not claimed by anybody then 1578 * treat it as a generic SVM exit. 1579 */ 1580 vm_exit_svm(vmexit, code, info1, info2); 1581 } else { 1582 /* 1583 * The exitcode and collateral have been populated. 1584 * The VM exit will be processed further in userland. 1585 */ 1586 } 1587 } 1588 return (handled); 1589 } 1590 1591 static void 1592 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1593 { 1594 uint64_t intinfo; 1595 1596 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1597 return; 1598 1599 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1600 "valid: %#lx", __func__, intinfo)); 1601 1602 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1603 VMCB_EXITINTINFO_VECTOR(intinfo), 1604 VMCB_EXITINTINFO_EC(intinfo), 1605 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1606 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1607 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1608 } 1609 1610 /* 1611 * Inject event to virtual cpu. 1612 */ 1613 static void 1614 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1615 struct vlapic *vlapic) 1616 { 1617 struct vmcb_ctrl *ctrl; 1618 struct vmcb_state *state; 1619 uint8_t v_tpr; 1620 int vector, need_intr_window; 1621 int extint_pending; 1622 1623 state = svm_get_vmcb_state(vcpu); 1624 ctrl = svm_get_vmcb_ctrl(vcpu); 1625 1626 need_intr_window = 0; 1627 1628 if (vcpu->nextrip != state->rip) { 1629 ctrl->intr_shadow = 0; 1630 SVM_CTR2(vcpu, "Guest interrupt blocking " 1631 "cleared due to rip change: %#lx/%#lx", 1632 vcpu->nextrip, state->rip); 1633 } 1634 1635 /* 1636 * Inject pending events or exceptions for this vcpu. 1637 * 1638 * An event might be pending because the previous #VMEXIT happened 1639 * during event delivery (i.e. ctrl->exitintinfo). 1640 * 1641 * An event might also be pending because an exception was injected 1642 * by the hypervisor (e.g. #PF during instruction emulation). 1643 */ 1644 svm_inj_intinfo(sc, vcpu); 1645 1646 /* NMI event has priority over interrupts. */ 1647 if (vm_nmi_pending(vcpu->vcpu)) { 1648 if (nmi_blocked(vcpu)) { 1649 /* 1650 * Can't inject another NMI if the guest has not 1651 * yet executed an "iret" after the last NMI. 1652 */ 1653 SVM_CTR0(vcpu, "Cannot inject NMI due " 1654 "to NMI-blocking"); 1655 } else if (ctrl->intr_shadow) { 1656 /* 1657 * Can't inject an NMI if the vcpu is in an intr_shadow. 1658 */ 1659 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1660 "interrupt shadow"); 1661 need_intr_window = 1; 1662 goto done; 1663 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1664 /* 1665 * If there is already an exception/interrupt pending 1666 * then defer the NMI until after that. 1667 */ 1668 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1669 "eventinj %#lx", ctrl->eventinj); 1670 1671 /* 1672 * Use self-IPI to trigger a VM-exit as soon as 1673 * possible after the event injection is completed. 1674 * 1675 * This works only if the external interrupt exiting 1676 * is at a lower priority than the event injection. 1677 * 1678 * Although not explicitly specified in APMv2 the 1679 * relative priorities were verified empirically. 1680 */ 1681 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
*/ 1682 } else { 1683 vm_nmi_clear(vcpu->vcpu); 1684 1685 /* Inject NMI, vector number is not used */ 1686 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, 1687 IDT_NMI, 0, false); 1688 1689 /* virtual NMI blocking is now in effect */ 1690 enable_nmi_blocking(vcpu); 1691 1692 SVM_CTR0(vcpu, "Injecting vNMI"); 1693 } 1694 } 1695 1696 extint_pending = vm_extint_pending(vcpu->vcpu); 1697 if (!extint_pending) { 1698 if (!vlapic_pending_intr(vlapic, &vector)) 1699 goto done; 1700 KASSERT(vector >= 16 && vector <= 255, 1701 ("invalid vector %d from local APIC", vector)); 1702 } else { 1703 /* Ask the legacy pic for a vector to inject */ 1704 vatpic_pending_intr(sc->vm, &vector); 1705 KASSERT(vector >= 0 && vector <= 255, 1706 ("invalid vector %d from INTR", vector)); 1707 } 1708 1709 /* 1710 * If the guest has disabled interrupts or is in an interrupt shadow 1711 * then we cannot inject the pending interrupt. 1712 */ 1713 if ((state->rflags & PSL_I) == 0) { 1714 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1715 "rflags %#lx", vector, state->rflags); 1716 need_intr_window = 1; 1717 goto done; 1718 } 1719 1720 if (ctrl->intr_shadow) { 1721 SVM_CTR1(vcpu, "Cannot inject vector %d due to " 1722 "interrupt shadow", vector); 1723 need_intr_window = 1; 1724 goto done; 1725 } 1726 1727 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1728 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1729 "eventinj %#lx", vector, ctrl->eventinj); 1730 need_intr_window = 1; 1731 goto done; 1732 } 1733 1734 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1735 1736 if (!extint_pending) { 1737 vlapic_intr_accepted(vlapic, vector); 1738 } else { 1739 vm_extint_clear(vcpu->vcpu); 1740 vatpic_intr_accepted(sc->vm, vector); 1741 } 1742 1743 /* 1744 * Force a VM-exit as soon as the vcpu is ready to accept another 1745 * interrupt. This is done because the PIC might have another vector 1746 * that it wants to inject. Also, if the APIC has a pending interrupt 1747 * that was preempted by the ExtInt then it allows us to inject the 1748 * APIC vector as soon as possible. 1749 */ 1750 need_intr_window = 1; 1751 done: 1752 /* 1753 * The guest can modify the TPR by writing to %CR8. In guest mode 1754 * the processor reflects this write to V_TPR without hypervisor 1755 * intervention. 1756 * 1757 * The guest can also modify the TPR by writing to it via the memory 1758 * mapped APIC page. In this case, the write will be emulated by the 1759 * hypervisor. For this reason V_TPR must be updated before every 1760 * VMRUN. 1761 */ 1762 v_tpr = vlapic_get_cr8(vlapic); 1763 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1764 if (ctrl->v_tpr != v_tpr) { 1765 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", 1766 ctrl->v_tpr, v_tpr); 1767 ctrl->v_tpr = v_tpr; 1768 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1769 } 1770 1771 if (need_intr_window) { 1772 /* 1773 * We use V_IRQ in conjunction with the VINTR intercept to 1774 * trap into the hypervisor as soon as a virtual interrupt 1775 * can be delivered. 1776 * 1777 * Since injected events are not subject to intercept checks 1778 * we need to ensure that the V_IRQ is not actually going to 1779 * be delivered on VM entry. The KASSERT below enforces this. 
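 * In other words, the window is only armed while something currently
 * blocks delivery: a pending EVENTINJ, RFLAGS.IF clear, or an interrupt
 * shadow.  Once that condition clears, the VINTR intercept fires and the
 * pending vector is injected on the next pass through this function.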
1780 */ 1781 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1782 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1783 ("Bogus intr_window_exiting: eventinj (%#lx), " 1784 "intr_shadow (%u), rflags (%#lx)", 1785 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1786 enable_intr_window_exiting(vcpu); 1787 } else { 1788 disable_intr_window_exiting(vcpu); 1789 } 1790 } 1791 1792 static __inline void 1793 restore_host_tss(void) 1794 { 1795 struct system_segment_descriptor *tss_sd; 1796 1797 /* 1798 * The TSS descriptor was in use prior to launching the guest so it 1799 * has been marked busy. 1800 * 1801 * 'ltr' requires the descriptor to be marked available so change the 1802 * type to "64-bit available TSS". 1803 */ 1804 tss_sd = PCPU_GET(tss); 1805 tss_sd->sd_type = SDT_SYSTSS; 1806 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1807 } 1808 1809 static void 1810 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1811 { 1812 struct vmcb_ctrl *ctrl; 1813 long eptgen; 1814 int cpu; 1815 bool alloc_asid; 1816 1817 cpu = curcpu; 1818 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1819 smr_enter(pmap->pm_eptsmr); 1820 1821 ctrl = svm_get_vmcb_ctrl(vcpu); 1822 1823 /* 1824 * The TLB entries associated with the vcpu's ASID are not valid 1825 * if either of the following conditions is true: 1826 * 1827 * 1. The vcpu's ASID generation is different than the host cpu's 1828 * ASID generation. This happens when the vcpu migrates to a new 1829 * host cpu. It can also happen when the number of vcpus executing 1830 * on a host cpu is greater than the number of ASIDs available. 1831 * 1832 * 2. The pmap generation number is different than the value cached in 1833 * the 'vcpustate'. This happens when the host invalidates pages 1834 * belonging to the guest. 1835 * 1836 * asidgen eptgen Action 1837 * mismatch mismatch 1838 * 0 0 (a) 1839 * 0 1 (b1) or (b2) 1840 * 1 0 (c) 1841 * 1 1 (d) 1842 * 1843 * (a) There is no mismatch in eptgen or ASID generation and therefore 1844 * no further action is needed. 1845 * 1846 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1847 * retained and the TLB entries associated with this ASID 1848 * are flushed by VMRUN. 1849 * 1850 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1851 * allocated. 1852 * 1853 * (c) A new ASID is allocated. 1854 * 1855 * (d) A new ASID is allocated. 1856 */ 1857 1858 alloc_asid = false; 1859 eptgen = atomic_load_long(&pmap->pm_eptgen); 1860 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1861 1862 if (vcpu->asid.gen != asid[cpu].gen) { 1863 alloc_asid = true; /* (c) and (d) */ 1864 } else if (vcpu->eptgen != eptgen) { 1865 if (flush_by_asid()) 1866 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1867 else 1868 alloc_asid = true; /* (b2) */ 1869 } else { 1870 /* 1871 * This is the common case (a). 1872 */ 1873 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1874 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1875 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1876 } 1877 1878 if (alloc_asid) { 1879 if (++asid[cpu].num >= nasid) { 1880 asid[cpu].num = 1; 1881 if (++asid[cpu].gen == 0) 1882 asid[cpu].gen = 1; 1883 /* 1884 * If this cpu does not support "flush-by-asid" 1885 * then flush the entire TLB on a generation 1886 * bump. Subsequent ASID allocation in this 1887 * generation can be done without a TLB flush. 
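 * For example, with nasid = 8 this allocator hands out ASIDs 1 through 7;
 * the allocation after that wraps 'num' back to 1, bumps 'gen', and
 * (without flush-by-asid) requests a full TLB flush so that the recycled
 * ASIDs cannot match stale guest entries.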
1888 */ 1889 if (!flush_by_asid()) 1890 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1891 } 1892 vcpu->asid.gen = asid[cpu].gen; 1893 vcpu->asid.num = asid[cpu].num; 1894 1895 ctrl->asid = vcpu->asid.num; 1896 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 1897 /* 1898 * If this cpu supports "flush-by-asid" then the TLB 1899 * was not flushed after the generation bump. The TLB 1900 * is flushed selectively after every new ASID allocation. 1901 */ 1902 if (flush_by_asid()) 1903 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1904 } 1905 vcpu->eptgen = eptgen; 1906 1907 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1908 KASSERT(ctrl->asid == vcpu->asid.num, 1909 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 1910 } 1911 1912 static void 1913 svm_pmap_deactivate(pmap_t pmap) 1914 { 1915 smr_exit(pmap->pm_eptsmr); 1916 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 1917 } 1918 1919 static __inline void 1920 disable_gintr(void) 1921 { 1922 1923 __asm __volatile("clgi"); 1924 } 1925 1926 static __inline void 1927 enable_gintr(void) 1928 { 1929 1930 __asm __volatile("stgi"); 1931 } 1932 1933 static __inline void 1934 svm_dr_enter_guest(struct svm_regctx *gctx) 1935 { 1936 1937 /* Save host control debug registers. */ 1938 gctx->host_dr7 = rdr7(); 1939 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1940 1941 /* 1942 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1943 * exceptions in the host based on the guest DRx values. The 1944 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1945 * VMCB. 1946 */ 1947 load_dr7(0); 1948 wrmsr(MSR_DEBUGCTLMSR, 0); 1949 1950 /* Save host debug registers. */ 1951 gctx->host_dr0 = rdr0(); 1952 gctx->host_dr1 = rdr1(); 1953 gctx->host_dr2 = rdr2(); 1954 gctx->host_dr3 = rdr3(); 1955 gctx->host_dr6 = rdr6(); 1956 1957 /* Restore guest debug registers. */ 1958 load_dr0(gctx->sctx_dr0); 1959 load_dr1(gctx->sctx_dr1); 1960 load_dr2(gctx->sctx_dr2); 1961 load_dr3(gctx->sctx_dr3); 1962 } 1963 1964 static __inline void 1965 svm_dr_leave_guest(struct svm_regctx *gctx) 1966 { 1967 1968 /* Save guest debug registers. */ 1969 gctx->sctx_dr0 = rdr0(); 1970 gctx->sctx_dr1 = rdr1(); 1971 gctx->sctx_dr2 = rdr2(); 1972 gctx->sctx_dr3 = rdr3(); 1973 1974 /* 1975 * Restore host debug registers. Restore DR7 and DEBUGCTL 1976 * last. 1977 */ 1978 load_dr0(gctx->host_dr0); 1979 load_dr1(gctx->host_dr1); 1980 load_dr2(gctx->host_dr2); 1981 load_dr3(gctx->host_dr3); 1982 load_dr6(gctx->host_dr6); 1983 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1984 load_dr7(gctx->host_dr7); 1985 } 1986 1987 /* 1988 * Start vcpu with specified RIP. 1989 */ 1990 static int 1991 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 1992 { 1993 struct svm_regctx *gctx; 1994 struct svm_softc *svm_sc; 1995 struct svm_vcpu *vcpu; 1996 struct vmcb_state *state; 1997 struct vmcb_ctrl *ctrl; 1998 struct vm_exit *vmexit; 1999 struct vlapic *vlapic; 2000 uint64_t vmcb_pa; 2001 int handled; 2002 uint16_t ldt_sel; 2003 2004 vcpu = vcpui; 2005 svm_sc = vcpu->sc; 2006 state = svm_get_vmcb_state(vcpu); 2007 ctrl = svm_get_vmcb_ctrl(vcpu); 2008 vmexit = vm_exitinfo(vcpu->vcpu); 2009 vlapic = vm_lapic(vcpu->vcpu); 2010 2011 gctx = svm_get_guest_regctx(vcpu); 2012 vmcb_pa = vcpu->vmcb_pa; 2013 2014 if (vcpu->lastcpu != curcpu) { 2015 /* 2016 * Force new ASID allocation by invalidating the generation. 2017 */ 2018 vcpu->asid.gen = 0; 2019 2020 /* 2021 * Invalidate the VMCB state cache by marking all fields dirty. 
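 * The clean bits tell the processor which VMCB fields it may reuse from
 * its internal cache; since this VMCB may have last run on a different
 * host cpu, clear all of them so every field is reloaded on the next
 * VMRUN.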
/*
 * Start vcpu with specified RIP.
 */
static int
svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	vcpu = vcpui;
	svm_sc = vcpu->sc;
	state = svm_get_vmcb_state(vcpu);
	ctrl = svm_get_vmcb_ctrl(vcpu);
	vmexit = vm_exitinfo(vcpu->vcpu);
	vlapic = vm_lapic(vcpu->vcpu);

	gctx = svm_get_guest_regctx(vcpu);
	vmcb_pa = vcpu->vmcb_pa;

	if (vcpu->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpu->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpu->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction.  This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpu->lastcpu = curcpu;
		vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(vcpu);

	/* Update Guest RIP */
	state->rip = rip;

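	/*
	 * Each iteration of the loop below disables global interrupts,
	 * bails out to the caller if a suspend, rendezvous, reqidle, AST
	 * or debug request is pending, injects pending events, activates
	 * the nested pmap/ASID and issues VMRUN.  After #VMEXIT the host
	 * TSS and LDT are restored, global interrupts are re-enabled and
	 * the exit is passed to svm_vmexit(); the loop continues only
	 * while exits can be handled entirely in the kernel.
	 */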
	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state.  This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vcpu->vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to yield the cpu. */
		if (vcpu_should_yield(vcpu->vcpu)) {
			enable_gintr();
			vm_exit_astpending(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vcpu->vcpu)) {
			enable_gintr();
			vm_exit_debug(vcpu->vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so that it can be
		 * restored after an exit.  The userspace hypervisor
		 * probably doesn't use a LDT, but save and restore it
		 * to be safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		svm_pmap_activate(vcpu, pmap);

		ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
		vcpu->dirty = 0;
		SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);

		svm_pmap_deactivate(pmap);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT.  However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip'. */
		vcpu->nextrip = state->rip;

		/* Handle #VMEXIT and, if required, return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(vcpu);

	return (0);
}

static void
svm_vcpu_cleanup(void *vcpui)
{
	struct svm_vcpu *vcpu = vcpui;

	free(vcpu->vmcb, M_SVM);
	free(vcpu, M_SVM);
}

static void
svm_cleanup(void *vmi)
{
	struct svm_softc *sc = vmi;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}
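
/*
 * Guest register state is split across two places: state saved and restored
 * by the hardware lives in the VMCB and is reached via vmcb_read() and
 * vmcb_write(), while the remaining general-purpose and debug registers are
 * kept in the software 'struct svm_regctx' saved/restored by svm_launch()
 * and located via swctx_regptr() above.  The accessors below check the
 * interrupt shadow special case first, then the VMCB, then the software
 * context.
 */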
static int
svm_getreg(void *vcpui, int ident, uint64_t *val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(vcpu, val));
	}

	if (vmcb_read(vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *vcpui, int ident, uint64_t val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(vcpu, val));
	}

	/* Do not permit user write access to VMCB fields by offset. */
	if (!VMCB_ACCESS_OK(ident)) {
		if (vmcb_write(vcpu, ident, val) == 0) {
			return (0);
		}
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
		/* Ignore. */
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_getdesc(void *vcpui, int reg, struct seg_desc *desc)
{
	return (vmcb_getdesc(vcpui, reg, desc));
}

static int
svm_setdesc(void *vcpui, int reg, struct seg_desc *desc)
{
	return (vmcb_setdesc(vcpui, reg, desc));
}

#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = svm_getreg(vcpui, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = svm_setreg(vcpui, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif

static int
svm_setcap(void *vcpui, int type, int val)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;
	int error;

	vcpu = vcpui;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM. */
		if (val == 0)
			error = EINVAL;
		break;
	case VM_CAP_IPI_EXIT:
		vlapic = vm_lapic(vcpu->vcpu);
		vlapic->ipi_exit = val;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *vcpui, int type, int *retval)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;
	int error;

	vcpu = vcpui;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	case VM_CAP_IPI_EXIT:
		vlapic = vm_lapic(vcpu->vcpu);
		*retval = vlapic->ipi_exit;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vmspace *
svm_vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	return (svm_npt_alloc(min, max));
}

static void
svm_vmspace_free(struct vmspace *vmspace)
{
	svm_npt_free(vmspace);
}

static struct vlapic *
svm_vlapic_init(void *vcpui)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;

	vcpu = vcpui;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vcpu->sc->vm;
	vlapic->vcpu = vcpu->vcpu;
	vlapic->vcpuid = vcpu->vcpuid;
	vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
	    M_WAITOK | M_ZERO);

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic->apic_page, M_SVM_VLAPIC);
	free(vlapic, M_SVM_VLAPIC);
}
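
/*
 * Snapshot support: svm_vcpu_snapshot() below saves or restores, depending
 * on the snapshot op, the per-vcpu state that the generic vmm code does not
 * track itself: the architectural registers and segment descriptors held in
 * the VMCB, a set of raw VMCB fields addressed via VMCB_ACCESS(offset, size),
 * and the software register context used by svm_launch().  It is compiled
 * only when the kernel is built with the BHYVE_SNAPSHOT option.
 */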
#ifdef BHYVE_SNAPSHOT
static int
svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
	struct svm_vcpu *vcpu;
	int err, running, hostcpu;

	vcpu = vcpui;
	err = 0;

	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
	if (running && hostcpu != curcpu) {
		printf("%s: %s%d is running\n", __func__,
		    vm_name(vcpu->sc->vm), vcpu->vcpuid);
		return (EINVAL);
	}

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);

	/* Guest segments */
	/* ES */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);

	/* CS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);

	/* SS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);

	/* DS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);

	/* FS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);

	/* GS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);

	/* TR */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);

	/* LDTR */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);

	/* EFER */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);

	/* IDTR and GDTR */
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);

	/* Specific AMD registers */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
	if (err != 0)
		goto done;

	/* Snapshot the software register context for the virtual cpu. */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);

	/* Snapshot the remaining svm_vcpu struct fields. */

	/* The next RIP to execute. */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);

	/* The last host cpu and the VMCB dirty bits. */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);

	/* The cached nested (extended) page table generation. */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);

	SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);

	/* Set all caches dirty. */
	if (meta->op == VM_SNAPSHOT_RESTORE)
		svm_set_dirty(vcpu, 0xffffffff);

done:
	return (err);
}

static int
svm_restore_tsc(void *vcpui, uint64_t offset)
{
	struct svm_vcpu *vcpu = vcpui;

	svm_set_tsc_offset(vcpu, offset);

	return (0);
}
#endif
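
/*
 * Table of entry points handed to the generic vmm layer; each member maps
 * the vmm_ops interface onto the corresponding SVM implementation above.
 */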
const struct vmm_ops vmm_ops_amd = {
	.modinit	= svm_modinit,
	.modcleanup	= svm_modcleanup,
	.modresume	= svm_modresume,
	.init		= svm_init,
	.run		= svm_run,
	.cleanup	= svm_cleanup,
	.vcpu_init	= svm_vcpu_init,
	.vcpu_cleanup	= svm_vcpu_cleanup,
	.getreg		= svm_getreg,
	.setreg		= svm_setreg,
	.getdesc	= svm_getdesc,
	.setdesc	= svm_setdesc,
	.getcap		= svm_getcap,
	.setcap		= svm_setcap,
	.vmspace_alloc	= svm_vmspace_alloc,
	.vmspace_free	= svm_vmspace_free,
	.vlapic_init	= svm_vlapic_init,
	.vlapic_cleanup	= svm_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
	.vcpu_snapshot	= svm_vcpu_snapshot,
	.restore_tsc	= svm_restore_tsc,
#endif
};